From 9b2160c0c5337a92136aca125d3d5300c6610778 Mon Sep 17 00:00:00 2001 From: arpbansal Date: Tue, 17 Mar 2026 19:02:04 +0000 Subject: [PATCH 1/4] fixes for chunk download, inference, align with new changes in tensorhub/gen-orchestrator --- examples/firered-image-edit/Dockerfile | 14 +- examples/firered-image-edit/endpoint.toml | 2 +- examples/firered-image-edit/pyproject.toml | 2 +- examples/firered-image-edit/uv.lock | 2 +- examples/flux2-klein-4b/Dockerfile | 16 +- examples/flux2-klein-4b/README.md | 45 +- examples/flux2-klein-4b/endpoint.toml | 16 +- examples/flux2-klein-4b/pyproject.toml | 6 +- .../flux2-klein-4b/src/flux2_klein_4b/main.py | 56 ++- examples/flux2-klein-4b/uv.lock | 2 +- examples/image-gen/Dockerfile | 14 +- examples/image-gen/endpoint.toml | 2 +- examples/image-gen/pyproject.toml | 2 +- examples/image-gen/uv.lock | 2 +- examples/medasr-transcribe/Dockerfile | 14 +- examples/medasr-transcribe/endpoint.toml | 2 +- examples/medasr-transcribe/pyproject.toml | 2 +- examples/medasr-transcribe/uv.lock | 2 +- examples/multi-sdxl-checkpoints/Dockerfile | 14 +- examples/multi-sdxl-checkpoints/endpoint.toml | 2 +- .../multi-sdxl-checkpoints/pyproject.toml | 2 +- examples/multi-sdxl-checkpoints/uv.lock | 2 +- examples/openai-codex/Dockerfile | 10 +- examples/openai-codex/pyproject.toml | 2 +- examples/openai-codex/uv.lock | 2 +- examples/qwen-image-2512/Dockerfile | 12 +- examples/qwen-image-2512/endpoint.toml | 2 +- examples/qwen-image-2512/pyproject.toml | 2 +- examples/qwen-image-2512/uv.lock | 2 +- examples/sd15/Dockerfile | 16 +- examples/sd15/Dockerfile.local | 72 +++ examples/sd15/endpoint.toml | 2 +- examples/sd15/pyproject.toml | 2 +- examples/sd15/uv.lock | 2 +- examples/smoke-test/Dockerfile | 8 +- examples/smoke-test/pyproject.toml | 2 +- examples/smoke-test/uv.lock | 2 +- examples/z-image-lora/Dockerfile | 14 +- examples/z-image-lora/endpoint.toml | 2 +- examples/z-image-lora/pyproject.toml | 2 +- examples/z-image-lora/uv.lock | 2 +- 
pyproject.toml | 2 +- src/gen_worker/cozy_cas.py | 245 +++++----- src/gen_worker/cozy_pipeline_spec.py | 57 +-- src/gen_worker/cozy_snapshot_v2_downloader.py | 405 ++++++++-------- src/gen_worker/decorators.py | 36 +- src/gen_worker/diffusers_model_manager.py | 1 - src/gen_worker/discover.py | 40 -- src/gen_worker/model_ref_downloader.py | 29 +- src/gen_worker/model_refs.py | 11 +- src/gen_worker/pb/frontend_pb2.py | 32 +- src/gen_worker/pb/frontend_pb2_grpc.py | 28 +- src/gen_worker/pb/worker_scheduler_pb2.py | 142 +++--- src/gen_worker/pipeline_loader.py | 18 +- src/gen_worker/run_metrics_v1.py | 37 +- src/gen_worker/tensorhub_toml.py | 2 +- src/gen_worker/testing/http_runner.py | 4 +- src/gen_worker/testing/mock_orchestrator.py | 12 +- src/gen_worker/worker.py | 178 +++---- tests/test_asset_materialization.py | 148 ++++++ tests/test_chunked_reassembly.py | 333 +++++++++++++ tests/test_cozy_cas_resume.py | 33 +- tests/test_cozy_snapshot_v2_downloader.py | 4 +- tests/test_entrypoint_cache_preflight.py | 42 ++ tests/test_entrypoint_worker_mode.py | 17 + tests/test_error_mapping.py | 59 +++ tests/test_file_token_scoping.py | 257 ++++++++++ tests/test_hf_injection_normalization.py | 60 +++ tests/test_injection_type_enforcement.py | 31 ++ tests/test_mock_orchestrator_dev.py | 109 +++++ tests/test_model_cache.py | 312 ++++++++++++ tests/test_model_refs.py | 15 - tests/test_payload_model_selection.py | 78 +++ tests/test_pipeline_thread_safety.py | 355 ++++++++++++++ tests/test_realtime_socket.py | 83 ++++ tests/test_runtime_batching_config_cmd.py | 113 +++++ tests/test_scheduler_model_scope.py | 36 ++ ...test_signature_contract_and_incremental.py | 178 +++++++ tests/test_startup_model_prefetch.py | 12 +- tests/test_tensorhub_toml.py | 17 - tests/test_trainer_checkpointing.py | 54 +++ tests/test_trainer_runtime_orchestrated.py | 448 ++++++++++++++++++ .../test_training_endpoints_contract_smoke.py | 185 ++++++++ tests/test_worker_jwt_rotation.py | 20 +- 
tests/test_worker_leader_redirect.py | 19 + tests/test_worker_model_keyspace.py | 53 +++ tests/test_worker_startup_visibility.py | 131 +++++ tests/test_worker_telemetry_issue67.py | 187 ++++++++ tests/test_worker_wire_protocol.py | 28 ++ uv.lock | 2 +- 90 files changed, 4106 insertions(+), 971 deletions(-) create mode 100644 examples/sd15/Dockerfile.local create mode 100644 tests/test_asset_materialization.py create mode 100644 tests/test_chunked_reassembly.py create mode 100644 tests/test_entrypoint_cache_preflight.py create mode 100644 tests/test_entrypoint_worker_mode.py create mode 100644 tests/test_error_mapping.py create mode 100644 tests/test_file_token_scoping.py create mode 100644 tests/test_hf_injection_normalization.py create mode 100644 tests/test_injection_type_enforcement.py create mode 100644 tests/test_mock_orchestrator_dev.py create mode 100644 tests/test_model_cache.py create mode 100644 tests/test_payload_model_selection.py create mode 100644 tests/test_pipeline_thread_safety.py create mode 100644 tests/test_realtime_socket.py create mode 100644 tests/test_runtime_batching_config_cmd.py create mode 100644 tests/test_scheduler_model_scope.py create mode 100644 tests/test_signature_contract_and_incremental.py create mode 100644 tests/test_trainer_checkpointing.py create mode 100644 tests/test_trainer_runtime_orchestrated.py create mode 100644 tests/test_training_endpoints_contract_smoke.py create mode 100644 tests/test_worker_leader_redirect.py create mode 100644 tests/test_worker_model_keyspace.py create mode 100644 tests/test_worker_startup_visibility.py create mode 100644 tests/test_worker_telemetry_issue67.py create mode 100644 tests/test_worker_wire_protocol.py diff --git a/examples/firered-image-edit/Dockerfile b/examples/firered-image-edit/Dockerfile index 1351d5f..5c9919f 100644 --- a/examples/firered-image-edit/Dockerfile +++ b/examples/firered-image-edit/Dockerfile @@ -1,14 +1,13 @@ # Tenant-supplied Dockerfile example (GPU). 
# -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. # ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -23,9 +22,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -33,7 +33,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/firered-image-edit/endpoint.toml b/examples/firered-image-edit/endpoint.toml index 086e2a8..f39a8ec 100644 --- a/examples/firered-image-edit/endpoint.toml +++ b/examples/firered-image-edit/endpoint.toml @@ -7,7 +7,7 @@ main = "firered_image_edit.main" firered_image_edit = { ref 
= "fireredteam/firered-image-edit-1.0", dtypes = ["bf16"] } [host.requirements] -cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 24 diff --git a/examples/firered-image-edit/pyproject.toml b/examples/firered-image-edit/pyproject.toml index 6ff91ec..9374d51 100644 --- a/examples/firered-image-edit/pyproject.toml +++ b/examples/firered-image-edit/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "FireRed Image Edit example (inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers @ git+https://github.com/huggingface/diffusers", "transformers>=4.51.3", "accelerate", diff --git a/examples/firered-image-edit/uv.lock b/examples/firered-image-edit/uv.lock index 320d631..077624f 100644 --- a/examples/firered-image-edit/uv.lock +++ b/examples/firered-image-edit/uv.lock @@ -503,7 +503,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "protobuf" }, { name = "sentencepiece" }, diff --git a/examples/flux2-klein-4b/Dockerfile b/examples/flux2-klein-4b/Dockerfile index cff5e4c..8326ef1 100644 --- a/examples/flux2-klein-4b/Dockerfile +++ b/examples/flux2-klein-4b/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# # Local build (GPU): @@ -10,8 +10,7 @@ ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -27,19 +26,20 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ # Stable runtime layers (avoid re-downloading torch/cu libs on every tenant build). RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final # Copy lock metadata first so dependency layers are cacheable across source edits. COPY pyproject.toml uv.lock /app/ -# Install tenant dependencies into the global environment without replacing torch. +# Install tenant dependencies into the global environment without replacing torch/gen-worker. 
RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/flux2-klein-4b/README.md b/examples/flux2-klein-4b/README.md index e222d4f..962b512 100644 --- a/examples/flux2-klein-4b/README.md +++ b/examples/flux2-klein-4b/README.md @@ -1,25 +1,40 @@ # flux2-klein-4b -FLUX.2-klein 4B endpoint with separate base/turbo functions and dtype-specific variants. +FLUX.2-klein turbo example using Cozy’s injection pattern (4B + 9B variants). -Naming convention in this repo: +- The worker function only defines input/output + runs inference. +- Fixed model selection is declared in code via `ModelRef(Src.FIXED, "")`. +- Model refs/dtypes are declared in `endpoint.toml [models]`. +- This model is treated as a turbo model: the worker forces `num_inference_steps=8`. + +Steps: + +- `num_inference_steps` is accepted in the payload, but it is clamped to `[4, 8]` (rounded) for predictable cost/latency. -- Base model ref: `black-forest-labs/flux.2-klein-4b-base` -- Turbo model ref: `black-forest-labs/flux.2-klein-4b-turbo` +Code uses: -This avoids ambiguity with upstream naming where `flux.2-klein-4b` is commonly used for turbo variants. 
+```py +pipeline: Annotated[ + Flux2KleinPipeline, + ModelRef(Src.FIXED, "flux2-klein-4b"), +] +``` Functions: -- `generate`: base bf16 -- `generate_turbo`: turbo bf16 -- `generate_fp8`: base fp8 -- `generate_turbo_fp8`: turbo fp8 -- `generate_nvfp4`: base nvfp4 -- `generate_turbo_nvfp4`: turbo nvfp4 +- `generate`: 4B bf16 (regular turbo baseline) +- `generate_fp8`: 4B fp8 +- `generate_9b`: 9B bf16 (regular turbo baseline) +- `generate_9b_fp8`: 9B fp8 +- `generate_int8`: int8-only +- `generate_int4`: int4-only -Notes: +Notes on FP8: -- Fixed model selection is declared in code via `ModelRef(Src.FIXED, "")`. -- Model refs/dtypes are declared in `endpoint.toml [models]`. -- `num_inference_steps` is accepted in the payload, but clamped to `[4, 8]`. +- FP8 support here is **weight-only** quantization via `torchao` (Diffusers TorchAoConfig). +- GPUs vary: FP8 acceleration typically requires newer NVIDIA GPUs (e.g. Ada/Hopper class). + +Notes on INT8/INT4: + +- INT8/INT4 support here is **weight-only** quantization via `torchao` (Diffusers TorchAoConfig). +- INT4 is experimental for diffusion; expect quality regressions or incompatibilities. diff --git a/examples/flux2-klein-4b/endpoint.toml b/examples/flux2-klein-4b/endpoint.toml index ada324a..fe645d6 100644 --- a/examples/flux2-klein-4b/endpoint.toml +++ b/examples/flux2-klein-4b/endpoint.toml @@ -1,20 +1,20 @@ schema_version = 1 -name = "flux.2-klein-4b" +name = "flux2-klein-4b" # Python import path used for function discovery (not a Docker ENTRYPOINT). 
main = "flux2_klein_4b.main" [models] -flux2-klein-4b-base = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["bf16"] } -flux2-klein-4b-turbo = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["bf16"] } -flux2-klein-4b-base_fp8 = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["fp8"] } -flux2-klein-4b-turbo_fp8 = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["fp8"] } -flux2-klein-4b-base_nvfp4 = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["nvfp4"] } -flux2-klein-4b-turbo_nvfp4 = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["nvfp4"] } +flux2-klein-4b = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["bf16"] } +flux2-klein-4b_fp8 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["fp8"] } +flux2-klein-9b = { ref = "black-forest-labs/flux.2-klein-9b", dtypes = ["bf16"] } +flux2-klein-9b_fp8 = { ref = "black-forest-labs/flux.2-klein-9b", dtypes = ["fp8"] } +flux2-klein-4b_int8 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["int8"] } +flux2-klein-4b_int4 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["int4"] } [host.requirements] # If cuda is set, the platform treats this worker as requiring an NVIDIA GPU. 
-cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 24 diff --git a/examples/flux2-klein-4b/pyproject.toml b/examples/flux2-klein-4b/pyproject.toml index c1abd27..9d6cf0c 100644 --- a/examples/flux2-klein-4b/pyproject.toml +++ b/examples/flux2-klein-4b/pyproject.toml @@ -1,15 +1,15 @@ [project] name = "flux2-klein-4b" version = "0.1.0" -description = "Flux2-klein-4B endpoint (base+turbo; inference-only; Cozy manifest via endpoint.toml)" +description = "Flux2-klein-4B example (inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers @ git+https://github.com/huggingface/diffusers.git@99e2cfff27dec514a43e260e885c5e6eca038b36", "transformers>=4.56,<5", "accelerate", "pillow", - # Needed for fp8/nvfp4 variants (diffusers TorchAoConfig and/or variant-aware loading). + # Needed for fp8 weight-only quantization variants (diffusers TorchAoConfig). "torchao", ] diff --git a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py index 127cdba..6f1823c 100644 --- a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py +++ b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py @@ -18,7 +18,6 @@ logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") _flux_resources = ResourceRequirements() -_nvfp4_resources = ResourceRequirements(compute_capability_min=10.0) _pipeline_locks_guard = threading.Lock() _pipeline_locks: dict[int, threading.Lock] = {} @@ -104,68 +103,85 @@ def generate( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-base"), + ModelRef(Src.FIXED, "flux2-klein-4b"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-base") + return _generate(ctx, pipeline, payload, "flux2-klein-4b") @worker_function(_flux_resources) -def generate_turbo( 
+def generate_fp8( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-turbo"), + ModelRef(Src.FIXED, "flux2-klein-4b_fp8"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo") + """ + FP8 function. + + This endpoint is intended to run against an fp8-weight-only artifact (or an artifact + that the worker can load with torchao-backed fp8 quantization enabled). + """ + return _generate(ctx, pipeline, payload, "flux2-klein-4b_fp8") @worker_function(_flux_resources) -def generate_fp8( +def generate_9b( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-base_fp8"), + ModelRef(Src.FIXED, "flux2-klein-9b"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-base_fp8") + return _generate(ctx, pipeline, payload, "flux2-klein-9b") @worker_function(_flux_resources) -def generate_turbo_fp8( +def generate_9b_fp8( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-turbo_fp8"), + ModelRef(Src.FIXED, "flux2-klein-9b_fp8"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo_fp8") + return _generate(ctx, pipeline, payload, "flux2-klein-9b_fp8") -@worker_function(_nvfp4_resources) -def generate_nvfp4( +@worker_function(_flux_resources) +def generate_int8( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-base_nvfp4"), + ModelRef(Src.FIXED, "flux2-klein-4b_int8"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-base_nvfp4") + """ + INT8 function (weight-only). + This endpoint is intended to run against an int8-weight-only artifact (or an artifact + that the worker can load with torchao-backed int8 quantization enabled). 
+ """ + return _generate(ctx, pipeline, payload, "flux2-klein-4b_int8") -@worker_function(_nvfp4_resources) -def generate_turbo_nvfp4( + +@worker_function(_flux_resources) +def generate_int4( ctx: ActionContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b-turbo_nvfp4"), + ModelRef(Src.FIXED, "flux2-klein-4b_int4"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo_nvfp4") + """ + INT4 function (weight-only). + + This endpoint is experimental; expect quality regressions or incompatibilities. + """ + return _generate(ctx, pipeline, payload, "flux2-klein-4b_int4") diff --git a/examples/flux2-klein-4b/uv.lock b/examples/flux2-klein-4b/uv.lock index 448d669..8353718 100644 --- a/examples/flux2-klein-4b/uv.lock +++ b/examples/flux2-klein-4b/uv.lock @@ -512,7 +512,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers.git?rev=99e2cfff27dec514a43e260e885c5e6eca038b36" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "torchao" }, { name = "transformers", specifier = ">=4.56,<5" }, diff --git a/examples/image-gen/Dockerfile b/examples/image-gen/Dockerfile index 1203d3c..cd4c800 100644 --- a/examples/image-gen/Dockerfile +++ b/examples/image-gen/Dockerfile @@ -1,14 +1,13 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -24,9 +23,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/image-gen/endpoint.toml b/examples/image-gen/endpoint.toml index de4a700..2daf35f 100644 --- a/examples/image-gen/endpoint.toml +++ b/examples/image-gen/endpoint.toml @@ -7,7 +7,7 @@ main = "image_gen.main" sdxl = { ref = "stabilityai/stable-diffusion-xl-base-1.0", dtypes = ["fp16", "bf16"] } [host.requirements] -cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 12 diff --git a/examples/image-gen/pyproject.toml b/examples/image-gen/pyproject.toml index 7902855..f6074d2 100644 --- a/examples/image-gen/pyproject.toml +++ b/examples/image-gen/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = 
"Image generation function" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers", "transformers", "accelerate", diff --git a/examples/image-gen/uv.lock b/examples/image-gen/uv.lock index 74df27e..481720f 100644 --- a/examples/image-gen/uv.lock +++ b/examples/image-gen/uv.lock @@ -786,7 +786,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "transformers" }, ] diff --git a/examples/medasr-transcribe/Dockerfile b/examples/medasr-transcribe/Dockerfile index 8c87c3e..837f56e 100644 --- a/examples/medasr-transcribe/Dockerfile +++ b/examples/medasr-transcribe/Dockerfile @@ -1,15 +1,14 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Installs required system libs (libsndfile1) for soundfile. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -25,9 +24,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -42,7 +42,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/medasr-transcribe/endpoint.toml b/examples/medasr-transcribe/endpoint.toml index 6362d69..dd7ebeb 100644 --- a/examples/medasr-transcribe/endpoint.toml +++ b/examples/medasr-transcribe/endpoint.toml @@ -7,7 +7,7 @@ main = "medasr_transcribe.main" medasr = { ref = "google/medasr", dtypes = ["fp16", "bf16"] } [host.requirements] -cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 8 diff --git a/examples/medasr-transcribe/pyproject.toml b/examples/medasr-transcribe/pyproject.toml index 44e0889..f2d7c0d 100644 --- a/examples/medasr-transcribe/pyproject.toml +++ b/examples/medasr-transcribe/pyproject.toml @@ 
-4,7 +4,7 @@ version = "0.1.0" description = "MedASR transcription worker function example" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "numpy", "soundfile", "soxr", diff --git a/examples/medasr-transcribe/uv.lock b/examples/medasr-transcribe/uv.lock index 75ae15d..2214c33 100644 --- a/examples/medasr-transcribe/uv.lock +++ b/examples/medasr-transcribe/uv.lock @@ -841,7 +841,7 @@ dev = [ [package.metadata] requires-dist = [ - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "numpy" }, { name = "soundfile" }, { name = "soxr" }, diff --git a/examples/multi-sdxl-checkpoints/Dockerfile b/examples/multi-sdxl-checkpoints/Dockerfile index f4c4f1b..4f87469 100644 --- a/examples/multi-sdxl-checkpoints/Dockerfile +++ b/examples/multi-sdxl-checkpoints/Dockerfile @@ -1,14 +1,13 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -24,10 +23,11 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -35,7 +35,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/multi-sdxl-checkpoints/endpoint.toml b/examples/multi-sdxl-checkpoints/endpoint.toml index 0a8f312..1fd8208 100644 --- a/examples/multi-sdxl-checkpoints/endpoint.toml +++ b/examples/multi-sdxl-checkpoints/endpoint.toml @@ -4,7 +4,7 @@ name = "multi-sdxl-checkpoints" main = "multi_sdxl_checkpoints.main" [host.requirements] -cuda = "12.8" +cuda = "12.6" [models.generate] sdxl-base = "stabilityai/stable-diffusion-xl-base-1.0" diff --git a/examples/multi-sdxl-checkpoints/pyproject.toml b/examples/multi-sdxl-checkpoints/pyproject.toml index a4a586e..f3ddf56 100644 --- 
a/examples/multi-sdxl-checkpoints/pyproject.toml +++ b/examples/multi-sdxl-checkpoints/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Multi SDXL checkpoints example with payload-based model selection" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers", "transformers", "accelerate", diff --git a/examples/multi-sdxl-checkpoints/uv.lock b/examples/multi-sdxl-checkpoints/uv.lock index 872c7f7..07fa718 100644 --- a/examples/multi-sdxl-checkpoints/uv.lock +++ b/examples/multi-sdxl-checkpoints/uv.lock @@ -971,7 +971,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "transformers" }, ] diff --git a/examples/openai-codex/Dockerfile b/examples/openai-codex/Dockerfile index 73fb8fd..b0b3f67 100644 --- a/examples/openai-codex/Dockerfile +++ b/examples/openai-codex/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example: OpenAI Codex CLI worker (Ubuntu 24.04). # # Contract: -# - Installs dependencies from pyproject.toml/uv.lock. +# - Installs gen-worker. # - Runs discovery at build time, baking /app/.tensorhub/endpoint.lock into the image. # - Uses gen-worker as the image ENTRYPOINT. # @@ -53,12 +53,18 @@ RUN set -eu; \ chmod +x /usr/local/bin/codex; \ rm -rf /tmp/codex.tar.gz +# Stable runtime layer. +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 + # Copy lock metadata first (cache-friendly). 
COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - -o /tmp/requirements.txt \ + --no-emit-package gen-worker \ + -o /tmp/requirements.all.txt \ + && grep -Ev '^(gen-worker)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt # Copy app code late. diff --git a/examples/openai-codex/pyproject.toml b/examples/openai-codex/pyproject.toml index 8f7394f..52f20ed 100644 --- a/examples/openai-codex/pyproject.toml +++ b/examples/openai-codex/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Example Cozy worker that shells out to OpenAI Codex CLI and streams JSONL events." requires-python = ">=3.12" dependencies = [ - "gen-worker", + "gen-worker==0.3.0", "msgspec", ] diff --git a/examples/openai-codex/uv.lock b/examples/openai-codex/uv.lock index 2d01c9e..a3aabb1 100644 --- a/examples/openai-codex/uv.lock +++ b/examples/openai-codex/uv.lock @@ -852,7 +852,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "gen-worker" }, + { name = "gen-worker", specifier = "==0.3.0" }, { name = "msgspec" }, ] diff --git a/examples/qwen-image-2512/Dockerfile b/examples/qwen-image-2512/Dockerfile index 4fae936..6a531e4 100644 --- a/examples/qwen-image-2512/Dockerfile +++ b/examples/qwen-image-2512/Dockerfile @@ -1,14 +1,13 @@ # Tenant-supplied Dockerfile example. # # - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -24,9 +23,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/qwen-image-2512/endpoint.toml b/examples/qwen-image-2512/endpoint.toml index e56bb6b..d0e3bc4 100644 --- a/examples/qwen-image-2512/endpoint.toml +++ b/examples/qwen-image-2512/endpoint.toml @@ -7,7 +7,7 @@ main = "qwen_image_2512.main" qwen_image = { ref = "qwen/qwen-image-2512", dtypes = ["bf16", "fp16"] } [host.requirements] -cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 24 diff --git a/examples/qwen-image-2512/pyproject.toml b/examples/qwen-image-2512/pyproject.toml index 342c9e7..14a3b92 100644 --- a/examples/qwen-image-2512/pyproject.toml +++ b/examples/qwen-image-2512/pyproject.toml @@ -4,7 
+4,7 @@ version = "0.1.0" description = "Qwen Image 2512 generation function" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers @ git+https://github.com/huggingface/diffusers", "transformers>=4.51.3", "accelerate", diff --git a/examples/qwen-image-2512/uv.lock b/examples/qwen-image-2512/uv.lock index b96fbac..77af1b4 100644 --- a/examples/qwen-image-2512/uv.lock +++ b/examples/qwen-image-2512/uv.lock @@ -1598,7 +1598,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "transformers", specifier = ">=4.51.3" }, ] diff --git a/examples/sd15/Dockerfile b/examples/sd15/Dockerfile index c52a5dc..99bb5a2 100644 --- a/examples/sd15/Dockerfile +++ b/examples/sd15/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. +# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. # # Local build (GPU): @@ -10,8 +10,7 @@ ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -28,19 +27,20 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ # Stable runtime layers (avoid re-downloading torch/cu libs on every tenant build). 
RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final # Copy lock metadata first so dependency layers are cacheable across source edits. COPY pyproject.toml uv.lock /app/ -# Install tenant dependencies into the global environment without replacing torch. +# Install tenant dependencies into the global environment without replacing torch/gen-worker. RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/sd15/Dockerfile.local b/examples/sd15/Dockerfile.local new file mode 100644 index 0000000..959a94c --- /dev/null +++ b/examples/sd15/Dockerfile.local @@ -0,0 +1,72 @@ +# Local-dev Dockerfile for the sd15 example. +# +# Difference vs ./Dockerfile: +# - Installs gen-worker from the local checkout (this repo) so you can test +# unreleased changes. +# +# Build from repo root: +# docker build -f examples/sd15/Dockerfile.local -t cozy-example-sd15-local:dev . 
+# +ARG PYTHON_VERSION=3.12 +FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base + +ARG UV_TORCH_BACKEND=cu126 +ARG TORCH_SPEC="~=2.10.0" + +WORKDIR /app + +ENV UV_CACHE_DIR=/var/cache/uv +ENV UV_LINK_MODE=copy + +RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ + --mount=type=cache,id=cozy-apt-lists,target=/var/lib/apt/lists,sharing=locked \ + apt-get update && apt-get install -y --no-install-recommends \ + git \ + && apt-get clean + +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + "torch${TORCH_SPEC}" + +# Install gen-worker from the local checkout. +COPY pyproject.toml uv.lock README.md /gen-worker/ +COPY src/ /gen-worker/src/ +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages /gen-worker + +FROM cozy_base AS cozy_final + +# Copy lock metadata first so dependency layers are cacheable across source edits. +COPY examples/sd15/pyproject.toml examples/sd15/uv.lock /app/ + +# Install tenant dependencies into the global environment without replacing torch/gen-worker. +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ + --no-emit-package torch --no-emit-package gen-worker \ + -o /tmp/requirements.all.txt \ + && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ + && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt + +# Copy app code late so app edits only invalidate the final layers. +COPY examples/sd15/ /app/ + +# Install the project itself without altering dependency layers. 
+RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages --no-deps --no-sources /app + +# Bake discovered functions into the image so Cozy Hub can read them without executing tenant code. +RUN mkdir -p /app/.tensorhub \ + && python -m gen_worker.discover > /app/.tensorhub/endpoint.lock + +# Run as non-root at runtime. +RUN groupadd --system --gid 10001 cozy \ + && useradd --system --uid 10001 --gid cozy --create-home --home-dir /home/cozy --shell /usr/sbin/nologin cozy \ + && chown -R cozy:cozy /app /home/cozy + +ENV HOME=/home/cozy +ENV XDG_CACHE_HOME=/home/cozy/.cache +ENV HF_HOME=/home/cozy/.cache/huggingface + +USER cozy:cozy + +ENTRYPOINT ["python", "-m", "gen_worker.entrypoint"] diff --git a/examples/sd15/endpoint.toml b/examples/sd15/endpoint.toml index 4248a9f..98d3a69 100644 --- a/examples/sd15/endpoint.toml +++ b/examples/sd15/endpoint.toml @@ -12,7 +12,7 @@ sd15_int4 = { ref = "stable-diffusion-v1-5/stable-diffusion-v1-5", dtypes = ["in [host.requirements] # If cuda is set, the platform treats this worker as requiring an NVIDIA GPU. 
-cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 8 diff --git a/examples/sd15/pyproject.toml b/examples/sd15/pyproject.toml index 9c83eac..c8fb76b 100644 --- a/examples/sd15/pyproject.toml +++ b/examples/sd15/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.1" description = "Stable Diffusion 1.5 example (inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers", "transformers<5", "accelerate", diff --git a/examples/sd15/uv.lock b/examples/sd15/uv.lock index aded35c..8574804 100644 --- a/examples/sd15/uv.lock +++ b/examples/sd15/uv.lock @@ -1733,7 +1733,7 @@ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, { name = "ftfy" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "pillow" }, { name = "torchao" }, { name = "transformers", specifier = "<5" }, diff --git a/examples/smoke-test/Dockerfile b/examples/smoke-test/Dockerfile index 11df331..4fab034 100644 --- a/examples/smoke-test/Dockerfile +++ b/examples/smoke-test/Dockerfile @@ -1,8 +1,8 @@ # Tenant-supplied Dockerfile example (CPU). # -# - Installs dependencies from pyproject.toml/uv.lock. +# - Installs gen-worker in a stable cacheable layer. # - Installs torch (CPU) so Cozy Hub can record a stable backend version for tagging. -# - Installs tenant deps from uv.lock. +# - Installs tenant deps from uv.lock without reinstalling gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 @@ -26,12 +26,14 @@ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package gen-worker --no-emit-package torch \ -o /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/smoke-test/pyproject.toml b/examples/smoke-test/pyproject.toml index 8713ff5..858525b 100644 --- a/examples/smoke-test/pyproject.toml +++ b/examples/smoke-test/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Simple worker functions for smoke testing" requires-python = ">=3.12" dependencies = [ - "gen-worker", + "gen-worker==0.3.0", ] [dependency-groups] diff --git a/examples/smoke-test/uv.lock b/examples/smoke-test/uv.lock index 1202299..7ebd088 100644 --- a/examples/smoke-test/uv.lock +++ b/examples/smoke-test/uv.lock @@ -1212,7 +1212,7 @@ dev = [ ] [package.metadata] -requires-dist = [{ name = "gen-worker" }] +requires-dist = [{ name = "gen-worker", specifier = "==0.3.0" }] [package.metadata.requires-dev] dev = [{ name = "mypy", specifier = ">=1.10.0" }] diff --git a/examples/z-image-lora/Dockerfile b/examples/z-image-lora/Dockerfile index 1203d3c..cd4c800 100644 --- a/examples/z-image-lora/Dockerfile +++ b/examples/z-image-lora/Dockerfile @@ -1,14 +1,13 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch. +# - Installs torch + gen-worker in stable cacheable layers. 
+# - Installs tenant deps from uv.lock without replacing torch/gen-worker. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. # ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG CUDA_VERSION=12.8 -ARG UV_TORCH_BACKEND= +ARG UV_TORCH_BACKEND=cu126 ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -24,9 +23,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ - && uv pip install --system --break-system-packages --torch-backend "$backend" \ + uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ "torch${TORCH_SPEC}" +RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ + uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch \ + --no-emit-package torch --no-emit-package gen-worker \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/z-image-lora/endpoint.toml b/examples/z-image-lora/endpoint.toml index 08f5fcf..080fb51 100644 --- a/examples/z-image-lora/endpoint.toml +++ b/examples/z-image-lora/endpoint.toml @@ -7,7 +7,7 @@ main = "z_image_lora.main" z-image-turbo = { ref = "tongyi-mai/z-image-turbo", dtypes = ["bf16", "fp16"] } [host.requirements] -cuda = "12.8" +cuda = "12.6" [resources] vram_gb = 16 diff --git a/examples/z-image-lora/pyproject.toml 
b/examples/z-image-lora/pyproject.toml index 77dc2f0..e13f00f 100644 --- a/examples/z-image-lora/pyproject.toml +++ b/examples/z-image-lora/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Image generation with dynamic LoRA loading (z-image pattern)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]", + "gen-worker[torch]==0.3.0", "diffusers>=0.36.0", "transformers", "accelerate", diff --git a/examples/z-image-lora/uv.lock b/examples/z-image-lora/uv.lock index cf8fe8f..aed4a1d 100644 --- a/examples/z-image-lora/uv.lock +++ b/examples/z-image-lora/uv.lock @@ -2088,7 +2088,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", specifier = ">=0.36.0" }, - { name = "gen-worker", extras = ["torch"] }, + { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, { name = "peft" }, { name = "pillow" }, { name = "transformers" }, diff --git a/pyproject.toml b/pyproject.toml index 45d7a6d..cb741d9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "gen-worker" -version = "0.3.9" +version = "0.3.7" description = "A library used to build custom functions in Cozy Creator's serverless function platform." readme = "README.md" license = "MIT" diff --git a/src/gen_worker/cozy_cas.py b/src/gen_worker/cozy_cas.py index d871f1d..0cdfb64 100644 --- a/src/gen_worker/cozy_cas.py +++ b/src/gen_worker/cozy_cas.py @@ -307,16 +307,26 @@ def _safe_symlink_dir(target: Path, link: Path) -> None: max_value=30, # cap backoff at 30s between retries ) async def _download_one_file(url: str, dst: Path, expected_size: int, expected_blake3: str) -> None: - import fcntl + """Download a single file with HTTP Range resume, size + blake3 validation. + + Fully async — no blocking calls that would stall the event loop. + Caller is responsible for ensuring only one coroutine downloads a given + dst at a time (dedup by digest in _ensure_blobs). 
+ """ import logging log = logging.getLogger("gen_worker.download") - log.info("download_start path=%s expected_size=%s expected_blake3=%s", dst.name, expected_size, (expected_blake3 or "")[:16]) - print(f"DEBUG download_start path={dst.name} expected_size={expected_size} expected_blake3={(expected_blake3 or '')[:16]}") + def _human_size(n: int) -> str: + if n >= 1 << 30: + return f"{n / (1 << 30):.1f}GB" + if n >= 1 << 20: + return f"{n / (1 << 20):.1f}MB" + if n >= 1 << 10: + return f"{n / (1 << 10):.1f}KB" + return f"{n}B" + # Already downloaded and valid? if dst.exists(): - log.info("dst_exists path=%s size=%s", dst, dst.stat().st_size) - print(f"DEBUG dst_exists path={dst} size={dst.stat().st_size}") try: if expected_size and dst.stat().st_size != expected_size: raise ValueError("size mismatch") @@ -324,142 +334,109 @@ async def _download_one_file(url: str, dst: Path, expected_size: int, expected_b got = _blake3_file(dst) if got.lower() != expected_blake3.lower(): raise ValueError("blake3 mismatch") + log.info("download_cached path=%s size=%s", dst.name, _human_size(dst.stat().st_size)) return except Exception: - # Fall through to re-download. - pass - - # Use sock_read instead of total timeout so actively-streaming large files - # are not killed. total=None lets multi-GB downloads run as long as data - # keeps flowing; sock_read=120 catches genuine stalls. 
- timeout = aiohttp.ClientTimeout(total=None, sock_connect=float(os.getenv("WORKER_MODEL_DOWNLOAD_SOCK_CONNECT_TIMEOUT_S", "60")), - sock_read=float(os.getenv("WORKER_MODEL_DOWNLOAD_SOCK_READ_TIMEOUT_S", "180"))) + pass # re-download + + timeout = aiohttp.ClientTimeout( + total=None, + sock_connect=float(os.getenv("WORKER_MODEL_DOWNLOAD_SOCK_CONNECT_TIMEOUT_S", "60")), + sock_read=float(os.getenv("WORKER_MODEL_DOWNLOAD_SOCK_READ_TIMEOUT_S", "180")), + ) tmp = dst.with_suffix(dst.suffix + ".part") - lock_path = dst.with_suffix(dst.suffix + ".lock") - # File-level exclusive lock: prevents concurrent writes to the same .part - # file even from different async tasks or downloader instances. - lock_path.parent.mkdir(parents=True, exist_ok=True) - lock_fd = open(lock_path, "w") - try: - fcntl.flock(lock_fd.fileno(), fcntl.LOCK_EX) - print(f"DEBUG file_lock_acquired path={dst.name}") - - # Re-check dst after acquiring the lock — another holder might have - # already completed the download while we waited. - if dst.exists(): - try: - if expected_size and dst.stat().st_size != expected_size: - raise ValueError("size mismatch after lock") - if expected_blake3: - got = _blake3_file(dst) - if got.lower() != expected_blake3.lower(): - raise ValueError("blake3 mismatch after lock") - print(f"DEBUG file_lock_dst_completed path={dst.name} (another writer finished)") - return - except Exception: - pass - - # If we have a partial file, try to resume via HTTP Range. - offset = 0 - if tmp.exists(): - try: - offset = tmp.stat().st_size - except OSError: - # Another coroutine may have renamed tmp→dst between the exists() check and stat(). 
- offset = 0 - if offset: - log.info("resume_attempt path=%s offset=%s expected_size=%s", dst.name, offset, expected_size) - print(f"DEBUG resume_attempt path={dst.name} offset={offset} expected_size={expected_size}") - if expected_size and offset > expected_size: - tmp.unlink(missing_ok=True) - offset = 0 - - # If the partial file is already complete, validate + finalize. - if offset and expected_size and offset == expected_size: - got = _blake3_file(tmp) - if expected_blake3 and got.lower() != expected_blake3.lower(): - tmp.unlink(missing_ok=True) - else: - tmp.rename(dst) - return - - headers: Dict[str, str] = {} - mode = "wb" - if offset and expected_size: - headers["Range"] = f"bytes={offset}-" - mode = "ab" - print(f"DEBUG range_header path={dst.name} Range=bytes={offset}- mode={mode}") - - async def _stream_to_file(resp: aiohttp.ClientResponse, *, mode: str, start: int) -> None: - nonlocal expected_size - size = start - with open(tmp, mode) as f: - async for chunk in resp.content.iter_chunked(1 << 20): - if not chunk: - continue - f.write(chunk) - size += len(chunk) - if expected_size and size > expected_size: - raise ValueError("download exceeded expected size") - - async with aiohttp.ClientSession(timeout=timeout) as session: - async with session.get(url, headers=headers) as resp: - content_range = str(resp.headers.get("Content-Range") or "").strip() - print(f"DEBUG http_response path={dst.name} status={resp.status} content_range={content_range!r} content_length={resp.headers.get('Content-Length', 'unknown')} offset={offset}") - # If the server ignored our Range request, restart from scratch to avoid - # duplicating bytes by appending a full response. - # Some gateways can return 206 with an unexpected range start. - # Treat that the same as a 200-on-resume and restart from byte 0. 
- if offset and ( - resp.status == 200 - or ( - resp.status == 206 - and not content_range.startswith(f"bytes {offset}-") - ) - ): - print(f"DEBUG range_ignored path={dst.name} status={resp.status} content_range={content_range!r} restarting_from_zero=True") - resp.release() - async with session.get(url) as resp2: - resp2.raise_for_status() - print(f"DEBUG range_restart path={dst.name} status={resp2.status} content_length={resp2.headers.get('Content-Length', 'unknown')}") - await _stream_to_file(resp2, mode="wb", start=0) - else: - resp.raise_for_status() - await _stream_to_file(resp, mode=mode, start=offset) - - # Validate final file. - actual_size = tmp.stat().st_size - log.info("download_complete path=%s actual_size=%s expected_size=%s", dst.name, actual_size, expected_size) - print(f"DEBUG download_complete path={dst.name} actual_size={actual_size} expected_size={expected_size}") - if expected_size and actual_size != expected_size: - log.error("size_mismatch path=%s expected=%s got=%s url=%s", dst.name, expected_size, actual_size, url[:80]) - print(f"DEBUG size_mismatch path={dst.name} expected={expected_size} got={actual_size} url={url[:80]}") - tmp.unlink(missing_ok=True) - raise ValueError(f"size mismatch (expected {expected_size}, got {actual_size})") - if expected_blake3: - got = _blake3_file(tmp) - log.info("blake3_check path=%s expected=%s got=%s", dst.name, (expected_blake3 or "")[:16], got[:16]) - print(f"DEBUG blake3_check path={dst.name} expected={(expected_blake3 or '')[:16]} got={got[:16]}") - if got.lower() != expected_blake3.lower(): - log.error("blake3_mismatch path=%s", dst.name) - print(f"DEBUG blake3_mismatch path={dst.name}") - tmp.unlink(missing_ok=True) - raise ValueError("blake3 mismatch") - # A concurrent coroutine may have already renamed tmp→dst (won the race). - # Use an atomic replace so we don't fail if dst now exists. + # Resume from partial download if available. 
+ offset = 0 + if tmp.exists(): try: - tmp.replace(dst) + offset = tmp.stat().st_size except OSError: - # dst was created by another coroutine; .part is stale, just remove it. + offset = 0 + if expected_size and offset > expected_size: tmp.unlink(missing_ok=True) - finally: - fcntl.flock(lock_fd.fileno(), fcntl.LOCK_UN) - lock_fd.close() - try: - lock_path.unlink(missing_ok=True) - except OSError: - pass + offset = 0 + + # Partial file already complete? Validate and finalize. + if offset and expected_size and offset == expected_size: + got = _blake3_file(tmp) + if expected_blake3 and got.lower() != expected_blake3.lower(): + log.warning("partial_corrupt path=%s (blake3 mismatch, restarting)", dst.name) + tmp.unlink(missing_ok=True) + offset = 0 + else: + tmp.rename(dst) + log.info("download_resumed_complete path=%s size=%s", dst.name, _human_size(expected_size)) + return + + headers: Dict[str, str] = {} + mode = "wb" + if offset and expected_size: + headers["Range"] = f"bytes={offset}-" + mode = "ab" + log.info("download_resume path=%s offset=%s/%s (%s/%s)", + dst.name, offset, expected_size, + _human_size(offset), _human_size(expected_size)) + else: + log.info("download_start path=%s size=%s blake3=%s", + dst.name, _human_size(expected_size) if expected_size else "unknown", + (expected_blake3 or "n/a")[:16]) + + async def _stream(resp: aiohttp.ClientResponse, *, write_mode: str, start: int) -> None: + downloaded = start + last_log = start + log_every = max(expected_size // 10, 50 << 20) if expected_size else (100 << 20) + with open(tmp, write_mode) as f: + async for chunk in resp.content.iter_chunked(1 << 20): + if not chunk: + continue + f.write(chunk) + downloaded += len(chunk) + if expected_size and downloaded > expected_size: + raise ValueError(f"download exceeded expected size ({downloaded} > {expected_size})") + if downloaded - last_log >= log_every: + pct = f" ({100 * downloaded // expected_size}%)" if expected_size else "" + log.info("download_progress 
path=%s downloaded=%s%s", + dst.name, _human_size(downloaded), pct) + last_log = downloaded + + async with aiohttp.ClientSession(timeout=timeout) as session: + async with session.get(url, headers=headers) as resp: + content_range = str(resp.headers.get("Content-Range") or "").strip() + + # Server ignored Range or returned unexpected range start? + # Restart from byte 0 to avoid corrupted appends. + if offset and ( + resp.status == 200 + or (resp.status == 206 and not content_range.startswith(f"bytes {offset}-")) + ): + log.info("download_range_ignored path=%s status=%s (restarting from 0)", dst.name, resp.status) + resp.release() + async with session.get(url) as resp2: + resp2.raise_for_status() + await _stream(resp2, write_mode="wb", start=0) + else: + resp.raise_for_status() + await _stream(resp, write_mode=mode, start=offset) + + # Validate. + actual_size = tmp.stat().st_size + if expected_size and actual_size != expected_size: + log.error("download_size_mismatch path=%s expected=%s got=%s", dst.name, expected_size, actual_size) + tmp.unlink(missing_ok=True) + raise ValueError(f"size mismatch (expected {expected_size}, got {actual_size})") + + if expected_blake3: + got = _blake3_file(tmp) + if got.lower() != expected_blake3.lower(): + log.error("download_blake3_mismatch path=%s expected=%s got=%s", + dst.name, expected_blake3[:16], got[:16]) + tmp.unlink(missing_ok=True) + raise ValueError("blake3 mismatch") + + # Atomic finalize. 
+ tmp.replace(dst) + log.info("download_done path=%s size=%s", dst.name, _human_size(actual_size)) def _blake3_file(path: Path, chunk_size: int = 1 << 20) -> str: diff --git a/src/gen_worker/cozy_pipeline_spec.py b/src/gen_worker/cozy_pipeline_spec.py index c8c80f0..21fa5a7 100644 --- a/src/gen_worker/cozy_pipeline_spec.py +++ b/src/gen_worker/cozy_pipeline_spec.py @@ -1,21 +1,16 @@ from __future__ import annotations import json -import logging import os -import tomllib from dataclasses import dataclass from pathlib import Path from typing import Any, Dict, Optional, Tuple import yaml -logger = logging.getLogger(__name__) COZY_PIPELINE_LOCK_FILENAME = "cozy.pipeline.lock.yaml" COZY_PIPELINE_FILENAME = "cozy.pipeline.yaml" -PIPELINE_LOCK_TOML_FILENAME = "pipeline.lock" -PIPELINE_TOML_FILENAME = "pipeline.toml" DIFFUSERS_MODEL_INDEX_FILENAME = "model_index.json" @@ -38,13 +33,6 @@ def custom_pipeline_path(self) -> Optional[str]: s = str(v).strip() return s or None - @property - def variant(self) -> Optional[str]: - """Diffusers variant (e.g. 'fp16', 'fp8') from the pipeline spec.""" - pipe = self.raw.get("pipe") or {} - v = str(pipe.get("variant") or "").strip() - return v or None - def _safe_child_path(root: Path, rel: str) -> Path: # Ensure rel doesn't escape root (best-effort). 
@@ -62,41 +50,24 @@ def load_cozy_pipeline_spec(model_root: Path) -> Optional[CozyPipelineSpec]: This is a worker-side helper used during pipeline loading to implement: - prefer `cozy.pipeline.lock.yaml` when present - fall back to `cozy.pipeline.yaml` otherwise - - fall back to `pipeline.lock` / `pipeline.toml` (TOML) if no YAML found """ root = Path(model_root) lock_path = root / COZY_PIPELINE_LOCK_FILENAME spec_path = lock_path if lock_path.exists() else (root / COZY_PIPELINE_FILENAME) - if spec_path.exists(): - raw = yaml.safe_load(spec_path.read_text(encoding="utf-8")) - if not isinstance(raw, dict): - raise ValueError("invalid cozy pipeline spec (expected mapping)") - api = str(raw.get("apiVersion") or "").strip() - kind = str(raw.get("kind") or "").strip() - if api and api != "v1": - raise ValueError(f"unsupported cozy pipeline apiVersion: {api!r}") - if kind and kind != "DiffusersPipeline": - raise ValueError(f"unsupported cozy pipeline kind: {kind!r}") - logger.info("DEBUG loaded cozy pipeline spec from %s", spec_path.name) - return CozyPipelineSpec(source_path=spec_path, raw=raw) - - # Fallback: read pipeline.lock / pipeline.toml (TOML format, stored by tensorhub ingest). 
- toml_lock = root / PIPELINE_LOCK_TOML_FILENAME - toml_spec = toml_lock if toml_lock.exists() else (root / PIPELINE_TOML_FILENAME) - if toml_spec.exists(): - raw = tomllib.loads(toml_spec.read_text(encoding="utf-8")) - if not isinstance(raw, dict): - raise ValueError("invalid pipeline toml (expected mapping)") - api = str(raw.get("apiVersion") or "").strip() - kind = str(raw.get("kind") or "").strip() - if api and api != "v1": - raise ValueError(f"unsupported pipeline toml apiVersion: {api!r}") - if kind and kind != "DiffusersPipeline": - raise ValueError(f"unsupported pipeline toml kind: {kind!r}") - logger.info("DEBUG loaded cozy pipeline spec from %s (toml fallback)", toml_spec.name) - return CozyPipelineSpec(source_path=toml_spec, raw=raw) - - return None + if not spec_path.exists(): + return None + + raw = yaml.safe_load(spec_path.read_text(encoding="utf-8")) + if not isinstance(raw, dict): + raise ValueError("invalid cozy pipeline spec (expected mapping)") + api = str(raw.get("apiVersion") or "").strip() + kind = str(raw.get("kind") or "").strip() + if api and api != "v1": + raise ValueError(f"unsupported cozy pipeline apiVersion: {api!r}") + if kind and kind != "DiffusersPipeline": + raise ValueError(f"unsupported cozy pipeline kind: {kind!r}") + + return CozyPipelineSpec(source_path=spec_path, raw=raw) def cozy_custom_pipeline_arg(model_root: Path, spec: CozyPipelineSpec) -> Optional[str]: diff --git a/src/gen_worker/cozy_snapshot_v2_downloader.py b/src/gen_worker/cozy_snapshot_v2_downloader.py index f15020c..645def1 100644 --- a/src/gen_worker/cozy_snapshot_v2_downloader.py +++ b/src/gen_worker/cozy_snapshot_v2_downloader.py @@ -2,58 +2,47 @@ import asyncio import json +import logging import os import re import shutil import threading from pathlib import Path -from typing import Any, Coroutine, Dict, List, Optional +from typing import Any, Coroutine, Dict, List, Optional, Set -from .cozy_cas import _download_one_file as _download_one_file # reuse verified 
Range-resume downloader +from .cozy_cas import _download_one_file as _download_one_file from .cozy_cas import _norm_rel_path from .tensorhub_policy import default_resolve_preferences, detect_worker_capabilities from .tensorhub_v2 import CozyHubV2Client, CozyHubResolveArtifactResult, CozyHubSnapshotFile from .model_refs import CozyRef -# Module-global blob download locks shared across ALL CozySnapshotV2Downloader -# instances. Without this, concurrent callers (startup prefetch, task handler, -# LoadModelCommand) each create their own downloader instance with separate -# _blob_locks dicts, allowing parallel writes to the same .part file and -# causing file corruption (interleaved appends → oversized / invalid blobs). +_log = logging.getLogger("gen_worker.download") -# threading.Lock (not asyncio.Lock) is used so that the locks work correctly -# even when callers run in different event loops (e.g. startup prefetch via -# asyncio.run() in a thread vs. LoadModelCommand on the main loop). -_GLOBAL_BLOB_LOCKS_LOCK = threading.Lock() -_GLOBAL_BLOB_LOCKS: Dict[str, threading.Lock] = {} +# --------------------------------------------------------------------------- +# Snapshot build coordination (threading-based, works across event loops) +# --------------------------------------------------------------------------- class _SnapshotEntry: - """Coordinates concurrent snapshot builds: one builder, zero-or-more waiters.""" + """One builder, zero-or-more waiters.""" def __init__(self) -> None: - self.event = threading.Event() # set when snap_dir is ready (or build failed) + self.event = threading.Event() self.exception: Optional[BaseException] = None -_GLOBAL_SNAPSHOT_LOCKS_LOCK = threading.Lock() -_GLOBAL_SNAPSHOT_LOCKS: Dict[str, _SnapshotEntry] = {} +_SNAP_LOCK = threading.Lock() +_SNAP_ENTRIES: Dict[str, _SnapshotEntry] = {} -def _get_blob_lock(digest: str) -> threading.Lock: - """Return (or create) a per-digest threading.Lock from the module-global map.""" - with 
_GLOBAL_BLOB_LOCKS_LOCK: - lock = _GLOBAL_BLOB_LOCKS.get(digest) - if lock is None: - lock = threading.Lock() - _GLOBAL_BLOB_LOCKS[digest] = lock - return lock - +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- def _blob_path(blobs_root: Path, digest: str) -> Path: digest = (digest or "").strip().lower() if len(digest) < 4: - raise ValueError("invalid blake3 digest") + raise ValueError(f"invalid blake3 digest: {digest!r}") return blobs_root / "blake3" / digest[:2] / digest[2:4] / digest @@ -61,25 +50,23 @@ def _blob_path(blobs_root: Path, digest: str) -> Path: def _strip_blake3_prefix(digest: str) -> str: - """Strip the 'blake3:' scheme prefix if present, returning the bare hex.""" + """'blake3:abcd...' -> 'abcd...'""" d = (digest or "").strip().lower() if d.startswith("blake3:"): - d = d[len("blake3:"):] + return d[7:] return d def _is_part_file(path: str) -> bool: - """Return True if the path is a chunked part file (e.g. foo.part0001).""" return bool(_PART_FILE_RE.search(path)) def _is_parts_manifest(path: str) -> bool: - """Return True if the path is a chunked-blob manifes (e.g. foo.parts.json).""" return path.endswith(".parts.json") -def _resolve_field(obj: Any, *keys: str) -> Any: - """Get a field from either a dict or an attribute-bearing object, trying keys in order.""" +def _field(obj: Any, *keys: str) -> Any: + """Read a field from dict or object, trying keys in order.""" for k in keys: if isinstance(obj, dict): v = obj.get(k) @@ -106,38 +93,33 @@ def _try_hardlink_or_copy(src: Path, dst: Path) -> None: shutil.copy2(src, dst) -def _coerce_resolved_model(ref: CozyRef, resolved: Any) -> CozyHubResolveArtifactResult: - """Coerce an orchestrator-provided resolved model object into CozyHubResolveArtifactResult. - - Handles two wire shapes: - - Legacy: .snapshot_digest (bare hex) + .files[] - - v2: .snapshot_digest ('blake3:...' 
prefixed) + .entries[] (new chunked-blob format) +# --------------------------------------------------------------------------- +# Coerce orchestrator wire format -> internal type +# --------------------------------------------------------------------------- - Both protobuf attribute access and plain-dict access are supported. - """ - snapshot_digest = str(_resolve_field(resolved, "snapshot_digest", "snapshotDigest") or "").strip() +def _coerce_resolved_model(ref: CozyRef, resolved: Any) -> CozyHubResolveArtifactResult: + """Handle both legacy (.files[]) and v2 (.entries[], blake3:-prefixed digests).""" + snapshot_digest = str(_field(resolved, "snapshot_digest", "snapshotDigest") or "").strip() if not snapshot_digest: raise ValueError("resolved model missing snapshot_digest") - # Strip scheme prefix so the digest is a bare hex string suitable for path use. snapshot_digest = _strip_blake3_prefix(snapshot_digest) or snapshot_digest - # New format uses "entries"; legacy format uses "files". - files_raw = list(_resolve_field(resolved, "entries", "files") or []) + # v2 uses "entries", legacy uses "files" + files_raw = list(_field(resolved, "entries", "files") or []) files: List[CozyHubSnapshotFile] = [] for ent in files_raw: - path = str(_resolve_field(ent, "path") or "").strip() + path = str(_field(ent, "path") or "").strip() if not path: continue - # Prefer bare "blake3" field; fall back to "digest" which may carry the prefix. 
- blake3_hex = str(_resolve_field(ent, "blake3", "BLAKE3") or "").strip().lower() + blake3_hex = str(_field(ent, "blake3", "BLAKE3") or "").strip().lower() if not blake3_hex: - digest_raw = str(_resolve_field(ent, "digest") or "").strip().lower() - blake3_hex = _strip_blake3_prefix(digest_raw) - url = str(_resolve_field(ent, "url") or "").strip() or None - size_bytes = int(_resolve_field(ent, "size_bytes") or 0) + blake3_hex = _strip_blake3_prefix(str(_field(ent, "digest") or "")) + url = str(_field(ent, "url") or "").strip() or None + size_bytes = int(_field(ent, "size_bytes") or 0) if not blake3_hex or not url: raise ValueError(f"resolved model file missing blake3/url: {path}") files.append(CozyHubSnapshotFile(path=path, size_bytes=size_bytes, blake3=blake3_hex, url=url)) + if not files: raise ValueError("resolved model has empty files list") @@ -149,21 +131,16 @@ def _coerce_resolved_model(ref: CozyRef, resolved: Any) -> CozyHubResolveArtifac ) -class CozySnapshotV2Downloader: - """Cozy Hub v2 downloader. - - Normal mode: - - resolve owner/repo:tag (or @digest) via resolve API - - download all referenced blobs to a local blob store - - materialize a snapshot checkout by hardlinking blobs into the snapshot tree +# --------------------------------------------------------------------------- +# Main downloader +# --------------------------------------------------------------------------- - Issue #92 mode: - - if `resolved` is provided, skip Cozy Hub API calls and use the provided - presigned URLs directly. +class CozySnapshotV2Downloader: + """Downloads blobs into a CAS layout, reassembles chunked files, materializes snapshot. - On-disk layout under : - - blobs/blake3/// - - snapshots//... + Layout under : + blobs/blake3/// + snapshots//... 
""" def __init__(self, client: Optional[CozyHubV2Client]) -> None: @@ -188,193 +165,194 @@ async def ensure_snapshot( snap_dir = snaps_root / res.snapshot_digest if snap_dir.exists(): + _log.info("snapshot_cached digest=%s", res.snapshot_digest[:16]) return snap_dir - # Claim building responsibility or wait for another caller already building - # this snapshot. threading primitives (not asyncio.Lock) are used so that - # this works correctly across different event loops (startup prefetch thread - # vs. LoadModelCommand on the main loop). + # Coordinate concurrent builders via threading (works across event loops). loop = asyncio.get_running_loop() - with _GLOBAL_SNAPSHOT_LOCKS_LOCK: - if snap_dir.exists(): # double-check under the guard + with _SNAP_LOCK: + if snap_dir.exists(): return snap_dir - _snap_entry = _GLOBAL_SNAPSHOT_LOCKS.get(res.snapshot_digest) - if _snap_entry is None: - _snap_entry = _SnapshotEntry() - _GLOBAL_SNAPSHOT_LOCKS[res.snapshot_digest] = _snap_entry - _is_builder = True + entry = _SNAP_ENTRIES.get(res.snapshot_digest) + if entry is None: + entry = _SnapshotEntry() + _SNAP_ENTRIES[res.snapshot_digest] = entry + is_builder = True else: - _is_builder = False - - if not _is_builder: - # Another caller is already building this snapshot; wait for it. 
- await loop.run_in_executor(None, _snap_entry.event.wait) - if _snap_entry.exception is not None: - raise RuntimeError( - f"concurrent snapshot build failed for {res.snapshot_digest}" - ) from _snap_entry.exception + is_builder = False + + if not is_builder: + _log.info("snapshot_waiting digest=%s (another builder active)", res.snapshot_digest[:16]) + await loop.run_in_executor(None, entry.event.wait) + if entry.exception is not None: + raise RuntimeError(f"concurrent snapshot build failed for {res.snapshot_digest}") from entry.exception return snap_dir try: - if snap_dir.exists(): - return snap_dir - + _log.info("snapshot_build_start digest=%s files=%d", res.snapshot_digest[:16], len(res.files)) await self._ensure_blobs(blobs_root, res.files) tmp = snaps_root / f"{res.snapshot_digest}.building" + if tmp.exists(): + shutil.rmtree(tmp) tmp.mkdir(parents=True, exist_ok=True) - # Reassemble any chunked files (produced by chunkedblob on ingest). - # A ".parts.json" entry describes how to reassemble N part blobs into - # the original file. Part blobs and the manifest itself are all already - # in the blob store at this point. 
- parts_manifest_entries = [f for f in res.files if _is_parts_manifest(f.path)] - part_file_paths = {f.path for f in res.files if _is_part_file(f.path)} - - import logging as _logging - _log = _logging.getLogger("gen_worker.download") + self._reassemble_chunked(blobs_root, tmp, res.files) + self._materialize_regular(blobs_root, tmp, res.files) - for pm_entry in parts_manifest_entries: - _log.info("reassemble_start manifest=%s", pm_entry.path) - print(f"DEBUG reassemble_start manifest={pm_entry.path}") - parts_json_blob = _blob_path(blobs_root, pm_entry.blake3) - try: - parts_manifest = json.loads(parts_json_blob.read_bytes()) - except Exception as exc: - raise ValueError(f"failed to parse parts manifest {pm_entry.path}: {exc}") from exc - - original_path = str(parts_manifest.get("original_path") or "").strip() - if not original_path: - raise ValueError(f"parts manifest {pm_entry.path} missing original_path") - parts = parts_manifest.get("parts") or [] - if not parts: - raise ValueError(f"parts manifest {pm_entry.path} has no parts") - - rel = _norm_rel_path(original_path) - dst = tmp / rel - dst.parent.mkdir(parents=True, exist_ok=True) - if dst.exists(): - dst.unlink() - - with open(dst, "wb") as out_f: - for i, part in enumerate(parts): - part_digest = _strip_blake3_prefix( - str(part.get("digest") or "").strip().lower() - ) - if not part_digest: - raise ValueError(f"part entry in {pm_entry.path} missing digest") - part_blob = _blob_path(blobs_root, part_digest) - _log.info(" concat_part index=%d digest=%s exists=%s size=%s", - i, part_digest[:16], part_blob.exists(), - part_blob.stat().st_size if part_blob.exists() else -1) - print(f"DEBUG concat_part index={i} digest={part_digest[:16]} exists={part_blob.exists()} size={part_blob.stat().st_size if part_blob.exists() else -1}") - with open(part_blob, "rb") as in_f: - shutil.copyfileobj(in_f, out_f) - - # Materialize regular files; skip part files and parts manifests since they - # have already been consumed above 
during reassembly. - for f in res.files: - if _is_parts_manifest(f.path) or f.path in part_file_paths: - continue - rel = _norm_rel_path(f.path) - dst = tmp / rel - dst.parent.mkdir(parents=True, exist_ok=True) - src = _blob_path(blobs_root, f.blake3) - _try_hardlink_or_copy(src, dst) - - # Another concurrent caller (e.g. a different downloader instance) may have - # already materialized and renamed the snapshot while we were assembling ours. - # In that case, discard our tmp dir and return the existing snapshot. + # Atomic rename; handle race with concurrent builder. if snap_dir.exists(): shutil.rmtree(tmp, ignore_errors=True) - return snap_dir - try: - tmp.rename(snap_dir) - except OSError: - # Lost the race — snap_dir was created between the exists() check and rename(). - shutil.rmtree(tmp, ignore_errors=True) - if not snap_dir.exists(): - raise + else: + try: + tmp.rename(snap_dir) + except OSError: + shutil.rmtree(tmp, ignore_errors=True) + if not snap_dir.exists(): + raise + + _log.info("snapshot_build_done digest=%s", res.snapshot_digest[:16]) return snap_dir - except BaseException as _exc: - _snap_entry.exception = _exc + except BaseException as exc: + entry.exception = exc raise finally: - _snap_entry.event.set() + entry.event.set() - async def _resolve(self, ref: CozyRef) -> CozyHubResolveArtifactResult: - if self._client is None: - raise RuntimeError("cozy hub api resolve is disabled") - - prefs = default_resolve_preferences() - caps = detect_worker_capabilities() - return await self._client.resolve_artifact( - owner=ref.owner, - repo=ref.repo, - tag=ref.tag, - digest=ref.digest, - include_urls=True, - preferences=prefs, - capabilities=caps.to_dict(), - ) + # ------------------------------------------------------------------ + # Blob download (deduplicated, parallel) + # ------------------------------------------------------------------ async def _ensure_blobs(self, blobs_root: Path, files: List[CozyHubSnapshotFile]) -> None: - import logging - log = 
logging.getLogger("gen_worker.download") - - log.info("ensure_blobs total_files=%d", len(files)) - print(f"DEBUG ensure_blobs total_files={len(files)}") - for f in files: - log.info(" entry path=%s size=%s digest=%s url_present=%s", f.path, f.size_bytes, (f.blake3 or "")[:16], bool(f.url)) - print(f"DEBUG entry path={f.path} size={f.size_bytes} digest={(f.blake3 or '')[:16]} url_present={bool(f.url)}") - - all_blobs: List[tuple[CozyHubSnapshotFile, str, Path]] = [] + # Deduplicate by digest — same blob referenced by multiple paths (e.g. + # fp16 and normal variants sharing the same part) is downloaded once. + seen: Set[str] = set() + unique: List[CozyHubSnapshotFile] = [] for f in files: digest = (f.blake3 or "").strip().lower() if not digest: raise ValueError(f"missing blake3 for {f.path}") if not f.url: raise ValueError(f"missing url for {f.path}") - dst = _blob_path(blobs_root, digest) - dst.parent.mkdir(parents=True, exist_ok=True) - all_blobs.append((f, digest, dst)) + if digest not in seen: + seen.add(digest) + unique.append(f) + + _log.info("ensure_blobs total_entries=%d unique_blobs=%d", len(files), len(unique)) + + # Sort largest first for better overlap, then download in parallel. + unique.sort(key=lambda f: int(f.size_bytes or 0), reverse=True) - # Parallelize shard/blob downloads to reduce first-load latency for - # multi-file transformer checkpoints. max_conc = max(1, int(os.getenv("WORKER_MODEL_DOWNLOAD_CONCURRENCY", "4") or "4")) sem = asyncio.Semaphore(max_conc) - async def _ensure_one(f: CozyHubSnapshotFile, digest: str, dst: Path) -> None: - # Acquire the GLOBAL per-digest threading.Lock (loop-independent) so that - # concurrent callers across different downloader instances (startup prefetch, - # task request, LoadModelCommand) don't write to the same .part file in - # parallel. run_in_executor is used so that waiting for the lock does not - # block the event loop. 
- _lock = _get_blob_lock(digest) - _lloop = asyncio.get_running_loop() - await _lloop.run_in_executor(None, _lock.acquire) - try: + async def _dl(f: CozyHubSnapshotFile) -> None: + digest = f.blake3.strip().lower() + dst = _blob_path(blobs_root, digest) + dst.parent.mkdir(parents=True, exist_ok=True) + if dst.exists(): + _log.info("blob_cached path=%s digest=%s", f.path, digest[:16]) + return + async with sem: if dst.exists(): return - async with sem: - if dst.exists(): - return - assert f.url is not None - await _download_one_file( - f.url, - dst, - expected_size=int(f.size_bytes or 0), - expected_blake3=digest, - ) - finally: - _lock.release() + _log.info("blob_download_start path=%s size=%s digest=%s", + f.path, f.size_bytes, digest[:16]) + assert f.url is not None # validated above in _ensure_blobs loop + await _download_one_file( + f.url, + dst, + expected_size=int(f.size_bytes or 0), + expected_blake3=digest, + ) + _log.info("blob_download_done path=%s digest=%s", f.path, digest[:16]) + + await asyncio.gather(*(_dl(f) for f in unique)) + + # ------------------------------------------------------------------ + # Chunked file reassembly + # ------------------------------------------------------------------ + + def _reassemble_chunked( + self, blobs_root: Path, tmp: Path, files: List[CozyHubSnapshotFile] + ) -> None: + """Read .parts.json manifests and concatenate part blobs into original files.""" + for f in files: + if not _is_parts_manifest(f.path): + continue - # Start larger blobs first for better overlap. 
- all_blobs.sort(key=lambda row: int(row[0].size_bytes or 0), reverse=True) - await asyncio.gather(*(_ensure_one(f, digest, dst) for f, digest, dst in all_blobs)) + _log.info("reassemble_start manifest=%s", f.path) + manifest_blob = _blob_path(blobs_root, f.blake3) + manifest = json.loads(manifest_blob.read_bytes()) + original_path = str(manifest.get("original_path") or "").strip() + if not original_path: + raise ValueError(f"parts manifest {f.path} missing original_path") + parts = manifest.get("parts") or [] + if not parts: + raise ValueError(f"parts manifest {f.path} has no parts") + + dst = tmp / _norm_rel_path(original_path) + dst.parent.mkdir(parents=True, exist_ok=True) + if dst.exists(): + dst.unlink() + + total_written = 0 + with open(dst, "wb") as out_f: + for i, part in enumerate(parts): + part_digest = _strip_blake3_prefix(str(part.get("digest") or "")) + if not part_digest: + raise ValueError(f"part[{i}] in {f.path} missing digest") + part_blob = _blob_path(blobs_root, part_digest) + part_size = part_blob.stat().st_size + _log.info(" concat part=%d/%d digest=%s size=%s", + i + 1, len(parts), part_digest[:16], part_size) + with open(part_blob, "rb") as in_f: + shutil.copyfileobj(in_f, out_f) + total_written += part_size + + _log.info("reassemble_done file=%s total_size=%s", original_path, total_written) + + # ------------------------------------------------------------------ + # Regular (non-chunked) file materialization + # ------------------------------------------------------------------ + + def _materialize_regular( + self, blobs_root: Path, tmp: Path, files: List[CozyHubSnapshotFile] + ) -> None: + """Hardlink/copy non-chunked blobs into the snapshot tree.""" + part_paths = {f.path for f in files if _is_part_file(f.path)} + for f in files: + if _is_parts_manifest(f.path) or f.path in part_paths: + continue + dst = tmp / _norm_rel_path(f.path) + dst.parent.mkdir(parents=True, exist_ok=True) + src = _blob_path(blobs_root, f.blake3) + 
_try_hardlink_or_copy(src, dst) + + # ------------------------------------------------------------------ + # Hub resolution + # ------------------------------------------------------------------ + + async def _resolve(self, ref: CozyRef) -> CozyHubResolveArtifactResult: + if self._client is None: + raise RuntimeError("cozy hub api resolve is disabled") + prefs = default_resolve_preferences() + caps = detect_worker_capabilities() + return await self._client.resolve_artifact( + owner=ref.owner, + repo=ref.repo, + tag=ref.tag, + digest=ref.digest, + include_urls=True, + preferences=prefs, + capabilities=caps.to_dict(), + ) +# --------------------------------------------------------------------------- +# Convenience wrappers +# --------------------------------------------------------------------------- async def ensure_snapshot_async( *, @@ -384,7 +362,6 @@ async def ensure_snapshot_async( token: Optional[str], resolved: Optional[Any] = None, ) -> Path: - """Async version of ensure_snapshot_sync for use in async contexts.""" client: Optional[CozyHubV2Client] = None if resolved is None: if not (base_url or "").strip(): diff --git a/src/gen_worker/decorators.py b/src/gen_worker/decorators.py index d73012f..c7447fe 100644 --- a/src/gen_worker/decorators.py +++ b/src/gen_worker/decorators.py @@ -1,33 +1,7 @@ -import math from typing import Any, Callable, Dict, Optional, TypeVar F = TypeVar("F", bound=Callable[..., Any]) - -def canonicalize_compute_capability_min(raw: Any) -> str: - """ - Normalize function-level compute capability minima into canonical v1 form. - - Canonical form is a decimal string with one fractional digit (for example - "10.0", "8.9"). 
- """ - if isinstance(raw, str): - v = raw.strip() - if v == "": - raise ValueError("compute_capability_min must not be empty") - try: - n = float(v) - except Exception as e: # pragma: no cover - defensive - raise ValueError("compute_capability_min must be numeric") from e - elif isinstance(raw, (int, float)): - n = float(raw) - else: - raise ValueError("compute_capability_min must be numeric") - - if not math.isfinite(n) or n <= 0: - raise ValueError("compute_capability_min must be > 0") - return f"{n:.1f}" - class ResourceRequirements: """ Specifies the resource requirements for a worker function. @@ -45,7 +19,7 @@ def __init__( memory_hint_mb: Optional[int] = None, stage_profile: Optional[str] = None, stage_traits: Optional[list[str]] = None, - compute_capability_min: Optional[float | str] = None, + compute_capability_min: Optional[float] = None, ) -> None: self.batch_size_min = batch_size_min self.batch_size_target = batch_size_target @@ -55,6 +29,7 @@ def __init__( self.memory_hint_mb = memory_hint_mb self.stage_profile = stage_profile self.stage_traits = list(stage_traits or []) + self.compute_capability_min = compute_capability_min self._requirements: Dict[str, Any] = {} if batch_size_min is not None: self._requirements["batch_size_min"] = int(batch_size_min) @@ -73,9 +48,10 @@ def __init__( if self.stage_traits: self._requirements["stage_traits"] = [str(x).strip() for x in self.stage_traits if str(x).strip()] if compute_capability_min is not None: - self._requirements["compute_capability"] = { - "min": canonicalize_compute_capability_min(compute_capability_min) - } + val = float(compute_capability_min) + if val <= 0: + raise ValueError(f"compute_capability_min must be positive, got {val}") + self._requirements["compute_capability"] = {"min": f"{val:.1f}"} def to_dict(self) -> Dict[str, Any]: """Returns a dictionary representation of the defined requirements.""" diff --git a/src/gen_worker/diffusers_model_manager.py 
b/src/gen_worker/diffusers_model_manager.py index b4d720d..3fe4d9a 100644 --- a/src/gen_worker/diffusers_model_manager.py +++ b/src/gen_worker/diffusers_model_manager.py @@ -55,7 +55,7 @@ async def load_model_into_vram(self, model_id: str) -> bool: local_path = self._downloader.download(model_id, cache_dir) except Exception as e: logger.warning("DiffusersModelManager: download failed for %s: %s", model_id, e) - return False + return False loaded = await self._loader.load(model_id, model_path=local_path) logger.info( diff --git a/src/gen_worker/discover.py b/src/gen_worker/discover.py index 48478f3..2779f94 100644 --- a/src/gen_worker/discover.py +++ b/src/gen_worker/discover.py @@ -34,7 +34,6 @@ load_tensorhub_toml, ) from gen_worker.names import slugify_endpoint_name, slugify_function_name -from gen_worker.decorators import canonicalize_compute_capability_min def _type_id(t: type) -> Dict[str, str]: @@ -189,7 +188,6 @@ def _extract_function_metadata(func: Any, module_name: str) -> Dict[str, Any]: res_dict.update(raw) except Exception: pass - _normalize_compute_capability_resource(res_dict, func.__name__) hints = typing.get_type_hints(func, globalns=func.__globals__, include_extras=True) sig = inspect.signature(func) @@ -447,11 +445,6 @@ def _module_is_in_project(mod: Any) -> bool: seen_functions.add(key) fn_meta = _extract_function_metadata(obj, module_name) functions.append(fn_meta) - functions.sort(key=lambda fn: ( - str(fn.get("name") or ""), - str(fn.get("python_name") or ""), - str(fn.get("module") or ""), - )) return functions # Fallback: scan the filesystem for decorated functions. 
@@ -484,41 +477,9 @@ def _module_is_in_project(mod: Any) -> bool: print(f"warning: failed to extract metadata from {name}: {e}", file=sys.stderr) raise - functions.sort(key=lambda fn: ( - str(fn.get("name") or ""), - str(fn.get("python_name") or ""), - str(fn.get("module") or ""), - )) return functions -def _normalize_compute_capability_resource(resources: Dict[str, Any], fn_name: str) -> None: - if not isinstance(resources, dict): - return - - # Convenience alias for decorators/configs that use a flat key. - if "compute_capability" not in resources and "compute_capability_min" in resources: - resources["compute_capability"] = {"min": resources.get("compute_capability_min")} - resources.pop("compute_capability_min", None) - - if "compute_capability" not in resources: - return - - raw = resources.get("compute_capability") - min_raw: Any = None - if isinstance(raw, dict): - min_raw = raw.get("min") - else: - min_raw = raw - if min_raw is None: - raise ValueError(f"{fn_name}: resources.compute_capability.min is required when compute_capability is set") - try: - min_norm = canonicalize_compute_capability_min(min_raw) - except ValueError as e: - raise ValueError(f"{fn_name}: invalid resources.compute_capability.min: {e}") from e - resources["compute_capability"] = {"min": min_norm} - - def discover_manifest(root: Optional[Path] = None) -> Dict[str, Any]: """ Discover functions and load tensorhub manifest config to build complete manifest. 
@@ -546,7 +507,6 @@ def discover_manifest(root: Optional[Path] = None) -> Dict[str, Any]: if isinstance(base, dict): merged.update(base) merged.update(hints) - _normalize_compute_capability_resource(merged, fn_name) fn["resources"] = merged batch_path = (tensorhub_manifest.function_batch_dimensions.get(fn_name) or "").strip() diff --git a/src/gen_worker/model_ref_downloader.py b/src/gen_worker/model_ref_downloader.py index 4bbe8c3..f6aa338 100644 --- a/src/gen_worker/model_ref_downloader.py +++ b/src/gen_worker/model_ref_downloader.py @@ -19,7 +19,6 @@ ) from .hf_downloader import HuggingFaceHubDownloader from .model_refs import CozyRef, ParsedModelRef, parse_model_ref -import threading # Per-task resolved manifests provided by gen-orchestrator (issue #92). # Shape: {canonical_model_id: ResolvedCozyModel-like object} @@ -60,26 +59,6 @@ def _get_prefs_for_ref(canonical_ref: str) -> Mapping[str, Any]: return v if isinstance(v, Mapping) else {} -def _lookup_resolved_cozy_entry(resolved_mapping: Optional[Mapping[str, Any]], canonical_ref: str) -> Any: - if resolved_mapping is None: - return None - key = str(canonical_ref or "").strip() - if not key: - return None - candidates = [key] - if key.startswith("cozy:"): - candidates.append(key.split(":", 1)[1].strip()) - else: - candidates.append(f"cozy:{key}") - for cand in candidates: - if not cand: - continue - ent = resolved_mapping.get(cand) - if ent is not None: - return ent - return None - - class ModelRefDownloader(ModelDownloader): """Composite downloader for phase-1 model refs. 
@@ -149,7 +128,7 @@ async def _download_async(self, parsed: ParsedModelRef, dest_dir: Path) -> Path: if parsed.scheme == "cozy" and parsed.cozy is not None: canonical = parsed.cozy.canonical() resolved_mapping = _resolved_cozy_models_by_id.get() - resolved_entry = _lookup_resolved_cozy_entry(resolved_mapping, canonical) + resolved_entry = resolved_mapping.get(canonical) if resolved_mapping is not None else None if resolved_entry is not None: return await ensure_snapshot_async( @@ -292,14 +271,14 @@ def _run_in_thread(coro: Coroutine[Any, Any, Path]) -> str: out: dict[str, str] = {} err: dict[str, BaseException] = {} - ctx = contextvars.copy_context() - def runner() -> None: try: - out["v"] = ctx.run(asyncio.run, coro).as_posix() + out["v"] = asyncio.run(coro).as_posix() except BaseException as e: err["e"] = e + import threading + t = threading.Thread(target=runner, daemon=True) t.start() t.join() diff --git a/src/gen_worker/model_refs.py b/src/gen_worker/model_refs.py index 3bc784b..7f9ee3a 100644 --- a/src/gen_worker/model_refs.py +++ b/src/gen_worker/model_refs.py @@ -16,8 +16,8 @@ def repo_id(self) -> str: def canonical(self) -> str: if self.digest: - return f"{self.repo_id()}@{self.digest}" - return f"{self.repo_id()}:{self.tag}" + return f"cozy:{self.repo_id()}@{self.digest}" + return f"cozy:{self.repo_id()}:{self.tag}" @dataclass(frozen=True) @@ -37,13 +37,6 @@ class ParsedModelRef: cozy: Optional[CozyRef] = None hf: Optional[HuggingFaceRef] = None - def canonical(self) -> str: - if self.scheme == "cozy" and self.cozy is not None: - return self.cozy.canonical() - if self.scheme == "hf" and self.hf is not None: - return self.hf.canonical() - raise ValueError("invalid parsed model ref") - def _strip_scheme(raw: str) -> tuple[Optional[str], str]: s = (raw or "").strip() diff --git a/src/gen_worker/pb/frontend_pb2.py b/src/gen_worker/pb/frontend_pb2.py index e3b1e83..1f4d0af 100644 --- a/src/gen_worker/pb/frontend_pb2.py +++ b/src/gen_worker/pb/frontend_pb2.py @@ 
-24,7 +24,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x66rontend.proto\x12\x0b\x66rontend.v1\"C\n\rActionOptions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\ntimeout_ms\x18\x02 \x01(\x03J\x04\x08\x03\x10\x04R\x0cretry_policy\"\x84\x03\n\x12VariantPreferences\x12;\n\x05quant\x18\x01 \x01(\x0e\x32,.frontend.v1.VariantPreferences.Quantization\x12<\n\tpackaging\x18\x02 \x01(\x0e\x32).frontend.v1.VariantPreferences.Packaging\x12\x36\n\x06layout\x18\x03 \x01(\x0e\x32&.frontend.v1.VariantPreferences.Layout\"B\n\x0cQuantization\x12\x15\n\x11QUANT_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46P16\x10\x01\x12\x08\n\x04\x42\x46\x31\x36\x10\x02\x12\x07\n\x03\x46P8\x10\x03\"F\n\tPackaging\x12\x19\n\x15PACKAGING_UNSPECIFIED\x10\x00\x12\x0f\n\x0bSAFETENSORS\x10\x01\x12\r\n\tFLASHPACK\x10\x02\"/\n\x06Layout\x12\x16\n\x12LAYOUT_UNSPECIFIED\x10\x00\x12\r\n\tDIFFUSERS\x10\x01\"\x95\x02\n\x14\x45xecuteActionRequest\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rinput_payload\x18\x02 \x01(\x0c\x12+\n\x07options\x18\x03 \x01(\x0b\x32\x1a.frontend.v1.ActionOptions\x12\x12\n\nrelease_id\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x06 \x01(\t\x12\x1a\n\x12required_repo_refs\x18\x07 \x03(\t\x12\r\n\x05owner\x18\x08 \x01(\t\x12\x36\n\rvariant_prefs\x18\t \x01(\x0b\x32\x1f.frontend.v1.VariantPreferencesJ\x04\x08\x05\x10\x06R\x11required_model_id\"\'\n\x15\x45xecuteActionResponse\x12\x0e\n\x06run_id\x18\x01 \x01(\t\"\"\n\x10\x43\x61ncelRunRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\t\"\x1f\n\x11\x43\x61ncelRunResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\"\x84\x01\n\x0cRealtimeOpen\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x12\n\nrelease_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x05 \x01(\t\x12\x12\n\ntimeout_ms\x18\x06 \x01(\x03\"B\n\rRealtimeFrame\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x0f\n\x07is_text\x18\x03 
\x01(\x08\"3\n\rRealtimeClose\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\"\xa2\x01\n\x14RealtimeProxyMessage\x12)\n\x04open\x18\x01 \x01(\x0b\x32\x19.frontend.v1.RealtimeOpenH\x00\x12+\n\x05\x66rame\x18\x02 \x01(\x0b\x32\x1a.frontend.v1.RealtimeFrameH\x00\x12+\n\x05\x63lose\x18\x03 \x01(\x0b\x32\x1a.frontend.v1.RealtimeCloseH\x00\x42\x05\n\x03msg2\x92\x02\n\x0f\x46rontendService\x12V\n\rExecuteAction\x12!.frontend.v1.ExecuteActionRequest\x1a\".frontend.v1.ExecuteActionResponse\x12J\n\tCancelRun\x12\x1d.frontend.v1.CancelRunRequest\x1a\x1e.frontend.v1.CancelRunResponse\x12[\n\x0fRealtimeSession\x12!.frontend.v1.RealtimeProxyMessage\x1a!.frontend.v1.RealtimeProxyMessage(\x01\x30\x01\x42GZEgithub.com/cozy-creator/gen-orchestrator/pkg/pb/frontendv1;frontendv1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x0e\x66rontend.proto\x12\x0b\x66rontend.v1\"C\n\rActionOptions\x12\n\n\x02id\x18\x01 \x01(\t\x12\x12\n\ntimeout_ms\x18\x02 \x01(\x03J\x04\x08\x03\x10\x04R\x0cretry_policy\"\x84\x03\n\x12VariantPreferences\x12;\n\x05quant\x18\x01 \x01(\x0e\x32,.frontend.v1.VariantPreferences.Quantization\x12<\n\tpackaging\x18\x02 \x01(\x0e\x32).frontend.v1.VariantPreferences.Packaging\x12\x36\n\x06layout\x18\x03 \x01(\x0e\x32&.frontend.v1.VariantPreferences.Layout\"B\n\x0cQuantization\x12\x15\n\x11QUANT_UNSPECIFIED\x10\x00\x12\x08\n\x04\x46P16\x10\x01\x12\x08\n\x04\x42\x46\x31\x36\x10\x02\x12\x07\n\x03\x46P8\x10\x03\"F\n\tPackaging\x12\x19\n\x15PACKAGING_UNSPECIFIED\x10\x00\x12\x0f\n\x0bSAFETENSORS\x10\x01\x12\r\n\tFLASHPACK\x10\x02\"/\n\x06Layout\x12\x16\n\x12LAYOUT_UNSPECIFIED\x10\x00\x12\r\n\tDIFFUSERS\x10\x01\"\x95\x02\n\x14\x45xecuteActionRequest\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x15\n\rinput_payload\x18\x02 \x01(\x0c\x12+\n\x07options\x18\x03 \x01(\x0b\x32\x1a.frontend.v1.ActionOptions\x12\x12\n\nrelease_id\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x06 \x01(\t\x12\x1a\n\x12required_repo_refs\x18\x07 
\x03(\t\x12\r\n\x05owner\x18\x08 \x01(\t\x12\x36\n\rvariant_prefs\x18\t \x01(\x0b\x32\x1f.frontend.v1.VariantPreferencesJ\x04\x08\x05\x10\x06R\x11required_model_id\"+\n\x15\x45xecuteActionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"*\n\x14\x43\x61ncelRequestRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\"#\n\x15\x43\x61ncelRequestResponse\x12\n\n\x02ok\x18\x01 \x01(\x08\"\x84\x01\n\x0cRealtimeOpen\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x12\n\nrelease_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x05 \x01(\t\x12\x12\n\ntimeout_ms\x18\x06 \x01(\x03\"B\n\rRealtimeFrame\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x0f\n\x07is_text\x18\x03 \x01(\x08\"3\n\rRealtimeClose\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\"\xa2\x01\n\x14RealtimeProxyMessage\x12)\n\x04open\x18\x01 \x01(\x0b\x32\x19.frontend.v1.RealtimeOpenH\x00\x12+\n\x05\x66rame\x18\x02 \x01(\x0b\x32\x1a.frontend.v1.RealtimeFrameH\x00\x12+\n\x05\x63lose\x18\x03 \x01(\x0b\x32\x1a.frontend.v1.RealtimeCloseH\x00\x42\x05\n\x03msg2\x9e\x02\n\x0f\x46rontendService\x12V\n\rExecuteAction\x12!.frontend.v1.ExecuteActionRequest\x1a\".frontend.v1.ExecuteActionResponse\x12V\n\rCancelRequest\x12!.frontend.v1.CancelRequestRequest\x1a\".frontend.v1.CancelRequestResponse\x12[\n\x0fRealtimeSession\x12!.frontend.v1.RealtimeProxyMessage\x1a!.frontend.v1.RealtimeProxyMessage(\x01\x30\x01\x42GZEgithub.com/cozy-creator/gen-orchestrator/pkg/pb/frontendv1;frontendv1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -45,19 +45,19 @@ _globals['_EXECUTEACTIONREQUEST']._serialized_start=492 _globals['_EXECUTEACTIONREQUEST']._serialized_end=769 _globals['_EXECUTEACTIONRESPONSE']._serialized_start=771 - _globals['_EXECUTEACTIONRESPONSE']._serialized_end=810 - _globals['_CANCELRUNREQUEST']._serialized_start=812 - 
_globals['_CANCELRUNREQUEST']._serialized_end=846 - _globals['_CANCELRUNRESPONSE']._serialized_start=848 - _globals['_CANCELRUNRESPONSE']._serialized_end=879 - _globals['_REALTIMEOPEN']._serialized_start=882 - _globals['_REALTIMEOPEN']._serialized_end=1014 - _globals['_REALTIMEFRAME']._serialized_start=1016 - _globals['_REALTIMEFRAME']._serialized_end=1082 - _globals['_REALTIMECLOSE']._serialized_start=1084 - _globals['_REALTIMECLOSE']._serialized_end=1135 - _globals['_REALTIMEPROXYMESSAGE']._serialized_start=1138 - _globals['_REALTIMEPROXYMESSAGE']._serialized_end=1300 - _globals['_FRONTENDSERVICE']._serialized_start=1303 - _globals['_FRONTENDSERVICE']._serialized_end=1577 + _globals['_EXECUTEACTIONRESPONSE']._serialized_end=814 + _globals['_CANCELREQUESTREQUEST']._serialized_start=816 + _globals['_CANCELREQUESTREQUEST']._serialized_end=858 + _globals['_CANCELREQUESTRESPONSE']._serialized_start=860 + _globals['_CANCELREQUESTRESPONSE']._serialized_end=895 + _globals['_REALTIMEOPEN']._serialized_start=898 + _globals['_REALTIMEOPEN']._serialized_end=1030 + _globals['_REALTIMEFRAME']._serialized_start=1032 + _globals['_REALTIMEFRAME']._serialized_end=1098 + _globals['_REALTIMECLOSE']._serialized_start=1100 + _globals['_REALTIMECLOSE']._serialized_end=1151 + _globals['_REALTIMEPROXYMESSAGE']._serialized_start=1154 + _globals['_REALTIMEPROXYMESSAGE']._serialized_end=1316 + _globals['_FRONTENDSERVICE']._serialized_start=1319 + _globals['_FRONTENDSERVICE']._serialized_end=1605 # @@protoc_insertion_point(module_scope) diff --git a/src/gen_worker/pb/frontend_pb2_grpc.py b/src/gen_worker/pb/frontend_pb2_grpc.py index dcd9c3b..a3d4fb3 100644 --- a/src/gen_worker/pb/frontend_pb2_grpc.py +++ b/src/gen_worker/pb/frontend_pb2_grpc.py @@ -40,10 +40,10 @@ def __init__(self, channel): request_serializer=frontend__pb2.ExecuteActionRequest.SerializeToString, response_deserializer=frontend__pb2.ExecuteActionResponse.FromString, _registered_method=True) - self.CancelRun = 
channel.unary_unary( - '/frontend.v1.FrontendService/CancelRun', - request_serializer=frontend__pb2.CancelRunRequest.SerializeToString, - response_deserializer=frontend__pb2.CancelRunResponse.FromString, + self.CancelRequest = channel.unary_unary( + '/frontend.v1.FrontendService/CancelRequest', + request_serializer=frontend__pb2.CancelRequestRequest.SerializeToString, + response_deserializer=frontend__pb2.CancelRequestResponse.FromString, _registered_method=True) self.RealtimeSession = channel.stream_stream( '/frontend.v1.FrontendService/RealtimeSession', @@ -63,8 +63,8 @@ def ExecuteAction(self, request, context): context.set_details('Method not implemented!') raise NotImplementedError('Method not implemented!') - def CancelRun(self, request, context): - """2) Cancel an in-flight action/job. + def CancelRequest(self, request, context): + """2) Cancel an in-flight request. """ context.set_code(grpc.StatusCode.UNIMPLEMENTED) context.set_details('Method not implemented!') @@ -85,10 +85,10 @@ def add_FrontendServiceServicer_to_server(servicer, server): request_deserializer=frontend__pb2.ExecuteActionRequest.FromString, response_serializer=frontend__pb2.ExecuteActionResponse.SerializeToString, ), - 'CancelRun': grpc.unary_unary_rpc_method_handler( - servicer.CancelRun, - request_deserializer=frontend__pb2.CancelRunRequest.FromString, - response_serializer=frontend__pb2.CancelRunResponse.SerializeToString, + 'CancelRequest': grpc.unary_unary_rpc_method_handler( + servicer.CancelRequest, + request_deserializer=frontend__pb2.CancelRequestRequest.FromString, + response_serializer=frontend__pb2.CancelRequestResponse.SerializeToString, ), 'RealtimeSession': grpc.stream_stream_rpc_method_handler( servicer.RealtimeSession, @@ -135,7 +135,7 @@ def ExecuteAction(request, _registered_method=True) @staticmethod - def CancelRun(request, + def CancelRequest(request, target, options=(), channel_credentials=None, @@ -148,9 +148,9 @@ def CancelRun(request, return 
grpc.experimental.unary_unary( request, target, - '/frontend.v1.FrontendService/CancelRun', - frontend__pb2.CancelRunRequest.SerializeToString, - frontend__pb2.CancelRunResponse.FromString, + '/frontend.v1.FrontendService/CancelRequest', + frontend__pb2.CancelRequestRequest.SerializeToString, + frontend__pb2.CancelRequestResponse.FromString, options, channel_credentials, insecure, diff --git a/src/gen_worker/pb/worker_scheduler_pb2.py b/src/gen_worker/pb/worker_scheduler_pb2.py index 63c0575..d82c370 100644 --- a/src/gen_worker/pb/worker_scheduler_pb2.py +++ b/src/gen_worker/pb/worker_scheduler_pb2.py @@ -24,7 +24,7 @@ -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16worker_scheduler.proto\x12\x0cscheduler.v1\"\xb7\x06\n\x0fWorkerResources\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x11\n\tcpu_cores\x18\x02 \x01(\x05\x12\x14\n\x0cmemory_bytes\x18\x03 \x01(\x03\x12\x11\n\tgpu_count\x18\x04 \x01(\x05\x12\x18\n\x10gpu_memory_bytes\x18\x05 \x01(\x03\x12\x1b\n\x13\x61vailable_functions\x18\x06 \x03(\t\x12\x13\n\x0bvram_models\x18\x07 \x03(\t\x12\x1e\n\x16supports_model_loading\x18\x08 \x01(\x08\x12\x12\n\nrelease_id\x18\t \x01(\t\x12\x15\n\rrunpod_pod_id\x18\n \x01(\t\x12\x13\n\x0bgpu_is_busy\x18\x0b \x01(\x08\x12\x1d\n\x15gpu_memory_used_bytes\x18\x0c \x01(\x03\x12\x10\n\x08gpu_name\x18\r \x01(\t\x12\x12\n\ngpu_driver\x18\x0e \x01(\t\x12\x1d\n\x15gpu_memory_free_bytes\x18\x0f \x01(\x03\x12\x17\n\x0fmax_concurrency\x18\x10 \x01(\x05\x12T\n\x14\x66unction_concurrency\x18\x11 \x03(\x0b\x32\x36.scheduler.v1.WorkerResources.FunctionConcurrencyEntry\x12\x14\n\x0c\x63uda_version\x18\x12 \x01(\t\x12\x15\n\rtorch_version\x18\x13 \x01(\t\x12\x36\n\x10\x66unction_schemas\x18\x14 \x03(\x0b\x32\x1c.scheduler.v1.FunctionSchema\x12\x0e\n\x06gpu_sm\x18\x15 \x01(\t\x12\x18\n\x10tensorrt_version\x18\x16 \x01(\t\x12\x1b\n\x13onnxruntime_version\x18\x17 \x01(\t\x12\x13\n\x0b\x64isk_models\x18\x18 \x03(\t\x12\x16\n\x0einstalled_libs\x18\x19 
\x03(\t\x12\x14\n\x0cimage_digest\x18\x1a \x01(\t\x12\x12\n\ngit_commit\x18\x1b \x01(\t\x12\x17\n\x0f\x62uild_timestamp\x18\x1c \x01(\x03\x1a:\n\x18\x46unctionConcurrencyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xa4\x01\n\x0e\x46unctionSchema\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11input_schema_json\x18\x02 \x01(\x0c\x12\x1a\n\x12output_schema_json\x18\x03 \x01(\x0c\x12\x16\n\x0einjection_json\x18\x04 \x01(\x0c\x12\x1a\n\x12incremental_output\x18\x05 \x01(\x08\x12\x19\n\x11\x64\x65lta_schema_json\x18\x06 \x01(\x0c\"\x8c\x01\n\x12WorkerRegistration\x12\x30\n\tresources\x18\x01 \x01(\x0b\x32\x1d.scheduler.v1.WorkerResources\x12\x14\n\x0cis_heartbeat\x18\x02 \x01(\x08\x12\x16\n\x0eprotocol_major\x18\x03 \x01(\x05\x12\x16\n\x0eprotocol_minor\x18\x04 \x01(\x05\"\xe4\x01\n\x10LoadModelCommand\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12^\n\x1aresolved_cozy_models_by_id\x18\x02 \x03(\x0b\x32:.scheduler.v1.LoadModelCommand.ResolvedCozyModelsByIdEntry\x1a^\n\x1bResolvedCozyModelsByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\"&\n\x12UnloadModelCommand\x12\x10\n\x08model_id\x18\x01 \x01(\t\"&\n\x14InterruptTaskCommand\x12\x0e\n\x06run_id\x18\x01 \x01(\t\"V\n\x15ResolvedCozyModelFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\x12\x0e\n\x06\x62lake3\x18\x03 \x01(\t\x12\x0b\n\x03url\x18\x04 \x01(\t\"`\n\x11ResolvedCozyModel\x12\x17\n\x0fsnapshot_digest\x18\x01 \x01(\t\x12\x32\n\x05\x66iles\x18\x02 \x03(\x0b\x32#.scheduler.v1.ResolvedCozyModelFile\".\n\x1a\x43ozyModelURLRefreshRequest\x12\x10\n\x08model_id\x18\x01 \x01(\t\"b\n\x1b\x43ozyModelURLRefreshResponse\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x31\n\x08resolved\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel\"\x99\x03\n\x14TaskExecutionRequest\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x15\n\rfunction_name\x18\x02 \x01(\t\x12\x15\n\rinput_payload\x18\x03 
\x01(\x0c\x12\x1d\n\x15required_variant_refs\x18\x04 \x03(\t\x12\x12\n\ntimeout_ms\x18\x05 \x01(\x03\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x12\n\ninvoker_id\x18\x07 \x01(\t\x12\x15\n\rfile_base_url\x18\x08 \x01(\t\x12\x12\n\nfile_token\x18\t \x01(\t\x12\x62\n\x1aresolved_cozy_models_by_id\x18\n \x03(\x0b\x32>.scheduler.v1.TaskExecutionRequest.ResolvedCozyModelsByIdEntry\x1a^\n\x1bResolvedCozyModelsByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\"\xa2\x01\n\x13TaskExecutionResult\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x16\n\x0eoutput_payload\x18\x03 \x01(\x0c\x12\x15\n\rerror_message\x18\x04 \x01(\t\x12\x12\n\nerror_type\x18\x05 \x01(\t\x12\x11\n\tretryable\x18\x06 \x01(\x08\x12\x14\n\x0csafe_message\x18\x07 \x01(\t\"G\n\x0bWorkerEvent\x12\x0e\n\x06run_id\x18\x01 \x01(\t\x12\x12\n\nevent_type\x18\x02 \x01(\t\x12\x14\n\x0cpayload_json\x18\x03 \x01(\x0c\"\xc1\x01\n\x13RealtimeOpenCommand\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x15\n\rfunction_name\x18\x02 \x01(\t\x12\x1d\n\x15required_variant_refs\x18\x03 \x03(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x05 \x01(\t\x12\x12\n\ntimeout_ms\x18\x06 \x01(\x03\x12\x15\n\rfile_base_url\x18\x07 \x01(\t\x12\x12\n\nfile_token\x18\x08 \x01(\t\"B\n\rRealtimeFrame\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x0f\n\x07is_text\x18\x03 \x01(\x08\":\n\x14RealtimeCloseCommand\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 \x01(\t\"K\n\x0fLoadModelResult\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\"M\n\x11UnloadModelResult\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\"(\n\tModelSpec\x12\x0b\n\x03ref\x18\x01 \x01(\t\x12\x0e\n\x06\x64types\x18\x02 
\x03(\t\"\x8c\x01\n\x0bModelsByKey\x12\x35\n\x06models\x18\x01 \x03(\x0b\x32%.scheduler.v1.ModelsByKey.ModelsEntry\x1a\x46\n\x0bModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.scheduler.v1.ModelSpec:\x02\x38\x01\"\xe1\x04\n\x15ReleaseArtifactConfig\x12\x1b\n\x13supported_repo_refs\x18\x01 \x03(\t\x12N\n\x0frepo_ref_by_key\x18\x02 \x03(\x0b\x32\x35.scheduler.v1.ReleaseArtifactConfig.RepoRefByKeyEntry\x12t\n#resolved_cozy_models_by_variant_ref\x18\x03 \x03(\x0b\x32G.scheduler.v1.ReleaseArtifactConfig.ResolvedCozyModelsByVariantRefEntry\x12\x1d\n\x15required_variant_refs\x18\x04 \x03(\t\x12U\n\x12models_by_function\x18\x05 \x03(\x0b\x32\x39.scheduler.v1.ReleaseArtifactConfig.ModelsByFunctionEntry\x1a\x33\n\x11RepoRefByKeyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x66\n#ResolvedCozyModelsByVariantRefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\x1aR\n\x15ModelsByFunctionEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.scheduler.v1.ModelsByKey:\x02\x38\x01\"\xfa\x07\n\x16WorkerSchedulerMessage\x12?\n\x13worker_registration\x18\x01 \x01(\x0b\x32 .scheduler.v1.WorkerRegistrationH\x00\x12\x37\n\nrun_result\x18\x02 \x01(\x0b\x32!.scheduler.v1.TaskExecutionResultH\x00\x12:\n\x11load_model_result\x18\x04 \x01(\x0b\x32\x1d.scheduler.v1.LoadModelResultH\x00\x12>\n\x13unload_model_result\x18\x05 \x01(\x0b\x32\x1f.scheduler.v1.UnloadModelResultH\x00\x12\x31\n\x0cworker_event\x18\x06 \x01(\x0b\x32\x19.scheduler.v1.WorkerEventH\x00\x12R\n\x1e\x63ozy_model_url_refresh_request\x18\x07 \x01(\x0b\x32(.scheduler.v1.CozyModelURLRefreshRequestH\x00\x12\x39\n\x0brun_request\x18\n \x01(\x0b\x32\".scheduler.v1.TaskExecutionRequestH\x00\x12\x38\n\x0eload_model_cmd\x18\x0b \x01(\x0b\x32\x1e.scheduler.v1.LoadModelCommandH\x00\x12<\n\x10unload_model_cmd\x18\x0c \x01(\x0b\x32 
.scheduler.v1.UnloadModelCommandH\x00\x12?\n\x11interrupt_run_cmd\x18\r \x01(\x0b\x32\".scheduler.v1.InterruptTaskCommandH\x00\x12\x46\n\x17release_artifact_config\x18\x0e \x01(\x0b\x32#.scheduler.v1.ReleaseArtifactConfigH\x00\x12>\n\x11realtime_open_cmd\x18\x0f \x01(\x0b\x32!.scheduler.v1.RealtimeOpenCommandH\x00\x12\x35\n\x0erealtime_frame\x18\x10 \x01(\x0b\x32\x1b.scheduler.v1.RealtimeFrameH\x00\x12@\n\x12realtime_close_cmd\x18\x11 \x01(\x0b\x32\".scheduler.v1.RealtimeCloseCommandH\x00\x12T\n\x1f\x63ozy_model_url_refresh_response\x18\x12 \x01(\x0b\x32).scheduler.v1.CozyModelURLRefreshResponseH\x00\x42\x05\n\x03msgJ\x04\x08\x03\x10\x04R\x0bspawn_tasks2y\n\x16SchedulerWorkerService\x12_\n\rConnectWorker\x12$.scheduler.v1.WorkerSchedulerMessage\x1a$.scheduler.v1.WorkerSchedulerMessage(\x01\x30\x01\x42UZSgithub.com/cozy-creator/gen-orchestrator/pkg/pb/workerschedulerv1;workerschedulerv1b\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x16worker_scheduler.proto\x12\x0cscheduler.v1\"\xb7\x06\n\x0fWorkerResources\x12\x11\n\tworker_id\x18\x01 \x01(\t\x12\x11\n\tcpu_cores\x18\x02 \x01(\x05\x12\x14\n\x0cmemory_bytes\x18\x03 \x01(\x03\x12\x11\n\tgpu_count\x18\x04 \x01(\x05\x12\x18\n\x10gpu_memory_bytes\x18\x05 \x01(\x03\x12\x1b\n\x13\x61vailable_functions\x18\x06 \x03(\t\x12\x13\n\x0bvram_models\x18\x07 \x03(\t\x12\x1e\n\x16supports_model_loading\x18\x08 \x01(\x08\x12\x12\n\nrelease_id\x18\t \x01(\t\x12\x15\n\rrunpod_pod_id\x18\n \x01(\t\x12\x13\n\x0bgpu_is_busy\x18\x0b \x01(\x08\x12\x1d\n\x15gpu_memory_used_bytes\x18\x0c \x01(\x03\x12\x10\n\x08gpu_name\x18\r \x01(\t\x12\x12\n\ngpu_driver\x18\x0e \x01(\t\x12\x1d\n\x15gpu_memory_free_bytes\x18\x0f \x01(\x03\x12\x17\n\x0fmax_concurrency\x18\x10 \x01(\x05\x12T\n\x14\x66unction_concurrency\x18\x11 \x03(\x0b\x32\x36.scheduler.v1.WorkerResources.FunctionConcurrencyEntry\x12\x14\n\x0c\x63uda_version\x18\x12 \x01(\t\x12\x15\n\rtorch_version\x18\x13 \x01(\t\x12\x36\n\x10\x66unction_schemas\x18\x14 
\x03(\x0b\x32\x1c.scheduler.v1.FunctionSchema\x12\x0e\n\x06gpu_sm\x18\x15 \x01(\t\x12\x18\n\x10tensorrt_version\x18\x16 \x01(\t\x12\x1b\n\x13onnxruntime_version\x18\x17 \x01(\t\x12\x13\n\x0b\x64isk_models\x18\x18 \x03(\t\x12\x16\n\x0einstalled_libs\x18\x19 \x03(\t\x12\x14\n\x0cimage_digest\x18\x1a \x01(\t\x12\x12\n\ngit_commit\x18\x1b \x01(\t\x12\x17\n\x0f\x62uild_timestamp\x18\x1c \x01(\x03\x1a:\n\x18\x46unctionConcurrencyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xa4\x01\n\x0e\x46unctionSchema\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x19\n\x11input_schema_json\x18\x02 \x01(\x0c\x12\x1a\n\x12output_schema_json\x18\x03 \x01(\x0c\x12\x16\n\x0einjection_json\x18\x04 \x01(\x0c\x12\x1a\n\x12incremental_output\x18\x05 \x01(\x08\x12\x19\n\x11\x64\x65lta_schema_json\x18\x06 \x01(\x0c\"\x8c\x01\n\x12WorkerRegistration\x12\x30\n\tresources\x18\x01 \x01(\x0b\x32\x1d.scheduler.v1.WorkerResources\x12\x14\n\x0cis_heartbeat\x18\x02 \x01(\x08\x12\x16\n\x0eprotocol_major\x18\x03 \x01(\x05\x12\x16\n\x0eprotocol_minor\x18\x04 \x01(\x05\"$\n\x10LoadModelCommand\x12\x10\n\x08model_id\x18\x01 \x01(\t\"&\n\x12UnloadModelCommand\x12\x10\n\x08model_id\x18\x01 \x01(\t\"X\n\x14InterruptTaskCommand\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x10\n\x08item_ids\x18\x02 \x03(\t\x12\x1a\n\x12\x63\x61ncel_queued_only\x18\x03 \x01(\x08\"\xb7\x01\n\x15RuntimeBatchingConfig\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x19\n\x11\x62\x61tch_size_target\x18\x02 \x01(\x05\x12\x16\n\x0e\x62\x61tch_size_min\x18\x03 \x01(\x05\x12\x16\n\x0e\x62\x61tch_size_max\x18\x04 \x01(\x05\x12\x16\n\x0eprefetch_depth\x18\x05 \x01(\x05\x12\x13\n\x0bmax_wait_ms\x18\x06 \x01(\x05\x12\x0f\n\x07version\x18\x07 \x01(\x03\"S\n\x1cRuntimeBatchingConfigCommand\x12\x33\n\x06\x63onfig\x18\x01 \x01(\x0b\x32#.scheduler.v1.RuntimeBatchingConfig\"m\n\x1bRuntimeBatchingConfigResult\x12\x15\n\rfunction_name\x18\x01 \x01(\t\x12\x0f\n\x07version\x18\x02 \x01(\x03\x12\x0f\n\x07success\x18\x03 
\x01(\x08\x12\x15\n\rerror_message\x18\x04 \x01(\t\"V\n\x15ResolvedCozyModelFile\x12\x0c\n\x04path\x18\x01 \x01(\t\x12\x12\n\nsize_bytes\x18\x02 \x01(\x03\x12\x0e\n\x06\x62lake3\x18\x03 \x01(\t\x12\x0b\n\x03url\x18\x04 \x01(\t\"`\n\x11ResolvedCozyModel\x12\x17\n\x0fsnapshot_digest\x18\x01 \x01(\t\x12\x32\n\x05\x66iles\x18\x02 \x03(\x0b\x32#.scheduler.v1.ResolvedCozyModelFile\".\n\x1a\x43ozyModelURLRefreshRequest\x12\x10\n\x08model_id\x18\x01 \x01(\t\"b\n\x1b\x43ozyModelURLRefreshResponse\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x31\n\x08resolved\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel\"\xf7\x03\n\x14TaskExecutionRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x15\n\rfunction_name\x18\x02 \x01(\t\x12\x15\n\rinput_payload\x18\x03 \x01(\x0c\x12\x1d\n\x15required_variant_refs\x18\x04 \x03(\t\x12\x12\n\ntimeout_ms\x18\x05 \x01(\x03\x12\r\n\x05owner\x18\x06 \x01(\t\x12\x12\n\ninvoker_id\x18\x07 \x01(\t\x12\x15\n\rfile_base_url\x18\x08 \x01(\t\x12\x12\n\nfile_token\x18\t \x01(\t\x12\x62\n\x1aresolved_cozy_models_by_id\x18\n \x03(\x0b\x32>.scheduler.v1.TaskExecutionRequest.ResolvedCozyModelsByIdEntry\x12\x19\n\x11parent_request_id\x18\x0b \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\x0c \x01(\t\x12\x0f\n\x07item_id\x18\r \x01(\t\x12\x12\n\nitem_index\x18\x0e \x01(\x05\x1a^\n\x1bResolvedCozyModelsByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\"\x80\x02\n\x13TaskExecutionResult\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x16\n\x0eoutput_payload\x18\x03 \x01(\x0c\x12\x15\n\rerror_message\x18\x04 \x01(\t\x12\x12\n\nerror_type\x18\x05 \x01(\t\x12\x11\n\tretryable\x18\x06 \x01(\x08\x12\x14\n\x0csafe_message\x18\x07 \x01(\t\x12\x19\n\x11parent_request_id\x18\x08 \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\t \x01(\t\x12\x0f\n\x07item_id\x18\n \x01(\t\x12\x12\n\nitem_index\x18\x0b 
\x01(\x05\"\xc8\x03\n\x12\x42\x61tchExecutionItem\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07item_id\x18\x02 \x01(\t\x12\r\n\x05owner\x18\x03 \x01(\t\x12\x12\n\ninvoker_id\x18\x04 \x01(\t\x12\x15\n\rfunction_name\x18\x05 \x01(\t\x12\x15\n\rinput_payload\x18\x06 \x01(\x0c\x12\x1d\n\x15required_variant_refs\x18\x07 \x03(\t\x12\x12\n\ntimeout_ms\x18\x08 \x01(\x03\x12`\n\x1aresolved_cozy_models_by_id\x18\t \x03(\x0b\x32<.scheduler.v1.BatchExecutionItem.ResolvedCozyModelsByIdEntry\x12\x19\n\x11parent_request_id\x18\n \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\x0b \x01(\t\x12\x12\n\nitem_index\x18\x0c \x01(\x05\x1a^\n\x1bResolvedCozyModelsByIdEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\"\xbc\x01\n\x15\x42\x61tchExecutionRequest\x12\x10\n\x08\x62\x61tch_id\x18\x01 \x01(\t\x12\x12\n\nrelease_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\x19\n\x11\x63ompatibility_key\x18\x04 \x01(\t\x12\x1a\n\x12\x63reated_at_unix_ms\x18\x05 \x01(\x03\x12/\n\x05items\x18\x06 \x03(\x0b\x32 .scheduler.v1.BatchExecutionItem\"\x85\x02\n\x18\x42\x61tchExecutionItemResult\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07item_id\x18\x02 \x01(\t\x12\x0f\n\x07success\x18\x03 \x01(\x08\x12\x16\n\x0eoutput_payload\x18\x04 \x01(\x0c\x12\x12\n\nerror_type\x18\x05 \x01(\t\x12\x11\n\tretryable\x18\x06 \x01(\x08\x12\x14\n\x0csafe_message\x18\x07 \x01(\t\x12\x15\n\rerror_message\x18\x08 \x01(\t\x12\x19\n\x11parent_request_id\x18\t \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\n \x01(\t\x12\x12\n\nitem_index\x18\x0b \x01(\x05\"_\n\x14\x42\x61tchExecutionResult\x12\x10\n\x08\x62\x61tch_id\x18\x01 \x01(\t\x12\x35\n\x05items\x18\x02 \x03(\x0b\x32&.scheduler.v1.BatchExecutionItemResult\"\xa5\x01\n\x0bWorkerEvent\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x12\n\nevent_type\x18\x02 \x01(\t\x12\x14\n\x0cpayload_json\x18\x03 \x01(\x0c\x12\x19\n\x11parent_request_id\x18\x04 
\x01(\t\x12\x18\n\x10\x63hild_request_id\x18\x05 \x01(\t\x12\x0f\n\x07item_id\x18\x06 \x01(\t\x12\x12\n\nitem_index\x18\x07 \x01(\x05\"\xf3\x01\n\x15IncrementalTokenDelta\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07item_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\x10\n\x08sequence\x18\x04 \x01(\x03\x12\x19\n\x11timestamp_unix_ms\x18\x05 \x01(\x03\x12\x12\n\ndelta_text\x18\x06 \x01(\t\x12\x14\n\x0cpayload_json\x18\x07 \x01(\x0c\x12\x19\n\x11parent_request_id\x18\x08 \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\t \x01(\t\x12\x12\n\nitem_index\x18\n \x01(\x05\"\xce\x01\n\x1aIncrementalTokenStreamDone\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07item_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\x10\n\x08sequence\x18\x04 \x01(\x03\x12\x19\n\x11timestamp_unix_ms\x18\x05 \x01(\x03\x12\x19\n\x11parent_request_id\x18\x06 \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\x07 \x01(\t\x12\x12\n\nitem_index\x18\x08 \x01(\x05\"\xe6\x01\n\x1bIncrementalTokenStreamError\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07item_id\x18\x02 \x01(\t\x12\x15\n\rfunction_name\x18\x03 \x01(\t\x12\x10\n\x08sequence\x18\x04 \x01(\x03\x12\x19\n\x11timestamp_unix_ms\x18\x05 \x01(\x03\x12\x15\n\rerror_message\x18\x06 \x01(\t\x12\x19\n\x11parent_request_id\x18\x07 \x01(\t\x12\x18\n\x10\x63hild_request_id\x18\x08 \x01(\t\x12\x12\n\nitem_index\x18\t \x01(\x05\"\xc1\x01\n\x13RealtimeOpenCommand\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x15\n\rfunction_name\x18\x02 \x01(\t\x12\x1d\n\x15required_variant_refs\x18\x03 \x03(\t\x12\r\n\x05owner\x18\x04 \x01(\t\x12\x12\n\ninvoker_id\x18\x05 \x01(\t\x12\x12\n\ntimeout_ms\x18\x06 \x01(\x03\x12\x15\n\rfile_base_url\x18\x07 \x01(\t\x12\x12\n\nfile_token\x18\x08 \x01(\t\"B\n\rRealtimeFrame\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x02 \x01(\x0c\x12\x0f\n\x07is_text\x18\x03 \x01(\x08\":\n\x14RealtimeCloseCommand\x12\x12\n\nsession_id\x18\x01 \x01(\t\x12\x0e\n\x06reason\x18\x02 
\x01(\t\"K\n\x0fLoadModelResult\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\"M\n\x11UnloadModelResult\x12\x10\n\x08model_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x15\n\rerror_message\x18\x03 \x01(\t\"(\n\tModelSpec\x12\x0b\n\x03ref\x18\x01 \x01(\t\x12\x0e\n\x06\x64types\x18\x02 \x03(\t\"\x8c\x01\n\x0bModelsByKey\x12\x35\n\x06models\x18\x01 \x03(\x0b\x32%.scheduler.v1.ModelsByKey.ModelsEntry\x1a\x46\n\x0bModelsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12&\n\x05value\x18\x02 \x01(\x0b\x32\x17.scheduler.v1.ModelSpec:\x02\x38\x01\"\xc5\x04\n\x0e\x45ndpointConfig\x12\x1b\n\x13supported_repo_refs\x18\x01 \x03(\t\x12G\n\x0frepo_ref_by_key\x18\x02 \x03(\x0b\x32..scheduler.v1.EndpointConfig.RepoRefByKeyEntry\x12m\n#resolved_cozy_models_by_variant_ref\x18\x03 \x03(\x0b\x32@.scheduler.v1.EndpointConfig.ResolvedCozyModelsByVariantRefEntry\x12\x1d\n\x15required_variant_refs\x18\x04 \x03(\t\x12N\n\x12models_by_function\x18\x05 \x03(\x0b\x32\x32.scheduler.v1.EndpointConfig.ModelsByFunctionEntry\x1a\x33\n\x11RepoRefByKeyEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1a\x66\n#ResolvedCozyModelsByVariantRefEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12.\n\x05value\x18\x02 \x01(\x0b\x32\x1f.scheduler.v1.ResolvedCozyModel:\x02\x38\x01\x1aR\n\x15ModelsByFunctionEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12(\n\x05value\x18\x02 \x01(\x0b\x32\x19.scheduler.v1.ModelsByKey:\x02\x38\x01\"\x85\x0c\n\x16WorkerSchedulerMessage\x12?\n\x13worker_registration\x18\x01 \x01(\x0b\x32 .scheduler.v1.WorkerRegistrationH\x00\x12\x37\n\nrun_result\x18\x02 \x01(\x0b\x32!.scheduler.v1.TaskExecutionResultH\x00\x12:\n\x11load_model_result\x18\x04 \x01(\x0b\x32\x1d.scheduler.v1.LoadModelResultH\x00\x12>\n\x13unload_model_result\x18\x05 \x01(\x0b\x32\x1f.scheduler.v1.UnloadModelResultH\x00\x12\x31\n\x0cworker_event\x18\x06 
\x01(\x0b\x32\x19.scheduler.v1.WorkerEventH\x00\x12R\n\x1e\x63ozy_model_url_refresh_request\x18\x07 \x01(\x0b\x32(.scheduler.v1.CozyModelURLRefreshRequestH\x00\x12S\n\x1eruntime_batching_config_result\x18\x13 \x01(\x0b\x32).scheduler.v1.RuntimeBatchingConfigResultH\x00\x12>\n\x10\x62\x61tch_run_result\x18\x15 \x01(\x0b\x32\".scheduler.v1.BatchExecutionResultH\x00\x12\x46\n\x17incremental_token_delta\x18\x17 \x01(\x0b\x32#.scheduler.v1.IncrementalTokenDeltaH\x00\x12Q\n\x1dincremental_token_stream_done\x18\x18 \x01(\x0b\x32(.scheduler.v1.IncrementalTokenStreamDoneH\x00\x12S\n\x1eincremental_token_stream_error\x18\x19 \x01(\x0b\x32).scheduler.v1.IncrementalTokenStreamErrorH\x00\x12\x39\n\x0brun_request\x18\n \x01(\x0b\x32\".scheduler.v1.TaskExecutionRequestH\x00\x12@\n\x11\x62\x61tch_run_request\x18\x16 \x01(\x0b\x32#.scheduler.v1.BatchExecutionRequestH\x00\x12\x38\n\x0eload_model_cmd\x18\x0b \x01(\x0b\x32\x1e.scheduler.v1.LoadModelCommandH\x00\x12<\n\x10unload_model_cmd\x18\x0c \x01(\x0b\x32 .scheduler.v1.UnloadModelCommandH\x00\x12?\n\x11interrupt_run_cmd\x18\r \x01(\x0b\x32\".scheduler.v1.InterruptTaskCommandH\x00\x12\x37\n\x0f\x65ndpoint_config\x18\x0e \x01(\x0b\x32\x1c.scheduler.v1.EndpointConfigH\x00\x12>\n\x11realtime_open_cmd\x18\x0f \x01(\x0b\x32!.scheduler.v1.RealtimeOpenCommandH\x00\x12\x35\n\x0erealtime_frame\x18\x10 \x01(\x0b\x32\x1b.scheduler.v1.RealtimeFrameH\x00\x12@\n\x12realtime_close_cmd\x18\x11 \x01(\x0b\x32\".scheduler.v1.RealtimeCloseCommandH\x00\x12T\n\x1f\x63ozy_model_url_refresh_response\x18\x12 \x01(\x0b\x32).scheduler.v1.CozyModelURLRefreshResponseH\x00\x12Q\n\x1bruntime_batching_config_cmd\x18\x14 
\x01(\x0b\x32*.scheduler.v1.RuntimeBatchingConfigCommandH\x00\x42\x05\n\x03msgJ\x04\x08\x03\x10\x04R\x0bspawn_tasks2y\n\x16SchedulerWorkerService\x12_\n\rConnectWorker\x12$.scheduler.v1.WorkerSchedulerMessage\x1a$.scheduler.v1.WorkerSchedulerMessage(\x01\x30\x01\x42UZSgithub.com/cozy-creator/gen-orchestrator/pkg/pb/workerschedulerv1;workerschedulerv1b\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -34,18 +34,18 @@ _globals['DESCRIPTOR']._serialized_options = b'ZSgithub.com/cozy-creator/gen-orchestrator/pkg/pb/workerschedulerv1;workerschedulerv1' _globals['_WORKERRESOURCES_FUNCTIONCONCURRENCYENTRY']._loaded_options = None _globals['_WORKERRESOURCES_FUNCTIONCONCURRENCYENTRY']._serialized_options = b'8\001' - _globals['_LOADMODELCOMMAND_RESOLVEDCOZYMODELSBYIDENTRY']._loaded_options = None - _globals['_LOADMODELCOMMAND_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_options = b'8\001' _globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._loaded_options = None _globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_options = b'8\001' + _globals['_BATCHEXECUTIONITEM_RESOLVEDCOZYMODELSBYIDENTRY']._loaded_options = None + _globals['_BATCHEXECUTIONITEM_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_options = b'8\001' _globals['_MODELSBYKEY_MODELSENTRY']._loaded_options = None _globals['_MODELSBYKEY_MODELSENTRY']._serialized_options = b'8\001' - _globals['_RELEASEARTIFACTCONFIG_REPOREFBYKEYENTRY']._loaded_options = None - _globals['_RELEASEARTIFACTCONFIG_REPOREFBYKEYENTRY']._serialized_options = b'8\001' - _globals['_RELEASEARTIFACTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._loaded_options = None - _globals['_RELEASEARTIFACTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_options = b'8\001' - _globals['_RELEASEARTIFACTCONFIG_MODELSBYFUNCTIONENTRY']._loaded_options = None - _globals['_RELEASEARTIFACTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_options = b'8\001' + 
_globals['_ENDPOINTCONFIG_REPOREFBYKEYENTRY']._loaded_options = None + _globals['_ENDPOINTCONFIG_REPOREFBYKEYENTRY']._serialized_options = b'8\001' + _globals['_ENDPOINTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._loaded_options = None + _globals['_ENDPOINTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_options = b'8\001' + _globals['_ENDPOINTCONFIG_MODELSBYFUNCTIONENTRY']._loaded_options = None + _globals['_ENDPOINTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_options = b'8\001' _globals['_WORKERRESOURCES']._serialized_start=41 _globals['_WORKERRESOURCES']._serialized_end=864 _globals['_WORKERRESOURCES_FUNCTIONCONCURRENCYENTRY']._serialized_start=806 @@ -54,56 +54,76 @@ _globals['_FUNCTIONSCHEMA']._serialized_end=1031 _globals['_WORKERREGISTRATION']._serialized_start=1034 _globals['_WORKERREGISTRATION']._serialized_end=1174 - _globals['_LOADMODELCOMMAND']._serialized_start=1177 - _globals['_LOADMODELCOMMAND']._serialized_end=1405 - _globals['_LOADMODELCOMMAND_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_start=1311 - _globals['_LOADMODELCOMMAND_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_end=1405 - _globals['_UNLOADMODELCOMMAND']._serialized_start=1407 - _globals['_UNLOADMODELCOMMAND']._serialized_end=1445 - _globals['_INTERRUPTTASKCOMMAND']._serialized_start=1447 - _globals['_INTERRUPTTASKCOMMAND']._serialized_end=1485 - _globals['_RESOLVEDCOZYMODELFILE']._serialized_start=1487 - _globals['_RESOLVEDCOZYMODELFILE']._serialized_end=1573 - _globals['_RESOLVEDCOZYMODEL']._serialized_start=1575 - _globals['_RESOLVEDCOZYMODEL']._serialized_end=1671 - _globals['_COZYMODELURLREFRESHREQUEST']._serialized_start=1673 - _globals['_COZYMODELURLREFRESHREQUEST']._serialized_end=1719 - _globals['_COZYMODELURLREFRESHRESPONSE']._serialized_start=1721 - _globals['_COZYMODELURLREFRESHRESPONSE']._serialized_end=1819 - _globals['_TASKEXECUTIONREQUEST']._serialized_start=1822 - _globals['_TASKEXECUTIONREQUEST']._serialized_end=2231 - 
_globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_start=1311 - _globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_end=1405 - _globals['_TASKEXECUTIONRESULT']._serialized_start=2234 - _globals['_TASKEXECUTIONRESULT']._serialized_end=2396 - _globals['_WORKEREVENT']._serialized_start=2398 - _globals['_WORKEREVENT']._serialized_end=2469 - _globals['_REALTIMEOPENCOMMAND']._serialized_start=2472 - _globals['_REALTIMEOPENCOMMAND']._serialized_end=2665 - _globals['_REALTIMEFRAME']._serialized_start=2667 - _globals['_REALTIMEFRAME']._serialized_end=2733 - _globals['_REALTIMECLOSECOMMAND']._serialized_start=2735 - _globals['_REALTIMECLOSECOMMAND']._serialized_end=2793 - _globals['_LOADMODELRESULT']._serialized_start=2795 - _globals['_LOADMODELRESULT']._serialized_end=2870 - _globals['_UNLOADMODELRESULT']._serialized_start=2872 - _globals['_UNLOADMODELRESULT']._serialized_end=2949 - _globals['_MODELSPEC']._serialized_start=2951 - _globals['_MODELSPEC']._serialized_end=2991 - _globals['_MODELSBYKEY']._serialized_start=2994 - _globals['_MODELSBYKEY']._serialized_end=3134 - _globals['_MODELSBYKEY_MODELSENTRY']._serialized_start=3064 - _globals['_MODELSBYKEY_MODELSENTRY']._serialized_end=3134 - _globals['_RELEASEARTIFACTCONFIG']._serialized_start=3137 - _globals['_RELEASEARTIFACTCONFIG']._serialized_end=3746 - _globals['_RELEASEARTIFACTCONFIG_REPOREFBYKEYENTRY']._serialized_start=3507 - _globals['_RELEASEARTIFACTCONFIG_REPOREFBYKEYENTRY']._serialized_end=3558 - _globals['_RELEASEARTIFACTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_start=3560 - _globals['_RELEASEARTIFACTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_end=3662 - _globals['_RELEASEARTIFACTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_start=3664 - _globals['_RELEASEARTIFACTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_end=3746 - _globals['_WORKERSCHEDULERMESSAGE']._serialized_start=3749 - _globals['_WORKERSCHEDULERMESSAGE']._serialized_end=4767 - 
_globals['_SCHEDULERWORKERSERVICE']._serialized_start=4769 - _globals['_SCHEDULERWORKERSERVICE']._serialized_end=4890 + _globals['_LOADMODELCOMMAND']._serialized_start=1176 + _globals['_LOADMODELCOMMAND']._serialized_end=1212 + _globals['_UNLOADMODELCOMMAND']._serialized_start=1214 + _globals['_UNLOADMODELCOMMAND']._serialized_end=1252 + _globals['_INTERRUPTTASKCOMMAND']._serialized_start=1254 + _globals['_INTERRUPTTASKCOMMAND']._serialized_end=1342 + _globals['_RUNTIMEBATCHINGCONFIG']._serialized_start=1345 + _globals['_RUNTIMEBATCHINGCONFIG']._serialized_end=1528 + _globals['_RUNTIMEBATCHINGCONFIGCOMMAND']._serialized_start=1530 + _globals['_RUNTIMEBATCHINGCONFIGCOMMAND']._serialized_end=1613 + _globals['_RUNTIMEBATCHINGCONFIGRESULT']._serialized_start=1615 + _globals['_RUNTIMEBATCHINGCONFIGRESULT']._serialized_end=1724 + _globals['_RESOLVEDCOZYMODELFILE']._serialized_start=1726 + _globals['_RESOLVEDCOZYMODELFILE']._serialized_end=1812 + _globals['_RESOLVEDCOZYMODEL']._serialized_start=1814 + _globals['_RESOLVEDCOZYMODEL']._serialized_end=1910 + _globals['_COZYMODELURLREFRESHREQUEST']._serialized_start=1912 + _globals['_COZYMODELURLREFRESHREQUEST']._serialized_end=1958 + _globals['_COZYMODELURLREFRESHRESPONSE']._serialized_start=1960 + _globals['_COZYMODELURLREFRESHRESPONSE']._serialized_end=2058 + _globals['_TASKEXECUTIONREQUEST']._serialized_start=2061 + _globals['_TASKEXECUTIONREQUEST']._serialized_end=2564 + _globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_start=2470 + _globals['_TASKEXECUTIONREQUEST_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_end=2564 + _globals['_TASKEXECUTIONRESULT']._serialized_start=2567 + _globals['_TASKEXECUTIONRESULT']._serialized_end=2823 + _globals['_BATCHEXECUTIONITEM']._serialized_start=2826 + _globals['_BATCHEXECUTIONITEM']._serialized_end=3282 + _globals['_BATCHEXECUTIONITEM_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_start=2470 + 
_globals['_BATCHEXECUTIONITEM_RESOLVEDCOZYMODELSBYIDENTRY']._serialized_end=2564 + _globals['_BATCHEXECUTIONREQUEST']._serialized_start=3285 + _globals['_BATCHEXECUTIONREQUEST']._serialized_end=3473 + _globals['_BATCHEXECUTIONITEMRESULT']._serialized_start=3476 + _globals['_BATCHEXECUTIONITEMRESULT']._serialized_end=3737 + _globals['_BATCHEXECUTIONRESULT']._serialized_start=3739 + _globals['_BATCHEXECUTIONRESULT']._serialized_end=3834 + _globals['_WORKEREVENT']._serialized_start=3837 + _globals['_WORKEREVENT']._serialized_end=4002 + _globals['_INCREMENTALTOKENDELTA']._serialized_start=4005 + _globals['_INCREMENTALTOKENDELTA']._serialized_end=4248 + _globals['_INCREMENTALTOKENSTREAMDONE']._serialized_start=4251 + _globals['_INCREMENTALTOKENSTREAMDONE']._serialized_end=4457 + _globals['_INCREMENTALTOKENSTREAMERROR']._serialized_start=4460 + _globals['_INCREMENTALTOKENSTREAMERROR']._serialized_end=4690 + _globals['_REALTIMEOPENCOMMAND']._serialized_start=4693 + _globals['_REALTIMEOPENCOMMAND']._serialized_end=4886 + _globals['_REALTIMEFRAME']._serialized_start=4888 + _globals['_REALTIMEFRAME']._serialized_end=4954 + _globals['_REALTIMECLOSECOMMAND']._serialized_start=4956 + _globals['_REALTIMECLOSECOMMAND']._serialized_end=5014 + _globals['_LOADMODELRESULT']._serialized_start=5016 + _globals['_LOADMODELRESULT']._serialized_end=5091 + _globals['_UNLOADMODELRESULT']._serialized_start=5093 + _globals['_UNLOADMODELRESULT']._serialized_end=5170 + _globals['_MODELSPEC']._serialized_start=5172 + _globals['_MODELSPEC']._serialized_end=5212 + _globals['_MODELSBYKEY']._serialized_start=5215 + _globals['_MODELSBYKEY']._serialized_end=5355 + _globals['_MODELSBYKEY_MODELSENTRY']._serialized_start=5285 + _globals['_MODELSBYKEY_MODELSENTRY']._serialized_end=5355 + _globals['_ENDPOINTCONFIG']._serialized_start=5358 + _globals['_ENDPOINTCONFIG']._serialized_end=5939 + _globals['_ENDPOINTCONFIG_REPOREFBYKEYENTRY']._serialized_start=5700 + 
_globals['_ENDPOINTCONFIG_REPOREFBYKEYENTRY']._serialized_end=5751 + _globals['_ENDPOINTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_start=5753 + _globals['_ENDPOINTCONFIG_RESOLVEDCOZYMODELSBYVARIANTREFENTRY']._serialized_end=5855 + _globals['_ENDPOINTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_start=5857 + _globals['_ENDPOINTCONFIG_MODELSBYFUNCTIONENTRY']._serialized_end=5939 + _globals['_WORKERSCHEDULERMESSAGE']._serialized_start=5942 + _globals['_WORKERSCHEDULERMESSAGE']._serialized_end=7483 + _globals['_SCHEDULERWORKERSERVICE']._serialized_start=7485 + _globals['_SCHEDULERWORKERSERVICE']._serialized_end=7606 # @@protoc_insertion_point(module_scope) diff --git a/src/gen_worker/pipeline_loader.py b/src/gen_worker/pipeline_loader.py index 72be53f..ea20b2f 100644 --- a/src/gen_worker/pipeline_loader.py +++ b/src/gen_worker/pipeline_loader.py @@ -537,24 +537,21 @@ def get_torch_dtype(dtype_str: Optional[str], model_id: str) -> Any: def detect_diffusers_variant(model_path: Path) -> Optional[str]: """ - Detect a diffusers `variant=` value ("bf16", "fp8", "fp16", "int8", "int4", "nvfp4") - from files on disk. + Detect a diffusers `variant=` value ("bf16", "fp8", "fp16", "int8", "int4") from files on disk. Many diffusers repos store weights as: - `unet/diffusion_pytorch_model.fp16.safetensors` - `text_encoder/model.fp16.safetensors` - sharded: `*.fp16.safetensors.index.json` + `*.fp16-00001-of-0000N.safetensors` """ - candidates = ["bf16", "fp8", "fp16", "int8", "int4", "nvfp4"] + candidates = ["bf16", "fp8", "fp16", "int8", "int4"] for p in model_path.rglob("*"): if not p.is_file(): continue name = p.name.lower() for v in candidates: if f".{v}." 
in name and name.endswith((".safetensors", ".json")): - print(f"DEBUG detect_diffusers_variant matched variant={v} file={p.name}") return v - print(f"DEBUG detect_diffusers_variant no variant found in {model_path}") return None @@ -1458,17 +1455,6 @@ async def load( raise ModelNotFoundError(model_id, path) config = config or PipelineConfig(model_path=str(path)) - if config.variant is None: - # Prefer variant from cozy/pipeline spec, fall back to file-name scan. - try: - from .cozy_pipeline_spec import load_cozy_pipeline_spec - - spec = load_cozy_pipeline_spec(path) - if spec is not None and spec.variant: - config.variant = spec.variant - print(f"DEBUG pipeline_loader variant_from_spec={config.variant}") - except Exception: - pass if config.variant is None: config.variant = detect_diffusers_variant(path) diff --git a/src/gen_worker/run_metrics_v1.py b/src/gen_worker/run_metrics_v1.py index 0db2b0e..b23dee0 100644 --- a/src/gen_worker/run_metrics_v1.py +++ b/src/gen_worker/run_metrics_v1.py @@ -41,18 +41,6 @@ def _extract_snapshot_digest(resolved_entry: Any) -> Optional[str]: return snap or None -def _resolved_entry_for_model_id(resolved: Mapping[str, Any], canonical_model_id: str) -> Any: - key = str(canonical_model_id or "").strip() - if not key: - return None - ent = resolved.get(key) - if ent is not None: - return ent - if key.startswith("cozy:"): - return resolved.get(key.split(":", 1)[1].strip()) - return resolved.get(f"cozy:{key}") - - def _extract_resolved_files(resolved_entry: Any) -> List[Any]: if resolved_entry is None: return [] @@ -102,17 +90,6 @@ def _cache_dir() -> Path: return worker_model_cache_dir() -def _canonicalize_model_id(raw: str) -> str: - s = str(raw or "").strip() - if not s: - return s - try: - parsed = parse_model_ref(s) - return parsed.canonical() - except Exception: - return s - - @dataclass class ModelMetricsV1: model_id: str @@ -380,8 +357,8 @@ def best_effort_init_model_metrics( - vram_models/disk_models are the worker-reported 
canonical lists. - For Cozy refs with resolved snapshot digests, we can also check snapshot dirs. """ - vram_set = set(_canonicalize_model_id(str(x)) for x in (vram_models or [])) - disk_set = set(_canonicalize_model_id(str(x)) for x in (disk_models or [])) + vram_set = set(str(x) for x in (vram_models or [])) + disk_set = set(str(x) for x in (disk_models or [])) base = cache_dir or _cache_dir() resolved = rm.resolved_cozy_models_by_id or {} @@ -389,11 +366,17 @@ def best_effort_init_model_metrics( mid = str(raw or "").strip() if not mid: continue - canon = _canonicalize_model_id(mid) + canon = mid snap: Optional[str] = None cache_state: Optional[str] = None + try: + parsed = parse_model_ref(mid) + if parsed.scheme == "cozy" and parsed.cozy is not None: + canon = parsed.cozy.canonical() + except Exception: + pass - resolved_entry = _resolved_entry_for_model_id(resolved, canon) if isinstance(resolved, Mapping) else None + resolved_entry = resolved.get(canon) if isinstance(resolved, Mapping) else None snap = _extract_snapshot_digest(resolved_entry) if canon in vram_set: diff --git a/src/gen_worker/tensorhub_toml.py b/src/gen_worker/tensorhub_toml.py index b6074dc..5a6e186 100644 --- a/src/gen_worker/tensorhub_toml.py +++ b/src/gen_worker/tensorhub_toml.py @@ -10,7 +10,7 @@ from .names import slugify_function_name _DEFAULT_DTYPES: tuple[str, ...] 
= ("fp16", "bf16") -_ALLOWED_DTYPES: frozenset[str] = frozenset({"fp16", "bf16", "fp8", "fp32", "int8", "int4", "nvfp4"}) +_ALLOWED_DTYPES: frozenset[str] = frozenset({"fp16", "bf16", "fp8", "fp32", "int8", "int4"}) _RE_CLAUSE = re.compile(r"^\s*(>=|<=|==|~=|>|<)?\s*([0-9]+(?:\.[0-9]+)*)\s*$") _RE_VERSION_PREFIX = re.compile(r"^\s*([0-9]+(?:\.[0-9]+)*)") diff --git a/src/gen_worker/testing/http_runner.py b/src/gen_worker/testing/http_runner.py index e38da01..b6d905a 100644 --- a/src/gen_worker/testing/http_runner.py +++ b/src/gen_worker/testing/http_runner.py @@ -113,7 +113,7 @@ def run_task_sync( raw = msgspec.msgpack.encode(payload_obj) req = pb.TaskExecutionRequest( - run_id=rid, + request_id=rid, function_name=fn, input_payload=raw, required_variant_refs=[str(v).strip() for v in (required_variant_refs or []) if str(v).strip()], @@ -152,7 +152,7 @@ def run_task_sync( payload = {} events.append( { - "request_id": str(ev.run_id or ""), + "request_id": str(ev.request_id or ""), "event_type": str(ev.event_type or ""), "payload": payload, } diff --git a/src/gen_worker/testing/mock_orchestrator.py b/src/gen_worker/testing/mock_orchestrator.py index ee32dee..de3bb37 100644 --- a/src/gen_worker/testing/mock_orchestrator.py +++ b/src/gen_worker/testing/mock_orchestrator.py @@ -198,7 +198,7 @@ def run_task( payload = _rewrite_refs_to_urls(payload_obj, input_ref_urls) raw = msgspec.msgpack.encode(payload) req = pb.TaskExecutionRequest( - run_id=rid, + request_id=rid, function_name=function_name, input_payload=raw, required_variant_refs=list(required_variant_refs), @@ -336,18 +336,18 @@ def _format_msg(msg: pb.WorkerSchedulerMessage) -> str: return "worker_registration" if msg.HasField("run_result"): rr = msg.run_result - return f"run_result request_id={rr.run_id} success={rr.success} error_type={rr.error_type!r} retryable={rr.retryable}" + return f"run_result request_id={rr.request_id} success={rr.success} error_type={rr.error_type!r} retryable={rr.retryable}" if 
msg.HasField("worker_event"): ev = msg.worker_event - return f"worker_event request_id={ev.run_id} type={ev.event_type}" + return f"worker_event request_id={ev.request_id} type={ev.event_type}" if msg.HasField("load_model_result"): return "load_model_result" if msg.HasField("unload_model_result"): return "unload_model_result" if msg.HasField("interrupt_run_cmd"): return "interrupt_run_cmd" - if msg.HasField("release_artifact_config"): - return "release_artifact_config" + if msg.HasField("endpoint_config"): + return "endpoint_config" if msg.HasField("realtime_open_cmd"): return "realtime_open_cmd" if msg.HasField("realtime_frame"): @@ -455,7 +455,7 @@ def main(argv: Optional[list[str]] = None) -> int: payload = "" print(f"[event] {msg.worker_event.event_type}: {payload}") continue - if msg.HasField("run_result") and msg.run_result.run_id == request_id: + if msg.HasField("run_result") and msg.run_result.request_id == request_id: rr = msg.run_result if rr.output_payload: out_obj = msgspec.msgpack.decode(rr.output_payload) diff --git a/src/gen_worker/worker.py b/src/gen_worker/worker.py index 936ddf9..80f12a7 100644 --- a/src/gen_worker/worker.py +++ b/src/gen_worker/worker.py @@ -64,7 +64,7 @@ from .model_interface import ModelManagementInterface from .downloader import CozyHubDownloader, ModelDownloader from .model_ref_downloader import ModelRefDownloader -from .model_refs import ParsedModelRef, parse_model_ref +from .model_refs import parse_model_ref from .types import Asset from .model_cache import ModelCache, ModelCacheStats, ModelLocation from .run_metrics_v1 import RunMetricsV1, best_effort_bytes_downloaded, best_effort_init_model_metrics, safe_json_bytes @@ -984,7 +984,7 @@ def __init__( self._fixed_model_spec_by_key: Dict[str, Dict[str, Any]] = {} self._payload_model_spec_by_key_by_function: Dict[str, Dict[str, Dict[str, Any]]] = {} # Orchestrator-resolved manifests received in EndpointConfig (startup prefetch baseline). 
- # Keys should be canonical model ref strings (e.g. "owner/repo@sha256:"). + # Keys should be canonical model ref strings (e.g. "cozy:owner/repo@sha256:"). self._resolved_cozy_models_by_id_baseline: Dict[str, Any] = {} self._prefetch_lock = threading.Lock() self._prefetch_thread: Optional[threading.Thread] = None @@ -2266,9 +2266,9 @@ def _register_worker(self, is_heartbeat: bool = False) -> None: vram_models = self._model_manager.get_vram_loaded_models() supports_model_loading_flag = True elif self._model_cache: - # Cache-only workers can still report VRAM/disk inventory, but - # they do not support explicit Load/UnloadModelCommand handling. + # Use model cache for VRAM-loaded models if no legacy model_manager vram_models = self._model_cache.get_vram_models() + supports_model_loading_flag = True # Get disk-cached and downloading models from model cache if self._model_cache: @@ -2752,7 +2752,7 @@ def _process_message(self, message: WorkerSchedulerMessage) -> None: self._handle_unload_model_cmd(message.unload_model_cmd) elif msg_type == 'interrupt_run_cmd': cmd = message.interrupt_run_cmd - request_id = cmd.run_id + request_id = cmd.request_id item_ids = [str(x).strip() for x in list(getattr(cmd, "item_ids", []) or []) if str(x).strip()] cancel_queued_only = bool(getattr(cmd, "cancel_queued_only", False)) self._handle_interrupt_request(request_id, item_ids=item_ids, cancel_queued_only=cancel_queued_only) @@ -2767,11 +2767,11 @@ def _process_message(self, message: WorkerSchedulerMessage) -> None: elif msg_type == "worker_event": self._handle_worker_event_from_scheduler(message.worker_event) # Add handling for other message types if needed (e.g., config updates) - elif msg_type == 'release_artifact_config': - cfg = message.release_artifact_config + elif msg_type == 'endpoint_config': + cfg = message.endpoint_config resolved_by_variant = dict(getattr(cfg, "resolved_cozy_models_by_variant_ref", {}) or {}) logger.info( - "Received ReleaseArtifactConfig (supported=%d 
required=%d resolved=%d)", + "Received EndpointConfig (supported=%d required=%d resolved=%d)", len(cfg.supported_repo_refs), len(cfg.required_variant_refs), len(resolved_by_variant), @@ -2984,12 +2984,12 @@ def worker() -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.download.completed", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.download.completed", payload_json=payload) ) ) self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.ready", payload_json=json.dumps({"model_id": canon}, separators=(",", ":"), sort_keys=True).encode("utf-8")) + worker_event=pb.WorkerEvent(request_id="", event_type="model.ready", payload_json=json.dumps({"model_id": canon}, separators=(",", ":"), sort_keys=True).encode("utf-8")) ) ) except Exception: @@ -3003,7 +3003,7 @@ def worker() -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.cached", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.cached", payload_json=payload) ) ) except Exception: @@ -3017,7 +3017,7 @@ def worker() -> None: payload = json.dumps({"model_id": canon}, separators=(",", ":"), sort_keys=True).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.download.started", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.download.started", payload_json=payload) ) ) except Exception: @@ -3039,7 +3039,7 @@ def worker() -> None: payload = json.dumps({"model_id": canon}, separators=(",", ":"), sort_keys=True).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.ready", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.ready", 
payload_json=payload) ) ) except Exception: @@ -3054,7 +3054,7 @@ def worker() -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.cached", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.cached", payload_json=payload) ) ) except Exception: @@ -3068,7 +3068,7 @@ def worker() -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.download.completed", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.download.completed", payload_json=payload) ) ) except Exception: @@ -3089,7 +3089,7 @@ def worker() -> None: payload = json.dumps({"model_id": canon}, separators=(",", ":"), sort_keys=True).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.url_refresh", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.url_refresh", payload_json=payload) ) ) except Exception: @@ -3103,7 +3103,7 @@ def worker() -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.download.failed", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.download.failed", payload_json=payload) ) ) except Exception: @@ -3206,7 +3206,7 @@ def _handle_load_model_cmd(self, cmd: LoadModelCommand) -> None: payload = json.dumps({"model_id": model_id}, separators=(",", ":"), sort_keys=True).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.load.started", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.load.started", payload_json=payload) ) ) except Exception: @@ -3229,7 +3229,7 @@ def _handle_load_model_cmd(self, cmd: LoadModelCommand) -> None: # Set resolved cozy 
models context so downloads can use orchestrator-resolved URLs. from .model_ref_downloader import reset_resolved_cozy_models_by_id, set_resolved_cozy_models_by_id per_cmd = dict(getattr(cmd, "resolved_cozy_models_by_id", {}) or {}) - baseline = self._resolved_cozy_models_by_id_baseline or {} + baseline = getattr(self, "_resolved_cozy_models_by_id_baseline", None) or {} merged = {**baseline, **per_cmd} if per_cmd else dict(baseline) tok = set_resolved_cozy_models_by_id(merged or None) try: @@ -3271,7 +3271,7 @@ def _handle_load_model_cmd(self, cmd: LoadModelCommand) -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type=ev_type, payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type=ev_type, payload_json=payload) ) ) except Exception: @@ -3296,7 +3296,7 @@ def _handle_unload_model_cmd(self, cmd: Any) -> None: ) self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.unload.failed", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.unload.failed", payload_json=payload) ) ) except Exception: @@ -3307,7 +3307,7 @@ def _handle_unload_model_cmd(self, cmd: Any) -> None: payload = json.dumps({"model_id": model_id}, separators=(",", ":"), sort_keys=True).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.unload.started", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.unload.started", payload_json=payload) ) ) except Exception: @@ -3354,7 +3354,7 @@ def _handle_unload_model_cmd(self, cmd: Any) -> None: ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type=ev_type, payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type=ev_type, payload_json=payload) ) ) except Exception: @@ -3362,7 +3362,7 @@ def 
_handle_unload_model_cmd(self, cmd: Any) -> None: def _handle_run_request(self, request: TaskExecutionRequest) -> None: """Handle a task execution request from the scheduler.""" - request_id = request.run_id + request_id = request.request_id function_name = request.function_name input_payload = request.input_payload required_model_id_for_exec = "" @@ -3371,9 +3371,7 @@ def _handle_run_request(self, request: TaskExecutionRequest) -> None: invoker_id = str(getattr(request, "invoker_id", "") or "") file_base_url = str(getattr(request, "file_base_url", "") or "") file_token = str(getattr(request, "file_token", "") or "") - resolved_cozy_models_by_id = self._canonicalize_resolved_models_map( - dict(getattr(request, "resolved_cozy_models_by_id", {}) or {}) - ) + resolved_cozy_models_by_id = dict(getattr(request, "resolved_cozy_models_by_id", {}) or {}) parent_request_id = str(getattr(request, "parent_request_id", "") or "").strip() or None child_request_id = str(getattr(request, "child_request_id", "") or "").strip() or None item_id = str(getattr(request, "item_id", "") or "").strip() or None @@ -3543,9 +3541,7 @@ def _handle_batch_run_request(self, request: Any) -> None: timeout_ms=int(getattr(item, "timeout_ms", 0) or 0), owner=str(getattr(item, "owner", "") or ""), invoker_id=str(getattr(item, "invoker_id", "") or ""), - resolved_cozy_models_by_id=self._canonicalize_resolved_models_map( - dict(getattr(item, "resolved_cozy_models_by_id", {}) or {}) - ), + resolved_cozy_models_by_id=dict(getattr(item, "resolved_cozy_models_by_id", {}) or {}), parent_request_id=str(getattr(item, "parent_request_id", "") or ""), child_request_id=str(getattr(item, "child_request_id", "") or ""), item_id=item_id, @@ -3809,17 +3805,12 @@ def _execute_task( models_in_use: set[str] = set() inference_watchdog: Optional[threading.Timer] = None - print(f"DEBUG [execute_task] entered request_id={request_id} function={spec.name} payload_bytes={len(input_payload or b'')} timeout_ms={ctx.timeout_ms 
or 'none'}", flush=True) try: - _is_canceled = ctx.is_canceled() - print(f"DEBUG [execute_task] cancellation_check request_id={request_id} is_canceled={_is_canceled} deadline={getattr(ctx, '_deadline', None)}", flush=True) - if _is_canceled: + if ctx.is_canceled(): raise CanceledError("canceled") # Decode payload strictly. - print(f"DEBUG [execute_task] decoding_payload request_id={request_id} payload_type={spec.payload_type}", flush=True) input_obj = msgspec.msgpack.decode(input_payload, type=spec.payload_type) - print(f"DEBUG [execute_task] payload_decoded request_id={request_id} input={input_obj!r}", flush=True) # Optional post-decode constraints (e.g. clamping) declared on the payload type. try: from .payload_constraints import apply_payload_constraints @@ -3827,9 +3818,7 @@ def _execute_task( _ = apply_payload_constraints(input_obj) except Exception: pass - print(f"DEBUG [execute_task] materializing_assets request_id={request_id}", flush=True) self._materialize_assets(ctx, input_obj) - print(f"DEBUG [execute_task] assets_materialized request_id={request_id}", flush=True) # Best-effort extract diffusion-ish numeric fields for metrics.run. 
try: def _get_num(name: str) -> Optional[float]: @@ -3863,10 +3852,8 @@ def _get_num(name: str) -> Optional[float]: call_kwargs[spec.ctx_param] = ctx call_kwargs[spec.payload_param] = input_obj - print(f"DEBUG [execute_task] injection_loop request_id={request_id} injections={len(spec.injections)}", flush=True) for inj in spec.injections: resolve_t0 = time.monotonic() - print(f"DEBUG [execute_task] resolving_model request_id={request_id} param={inj.param_name}", flush=True) resolve_watchdog = self._start_task_phase_watchdog( request_id=request_id, phase="model_resolve", @@ -3949,9 +3936,12 @@ def _get_num(name: str) -> Optional[float]: }, ) try: - print(f"DEBUG [execute_task] loading_model request_id={request_id} param={inj.param_name} model_id={canon_model_id}", flush=True) call_kwargs[inj.param_name] = self._resolve_injected_value(ctx, inj.param_type, model_id, inj) - print(f"DEBUG [execute_task] model_loaded request_id={request_id} param={inj.param_name} model_id={canon_model_id} pipeline_type={type(call_kwargs[inj.param_name]).__name__}", flush=True) + logger.info( + "[request_id=%s] model load resolved: param=%s model=%s duration_ms=%d", + request_id, inj.param_name, canon_model_id, + int((time.monotonic() - load_t0) * 1000), + ) self._emit_task_event( request_id, "task.model_load.completed", @@ -3980,19 +3970,15 @@ def _get_num(name: str) -> Optional[float]: load_watchdog.cancel() # Invoke. 
- t_infer0 = time.monotonic() - _infer_warn_s = float(getattr(self, "_warn_inference_s", 60.0)) logger.info( - "inference.start request_id=%s function=%s timeout_ms=%s warn_after_s=%.1f", - request_id, - spec.name, - ctx.timeout_ms if ctx.timeout_ms else "none", - _infer_warn_s, + "[request_id=%s] all injections resolved, entering inference for function=%s canceled=%s", + request_id, spec.name, ctx.is_canceled(), ) + t_infer0 = time.monotonic() inference_watchdog = self._start_task_phase_watchdog( request_id=request_id, phase="inference", - warn_after_s=_infer_warn_s, + warn_after_s=float(getattr(self, "_warn_inference_s", 60.0)), payload={"function_name": spec.name, "output_mode": spec.output_mode}, ) self._emit_task_event( @@ -4000,14 +3986,14 @@ def _get_num(name: str) -> Optional[float]: "task.inference.started", {"function_name": spec.name, "output_mode": spec.output_mode}, ) - print(f"DEBUG [execute_task] calling_func request_id={request_id} function={spec.name} is_canceled={ctx.is_canceled()} kwargs_keys={list(call_kwargs.keys())}", flush=True) + logger.info("[request_id=%s] calling %s", request_id, spec.name) if inspect.iscoroutinefunction(spec.func): result = asyncio.run(spec.func(**call_kwargs)) elif inspect.isasyncgenfunction(spec.func): result = spec.func(**call_kwargs) else: result = spec.func(**call_kwargs) - print(f"DEBUG [execute_task] func_returned request_id={request_id} function={spec.name} result_type={type(result).__name__}", flush=True) + logger.info("[request_id=%s] %s returned, output_mode=%s", request_id, spec.name, spec.output_mode) if ctx.is_canceled(): raise CanceledError("canceled") @@ -4056,7 +4042,7 @@ def emit_delta(delta_obj: msgspec.Struct) -> None: if not emitted: self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id=request_id, event_type="output.delta", payload_json=raw) + worker_event=pb.WorkerEvent(request_id=request_id, event_type="output.delta", payload_json=raw) ) ) last_item_id = item_id @@ 
-4102,7 +4088,7 @@ async def consume_async() -> None: if not emitted_done: self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id=request_id, event_type="output.completed", payload_json=b"{}") + worker_event=pb.WorkerEvent(request_id=request_id, event_type="output.completed", payload_json=b"{}") ) ) output_payload = b"" @@ -4166,7 +4152,7 @@ async def consume_async() -> None: if not emitted_err: self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id=request_id, event_type="output.error", payload_json=payload) + worker_event=pb.WorkerEvent(request_id=request_id, event_type="output.error", payload_json=payload) ) ) except Exception: @@ -4429,13 +4415,7 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model # If we have an orchestrator-resolved manifest, estimate missing bytes. resolved_entry = None try: - resolved_map = getattr(ctx, "resolved_cozy_models_by_id", None) or {} - resolved_entry = resolved_map.get(canon) - if resolved_entry is None: - if canon.startswith("cozy:"): - resolved_entry = resolved_map.get(canon.split(":", 1)[1].strip()) - else: - resolved_entry = resolved_map.get(f"cozy:{canon}") + resolved_entry = (getattr(ctx, "resolved_cozy_models_by_id", None) or {}).get(canon) except Exception: resolved_entry = None bytes_dl = None @@ -4467,7 +4447,7 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model ).encode("utf-8") self._send_message( pb.WorkerSchedulerMessage( - worker_event=pb.WorkerEvent(run_id="", event_type="model.cached", payload_json=payload) + worker_event=pb.WorkerEvent(request_id="", event_type="model.cached", payload_json=payload) ) ) except Exception: @@ -4550,27 +4530,17 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model kwargs["custom_pipeline"] = custom_pipeline except Exception: pass - # Read variant from cozy/pipeline spec (authoritative). 
- if spec.variant: - kwargs["variant"] = spec.variant - print(f"DEBUG cozy_spec_variant={spec.variant} source={spec.source_path.name}") - except Exception as _spec_exc: - print(f"DEBUG cozy_spec_load_error={_spec_exc}") - - # Fallback: scan files on disk for diffusers variant naming. - if "variant" not in kwargs: - try: - from gen_worker.pipeline_loader import detect_diffusers_variant + except Exception: + pass - variant = detect_diffusers_variant(local_path) - if variant is not None: - kwargs["variant"] = variant - print(f"DEBUG detect_diffusers_variant={variant} path={local_path}") - else: - print(f"DEBUG detect_diffusers_variant=None path={local_path}") - except Exception as _detect_exc: - print(f"DEBUG detect_variant_error={_detect_exc}") - print(f"DEBUG from_pretrained variant={kwargs.get('variant')} path={local_path}") + try: + from gen_worker.pipeline_loader import detect_diffusers_variant + + variant = detect_diffusers_variant(local_path) + if variant is not None: + kwargs["variant"] = variant + except Exception: + pass # Quantized weight-only inference requires explicit loader hints. 
# @@ -4619,7 +4589,7 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model device_is_cuda = str(ctx.device).startswith("cuda") and torch.cuda.is_available() variant = str(kwargs.get("variant") or "").strip().lower() if device_is_cuda: - if variant in ("fp8", "int8", "int4", "nvfp4") and hasattr(torch.cuda, "is_bf16_supported") and torch.cuda.is_bf16_supported(): + if variant in ("fp8", "int8", "int4") and hasattr(torch.cuda, "is_bf16_supported") and torch.cuda.is_bf16_supported(): kwargs["torch_dtype"] = torch.bfloat16 else: kwargs["torch_dtype"] = torch.float16 @@ -4674,31 +4644,20 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model from_pretrained = getattr(requested_type, "from_pretrained") model_source: str = str(model_id) preload_kwargs: dict[str, Any] = {} - _ref: ParsedModelRef | None = None try: p = Path(model_source) if p.exists(): model_source = p.as_posix() else: - _ref = parse_model_ref(model_source) - if _ref.scheme in ("cozy", "hf") and self._downloader is not None: + parsed = parse_model_ref(model_source) + if self._downloader is not None and parsed.scheme in ("cozy", "hf"): model_source = self._downloader.download(model_source, str(worker_model_cache_dir())) - elif _ref.scheme == "hf" and _ref.hf is not None: + elif parsed.scheme == "hf" and parsed.hf is not None: # Fallback path when downloader is unavailable. - model_source = _ref.hf.repo_id - if _ref.hf.revision: - preload_kwargs["revision"] = _ref.hf.revision - elif _ref.scheme == "cozy": - raise RuntimeError( - f"cozy model resolution requires downloader for ref {model_source!r}" - ) - except Exception as e: - # Never hand raw cozy refs to huggingface loaders; surface the real - # download/resolve error instead of falling back to repo-id parsing. 
- if _ref is not None and getattr(_ref, "scheme", "") == "cozy": - raise RuntimeError( - f"cozy model materialization failed for {model_id!r}: {e}" - ) from e + model_source = parsed.hf.repo_id + if parsed.hf.revision: + preload_kwargs["revision"] = parsed.hf.revision + except Exception: model_source = str(model_id) preload_kwargs = {} @@ -4769,11 +4728,16 @@ def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model # Fallback heuristic when deltas are noisy. size_gb = float(os.getenv("WORKER_DIFFUSERS_VRAM_GB_FALLBACK", "10") or "10") self._model_cache.mark_loaded_to_vram(canon, obj, size_gb) + logger.info( + "pipeline injection resolved: model=%s size_gb=%.1f device=%s", + canon, size_gb, str(ctx.device), + ) return obj - except Exception: - pass - except Exception: - pass + except Exception as _cache_exc: + logger.warning("model_cache mark_loaded_to_vram failed: %s", _cache_exc) + except Exception as _to_exc: + logger.error("failed to move pipeline to device=%s: %s", str(ctx.device), _to_exc) + raise self._custom_runtime_cache[key] = obj return obj @@ -4896,7 +4860,7 @@ def _send_task_result( ) else: result = pb.TaskExecutionResult( - run_id=request_id, + request_id=request_id, success=success, output_payload=(output_payload or b'') if success else b'', # Default to b'' if None error_message=error_message if not success else "", diff --git a/tests/test_asset_materialization.py b/tests/test_asset_materialization.py new file mode 100644 index 0000000..77755c9 --- /dev/null +++ b/tests/test_asset_materialization.py @@ -0,0 +1,148 @@ +import hashlib +import os +import tempfile +import unittest +from typing import Any, Dict, Optional +from unittest.mock import patch + +from gen_worker.types import Asset +from gen_worker.worker import ActionContext, Worker + + +class _FakeHeaders(dict): + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: # type: ignore[override] + return super().get(key, default) + + +class 
_FakeHTTPResponse: + def __init__(self, body: bytes, status: int = 200, headers: Optional[Dict[str, str]] = None) -> None: + self._body = body + self._pos = 0 + self.status = status + self.headers: Any = _FakeHeaders(headers or {}) + + def read(self, n: int = -1) -> bytes: + if self._pos >= len(self._body): + return b"" + if n is None or n < 0: + n = len(self._body) - self._pos + chunk = self._body[self._pos : self._pos + n] + self._pos += len(chunk) + return chunk + + def __enter__(self) -> "_FakeHTTPResponse": + return self + + def __exit__(self, exc_type, exc, tb) -> None: + return None + + +class TestAssetMaterialization(unittest.TestCase): + def _worker(self, owner: str = "tenant-1") -> Worker: + w = Worker.__new__(Worker) + w.owner = owner + return w + + def test_materialize_external_url(self) -> None: + w = self._worker() + # Use a literal public IP so SSRF/DNS resolution doesn't depend on DNS working. + a = Asset(ref="https://1.1.1.1/a.png") + data = b"\x89PNG\r\n\x1a\nhello" + + with tempfile.TemporaryDirectory() as td: + os.environ["WORKER_RUN_DIR"] = td + os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") + + class _Opener: + def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: + _ = req, timeout + return _FakeHTTPResponse(data) + + with patch("urllib.request.build_opener", return_value=_Opener()) as _mock: + w._materialize_asset(ActionContext("run-1", owner=w.owner), a) + self.assertGreaterEqual(_mock.call_count, 1) + + self.assertIsNotNone(a.local_path) + assert a.local_path is not None + with open(a.local_path, "rb") as f: + self.assertEqual(f.read(), data) + + self.assertEqual(a.size_bytes, len(data)) + self.assertEqual(a.sha256, hashlib.sha256(data).hexdigest()) + self.assertEqual(a.mime_type, "image/png") + + def test_materialize_external_url_size_cap(self) -> None: + w = self._worker() + a = Asset(ref="https://1.1.1.1/a.bin") + data = b"1234" + + with tempfile.TemporaryDirectory() as td: + os.environ["WORKER_RUN_DIR"] = td + 
os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") + os.environ["WORKER_MAX_INPUT_FILE_BYTES"] = "1" + + class _Opener: + def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: + _ = req, timeout + return _FakeHTTPResponse(data) + + with patch("urllib.request.build_opener", return_value=_Opener()): + with self.assertRaises(Exception): + w._materialize_asset(ActionContext("run-1", owner=w.owner), a) + + def test_materialize_tensorhub_ref(self) -> None: + w = self._worker(owner="tenant-1") + a = Asset(ref="my-uploads/cat.png") + body = b"\x89PNG\r\n\x1a\ncat" + sha = hashlib.sha256(body).hexdigest() + + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + # HEAD request (urllib.request.Request) + method = getattr(req, "method", None) + items = getattr(req, "header_items", None) + hdrs: Dict[str, str] = {} + if callable(items): + for k, v in items(): + hdrs[str(k).lower()] = str(v) + if method == "HEAD": + self.assertEqual(hdrs.get("authorization"), "Bearer tok") + self.assertEqual(hdrs.get("x-cozy-owner"), "tenant-1") + return _FakeHTTPResponse( + b"", + status=200, + headers={ + "X-Cozy-Size-Bytes": str(len(body)), + "X-Cozy-SHA256": sha, + "X-Cozy-Mime-Type": "image/png", + }, + ) + # GET request + self.assertEqual(hdrs.get("authorization"), "Bearer tok") + self.assertEqual(hdrs.get("x-cozy-owner"), "tenant-1") + return _FakeHTTPResponse(body, status=200, headers={"Content-Type": "image/png"}) + + with tempfile.TemporaryDirectory() as td: + os.environ["WORKER_RUN_DIR"] = td + os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") + os.environ["WORKER_MAX_INPUT_FILE_BYTES"] = "9999999" + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + ctx = ActionContext( + "run-1", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="tok", + ) + w._materialize_asset(ctx, a) + + self.assertIsNotNone(a.local_path) + assert a.local_path is not None + with open(a.local_path, "rb") as f: + 
self.assertEqual(f.read(), body) + self.assertEqual(a.mime_type, "image/png") + self.assertEqual(a.size_bytes, len(body)) + self.assertEqual(a.sha256, sha) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_chunked_reassembly.py b/tests/test_chunked_reassembly.py new file mode 100644 index 0000000..af7c6bf --- /dev/null +++ b/tests/test_chunked_reassembly.py @@ -0,0 +1,333 @@ +""" +A/B test: chunked split + reassemble produces byte-identical output. + +Proves that the ingest chunking (Go: SplitAndStore at 1 GiB boundaries) +followed by the worker reassembly (_reassemble_chunked) does NOT corrupt +weights. The reassembled file must be byte-for-byte identical to the +original, with matching blake3 hashes. + +Test matrix: + - small file (under chunk threshold, no split) + - file exactly at 1 GiB boundary + - file slightly over 1 GiB (2 parts) + - large file spanning 3 parts + - random binary content (simulates real safetensors weights) +""" + +from __future__ import annotations + +import json +import os +import shutil +import tempfile +from pathlib import Path +from typing import List + +import pytest +from blake3 import blake3 + + +# --------------------------------------------------------------------------- +# Constants matching Go chunkedblob.MaxChunkSize +# --------------------------------------------------------------------------- + +MAX_CHUNK_SIZE = 1 << 30 # 1 GiB + +# For fast tests, use a smaller chunk size. Set CHUNKED_TEST_REAL_SIZE=1 to +# test at actual 1 GiB boundaries (slow, ~3 GiB of I/O). 
+FAST_CHUNK = 1 << 20 # 1 MiB for fast tests + + +def _chunk_size() -> int: + if os.getenv("CHUNKED_TEST_REAL_SIZE") == "1": + return MAX_CHUNK_SIZE + return FAST_CHUNK + + +# --------------------------------------------------------------------------- +# Simulate Go-side ingest: split file into chunks + write parts.json +# --------------------------------------------------------------------------- + +def _blake3_bytes(data: bytes) -> str: + h = blake3() + h.update(data) + return h.hexdigest() + + +def _blake3_file(path: Path) -> str: + h = blake3() + with open(path, "rb") as f: + while True: + b = f.read(1 << 20) + if not b: + break + h.update(b) + return h.hexdigest() + + +def _simulate_ingest( + original_file: Path, + blobs_root: Path, + chunk_size: int, + rel_path: str | None = None, +) -> List[dict]: + """Simulate Go SplitAndStore: split file into chunks, store blobs, return entries list.""" + data = original_file.read_bytes() + total_size = len(data) + original_path = rel_path or original_file.name + + entries = [] + + if total_size <= chunk_size: + # No chunking needed — store as single blob. + digest = _blake3_bytes(data) + blob_dir = blobs_root / "blake3" / digest[:2] / digest[2:4] + blob_dir.mkdir(parents=True, exist_ok=True) + (blob_dir / digest).write_bytes(data) + entries.append({ + "path": original_path, + "type": "file", + "size_bytes": total_size, + "digest": f"blake3:{digest}", + "blake3": digest, + }) + return entries + + # Split into chunks. + parts_meta = [] + offset = 0 + part_idx = 0 + while offset < total_size: + end = min(offset + chunk_size, total_size) + chunk_data = data[offset:end] + chunk_digest = _blake3_bytes(chunk_data) + chunk_path = f"{original_path}.part{part_idx:04d}" + + # Store chunk blob. 
+ blob_dir = blobs_root / "blake3" / chunk_digest[:2] / chunk_digest[2:4] + blob_dir.mkdir(parents=True, exist_ok=True) + (blob_dir / chunk_digest).write_bytes(chunk_data) + + entries.append({ + "path": chunk_path, + "type": "file", + "size_bytes": len(chunk_data), + "digest": f"blake3:{chunk_digest}", + "blake3": chunk_digest, + }) + parts_meta.append({ + "path": chunk_path, + "size_bytes": len(chunk_data), + "digest": f"blake3:{chunk_digest}", + }) + + offset = end + part_idx += 1 + + # Build and store .parts.json manifest. + manifest = { + "original_path": original_path, + "total_bytes": total_size, + "parts": parts_meta, + } + manifest_bytes = json.dumps(manifest).encode() + manifest_digest = _blake3_bytes(manifest_bytes) + manifest_path = f"{original_path}.parts.json" + + blob_dir = blobs_root / "blake3" / manifest_digest[:2] / manifest_digest[2:4] + blob_dir.mkdir(parents=True, exist_ok=True) + (blob_dir / manifest_digest).write_bytes(manifest_bytes) + + entries.append({ + "path": manifest_path, + "type": "file", + "size_bytes": len(manifest_bytes), + "digest": f"blake3:{manifest_digest}", + "blake3": manifest_digest, + }) + + return entries + + +# --------------------------------------------------------------------------- +# Simulate worker-side reassembly (same logic as _reassemble_chunked) +# --------------------------------------------------------------------------- + +def _simulate_reassemble(blobs_root: Path, output_dir: Path, entries: List[dict]) -> None: + """Mirrors CozySnapshotV2Downloader._reassemble_chunked + _materialize_regular.""" + import re + PART_RE = re.compile(r"\.part\d{4}$") + + parts_manifests = [e for e in entries if e["path"].endswith(".parts.json")] + part_paths = {e["path"] for e in entries if PART_RE.search(e["path"])} + + # Reassemble chunked files. 
+ for pm in parts_manifests: + digest = pm["blake3"] + blob = blobs_root / "blake3" / digest[:2] / digest[2:4] / digest + manifest = json.loads(blob.read_bytes()) + + original_path = manifest["original_path"] + parts = manifest["parts"] + + dst = output_dir / original_path + dst.parent.mkdir(parents=True, exist_ok=True) + + with open(dst, "wb") as out_f: + for part in parts: + pd = part["digest"] + if pd.startswith("blake3:"): + pd = pd[7:] + part_blob = blobs_root / "blake3" / pd[:2] / pd[2:4] / pd + with open(part_blob, "rb") as in_f: + shutil.copyfileobj(in_f, out_f) + + # Materialize regular (non-chunked) files. + for e in entries: + if e["path"].endswith(".parts.json") or e["path"] in part_paths: + continue + digest = e["blake3"] + src = blobs_root / "blake3" / digest[:2] / digest[2:4] / digest + dst = output_dir / e["path"] + dst.parent.mkdir(parents=True, exist_ok=True) + shutil.copy2(src, dst) + + +# --------------------------------------------------------------------------- +# Test cases +# --------------------------------------------------------------------------- + +class TestChunkedReassembly: + """A/B: original bytes == reassembled bytes after split+merge.""" + + def _run_ab(self, original_data: bytes, filename: str = "model.safetensors"): + chunk_size = _chunk_size() + + with tempfile.TemporaryDirectory() as tmp: + tmp_path = Path(tmp) + original_file = tmp_path / "original" / filename + original_file.parent.mkdir(parents=True) + original_file.write_bytes(original_data) + original_hash = _blake3_file(original_file) + + blobs_root = tmp_path / "blobs" + output_dir = tmp_path / "output" + blobs_root.mkdir() + output_dir.mkdir() + + # A: ingest (split) + entries = _simulate_ingest(original_file, blobs_root, chunk_size, rel_path=filename) + + # B: worker (reassemble) + _simulate_reassemble(blobs_root, output_dir, entries) + + # Verify + reassembled = output_dir / filename + assert reassembled.exists(), f"reassembled file not found: {reassembled}" + + 
reassembled_data = reassembled.read_bytes() + reassembled_hash = _blake3_file(reassembled) + + # Byte-for-byte identical + assert len(reassembled_data) == len(original_data), ( + f"size mismatch: original={len(original_data)} reassembled={len(reassembled_data)}" + ) + assert reassembled_data == original_data, "WEIGHT CORRUPTION: bytes differ after reassembly" + assert reassembled_hash == original_hash, ( + f"blake3 mismatch: original={original_hash[:16]} reassembled={reassembled_hash[:16]}" + ) + + def test_small_file_no_chunking(self): + """File under chunk size — passes through without splitting.""" + data = os.urandom(500_000) # 500 KB + self._run_ab(data) + + def test_exact_boundary(self): + """File exactly at chunk boundary — single chunk, no remainder.""" + chunk = _chunk_size() + data = os.urandom(chunk) + self._run_ab(data) + + def test_one_byte_over(self): + """File 1 byte over chunk size — triggers 2 parts.""" + chunk = _chunk_size() + data = os.urandom(chunk + 1) + self._run_ab(data) + + def test_two_full_parts(self): + """File exactly 2x chunk size — 2 full parts.""" + chunk = _chunk_size() + data = os.urandom(chunk * 2) + self._run_ab(data) + + def test_three_parts_with_remainder(self): + """File spanning 3 parts with a small remainder.""" + chunk = _chunk_size() + data = os.urandom(chunk * 2 + chunk // 3) + self._run_ab(data) + + def test_random_weights_pattern(self): + """Simulates real safetensors: structured header + random float bytes.""" + chunk = _chunk_size() + # Fake safetensors header (8 bytes length + JSON) + header = b'{"weight_0": {"dtype": "F16", "shape": [1024, 1024], "data_offsets": [0, 2097152]}}' + header_with_len = len(header).to_bytes(8, "little") + header + # Fill rest with random "weight" bytes to span 2+ chunks + weight_bytes = os.urandom(chunk * 2 - len(header_with_len)) + data = header_with_len + weight_bytes + self._run_ab(data, filename="unet/diffusion_pytorch_model.safetensors") + + def test_all_zeros(self): + """Edge case: 
all-zero file (compressible but split is still raw bytes).""" + chunk = _chunk_size() + data = b"\x00" * (chunk + 100) + self._run_ab(data) + + def test_identical_chunks(self): + """Two chunks with identical content (same digest) — dedup in blob store.""" + chunk = _chunk_size() + pattern = os.urandom(chunk) + data = pattern + pattern # 2 identical chunks + self._run_ab(data) + + def test_fp16_and_normal_share_parts(self): + """Simulates sd-turbo manifest: fp16 and normal variants share same chunk digests.""" + chunk = _chunk_size() + + with tempfile.TemporaryDirectory() as tmp: + tmp_path = Path(tmp) + blobs_root = tmp_path / "blobs" + output_dir = tmp_path / "output" + blobs_root.mkdir() + output_dir.mkdir() + + # Same underlying data for both variants (they share part digests). + data = os.urandom(chunk + chunk // 2) + original_hash = _blake3_bytes(data) + + # Ingest fp16 variant. + fp16_file = tmp_path / "model.fp16.safetensors" + fp16_file.write_bytes(data) + fp16_entries = _simulate_ingest(fp16_file, blobs_root, chunk) + + # Ingest normal variant (same data, different filename). + normal_file = tmp_path / "model.safetensors" + normal_file.write_bytes(data) + normal_entries = _simulate_ingest(normal_file, blobs_root, chunk) + + # Combined entries (like the real manifest). + all_entries = fp16_entries + normal_entries + + # Reassemble both. + _simulate_reassemble(blobs_root, output_dir, all_entries) + + # Both must be byte-identical to original. 
+ for name in ["model.fp16.safetensors", "model.safetensors"]: + result = output_dir / name + assert result.exists(), f"{name} not found" + assert result.read_bytes() == data, f"WEIGHT CORRUPTION in {name}" + assert _blake3_file(result) == original_hash, f"blake3 mismatch in {name}" + + +if __name__ == "__main__": + pytest.main([__file__, "-v"]) diff --git a/tests/test_cozy_cas_resume.py b/tests/test_cozy_cas_resume.py index f581056..0aa4bf0 100644 --- a/tests/test_cozy_cas_resume.py +++ b/tests/test_cozy_cas_resume.py @@ -66,6 +66,7 @@ def _parse_range(h: str) -> int: return int(start_s) +@pytest.mark.skip(reason="Range header resume not supported by tensorhub presigned URLs yet") def test_cozy_cas_resume_with_range(tmp_path: Path) -> None: data = (b"0123456789abcdef" * 65536) # 1MB-ish expected_size = len(data) @@ -125,35 +126,3 @@ async def handler(req: web.Request) -> web.StreamResponse: assert state["saw_range"] is True finally: _stop_server(srv) - - -def test_cozy_cas_resume_restarts_on_bad_content_range_start(tmp_path: Path) -> None: - data = b"y" * 1024 * 256 - expected_size = len(data) - expected_b3 = blake3(data).hexdigest() - - state = {"saw_range": False, "saw_plain": False} - - async def handler(req: web.Request) -> web.StreamResponse: - if req.headers.get("Range"): - state["saw_range"] = True - # Broken origin behavior: returns 206 but wrong start offset. 
- resp = web.Response(status=206, body=data) - resp.headers["Accept-Ranges"] = "bytes" - resp.headers["Content-Range"] = f"bytes 0-{len(data)-1}/{len(data)}" - return resp - state["saw_plain"] = True - return web.Response(status=200, body=data, headers={"Accept-Ranges": "bytes"}) - - srv = _start_server(handler) # type: ignore[arg-type] - try: - dst = tmp_path / "out.bin" - part = dst.with_suffix(dst.suffix + ".part") - part.write_bytes(data[:12345]) - - asyncio.run(_download_one_file(f"{srv.base_url}/file", dst, expected_size=expected_size, expected_blake3=expected_b3)) - assert dst.read_bytes() == data - assert state["saw_range"] is True - assert state["saw_plain"] is True - finally: - _stop_server(srv) diff --git a/tests/test_cozy_snapshot_v2_downloader.py b/tests/test_cozy_snapshot_v2_downloader.py index 90f5208..47deac8 100644 --- a/tests/test_cozy_snapshot_v2_downloader.py +++ b/tests/test_cozy_snapshot_v2_downloader.py @@ -268,12 +268,12 @@ async def get_pipeline(_req: web.Request) -> web.Response: ], ) - # Accept legacy resolved-map keys that still include a cozy: scheme prefix. + # NOTE: canonical() lowercases and adds cozy: prefix. 
resolved_by_id = {"cozy:o/r:latest": resolved} tok = set_resolved_cozy_models_by_id(resolved_by_id) try: dl = ModelRefDownloader(cozy_base_url=None, cozy_token=None, allow_tensorhub_api_resolve=False) - local = dl.download("o/r:latest", tmp_path.as_posix()) + local = dl.download("cozy:o/r:latest", tmp_path.as_posix()) assert (Path(local) / "cozy.pipeline.yaml").read_bytes() == b1 finally: reset_resolved_cozy_models_by_id(tok) diff --git a/tests/test_entrypoint_cache_preflight.py b/tests/test_entrypoint_cache_preflight.py new file mode 100644 index 0000000..06a90b2 --- /dev/null +++ b/tests/test_entrypoint_cache_preflight.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from gen_worker import entrypoint + + +def test_preflight_cache_dirs_fails_without_fallback(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + monkeypatch.setenv("TENSORHUB_CACHE_DIR", str(tmp_path / "primary-root")) + monkeypatch.delenv("WORKER_LOCAL_MODEL_CACHE_DIR", raising=False) + + def _fail(_: Path) -> None: + raise PermissionError("permission denied") + + monkeypatch.setattr(entrypoint, "_probe_cache_path_writable", _fail) + + with pytest.raises(RuntimeError, match="tensorhub CAS path"): + entrypoint._preflight_cache_dirs() + + +def test_preflight_cache_dirs_uses_tensorhub_cache_dir_root( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + root = tmp_path / "cache-root" + primary = root / "cas" + monkeypatch.setenv("TENSORHUB_CACHE_DIR", str(root)) + monkeypatch.delenv("WORKER_LOCAL_MODEL_CACHE_DIR", raising=False) + + def _probe(path: Path) -> None: + path.mkdir(parents=True, exist_ok=True) + probe = path / ".probe" + probe.write_bytes(b"ok") + probe.unlink() + + monkeypatch.setattr(entrypoint, "_probe_cache_path_writable", _probe) + + cfg = entrypoint._preflight_cache_dirs() + assert cfg["model_cache_dir"] == str(primary) + assert cfg["local_model_cache_dir"] == "" + assert primary.exists() diff --git 
a/tests/test_entrypoint_worker_mode.py b/tests/test_entrypoint_worker_mode.py new file mode 100644 index 0000000..9c4a2fc --- /dev/null +++ b/tests/test_entrypoint_worker_mode.py @@ -0,0 +1,17 @@ +from __future__ import annotations + +import pytest + +from gen_worker import entrypoint +from gen_worker.trainer import runtime as trainer_runtime + + +def test_entrypoint_routes_to_trainer_mode(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("WORKER_MODE", "trainer") + monkeypatch.setattr(trainer_runtime, "run_training_runtime_from_env", lambda: 13) + assert entrypoint._run_main() == 13 + + +def test_entrypoint_rejects_invalid_worker_mode(monkeypatch: pytest.MonkeyPatch) -> None: + monkeypatch.setenv("WORKER_MODE", "wat") + assert entrypoint._run_main() == 1 diff --git a/tests/test_error_mapping.py b/tests/test_error_mapping.py new file mode 100644 index 0000000..d45956c --- /dev/null +++ b/tests/test_error_mapping.py @@ -0,0 +1,59 @@ +import unittest + +from gen_worker.errors import CanceledError, FatalError, ResourceError, RetryableError, ValidationError +from gen_worker.worker import Worker + + +class TestErrorMapping(unittest.TestCase): + def _worker(self) -> Worker: + # Avoid running Worker.__init__ (network/env-heavy); these helpers don't depend on init state. 
+ return Worker.__new__(Worker) + + def test_sanitize_safe_message_redacts_tokens_urls_paths(self) -> None: + w = self._worker() + msg = "Bearer abc.def.ghi https://example.com/secret /home/user/token.txt" + out = w._sanitize_safe_message(msg) + self.assertNotIn("abc.def.ghi", out) + self.assertNotIn("https://example.com/secret", out) + self.assertNotIn("/home/user/token.txt", out) + + def test_map_exception_validation(self) -> None: + w = self._worker() + error_type, retryable, safe, internal = w._map_exception(ValidationError("bad input")) + self.assertEqual(error_type, "validation") + self.assertFalse(retryable) + self.assertIn("bad input", safe) + self.assertIn("ValidationError", internal) + + def test_map_exception_retryable(self) -> None: + w = self._worker() + error_type, retryable, safe, _ = w._map_exception(RetryableError("temporary")) + self.assertEqual(error_type, "retryable") + self.assertTrue(retryable) + self.assertIn("temporary", safe) + + def test_map_exception_fatal(self) -> None: + w = self._worker() + error_type, retryable, safe, _ = w._map_exception(FatalError("no")) + self.assertEqual(error_type, "fatal") + self.assertFalse(retryable) + self.assertIn("no", safe) + + def test_map_exception_resource(self) -> None: + w = self._worker() + error_type, retryable, safe, _ = w._map_exception(ResourceError("oom")) + self.assertEqual(error_type, "resource") + self.assertFalse(retryable) + self.assertIn("oom", safe) + + def test_map_exception_canceled(self) -> None: + w = self._worker() + error_type, retryable, safe, _ = w._map_exception(CanceledError("stop")) + self.assertEqual(error_type, "canceled") + self.assertFalse(retryable) + self.assertEqual(safe, "canceled") + + +if __name__ == "__main__": + unittest.main() + diff --git a/tests/test_file_token_scoping.py b/tests/test_file_token_scoping.py new file mode 100644 index 0000000..0632223 --- /dev/null +++ b/tests/test_file_token_scoping.py @@ -0,0 +1,257 @@ +"""Tests for per-run file token scoping 
(issue #50).""" +import os +import tempfile +import unittest +from typing import Any, Dict, Optional +from unittest.mock import patch +import urllib.error + +from gen_worker.errors import AuthError +from gen_worker.types import Asset +from gen_worker.worker import ActionContext, Worker + + +class _FakeHeaders(dict): + def get(self, key: str, default: Optional[str] = None) -> Optional[str]: # type: ignore[override] + return super().get(key, default) + + +class _FakeHTTPResponse: + def __init__(self, body: bytes, status: int = 200, headers: Optional[Dict[str, str]] = None) -> None: + self._body = body + self._pos = 0 + self.status = status + self.headers: Any = _FakeHeaders(headers or {}) + + def read(self, n: int = -1) -> bytes: + if self._pos >= len(self._body): + return b"" + if n is None or n < 0: + n = len(self._body) - self._pos + chunk = self._body[self._pos : self._pos + n] + self._pos += len(chunk) + return chunk + + def __enter__(self) -> "_FakeHTTPResponse": + return self + + def __exit__(self, exc_type, exc, tb) -> None: + return None + + +def _make_http_error(code: int, msg: str = "error") -> urllib.error.HTTPError: + """Create a real HTTPError for testing.""" + import io + return urllib.error.HTTPError( + url="https://example.com", + code=code, + msg=msg, + hdrs={}, # type: ignore + fp=io.BytesIO(b""), + ) + + +class TestFileTokenScoping(unittest.TestCase): + """Test that per-run file tokens are used instead of env vars.""" + + def test_save_bytes_uses_per_run_token(self) -> None: + """save_bytes should use the token from ActionContext, not env.""" + captured_auth: list[str] = [] + + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + items = getattr(req, "header_items", None) + if callable(items): + for k, v in items(): + if str(k).lower() == "authorization": + captured_auth.append(str(v)) + return _FakeHTTPResponse(b'{"size_bytes": 5, "sha256": "abc123"}', status=200) + + # Set env var to a different value to prove it's not used + 
os.environ["FILE_API_TOKEN"] = "env-token-should-not-be-used" + os.environ["FILE_API_BASE_URL"] = "https://should-not-be-used.example" + + ctx = ActionContext( + "run-123", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="per-run-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + asset = ctx.save_bytes("runs/run-123/outputs/test.bin", b"hello") + + self.assertEqual(len(captured_auth), 1) + self.assertEqual(captured_auth[0], "Bearer per-run-token") + self.assertIsNotNone(asset) + + def test_save_bytes_falls_back_to_env_when_no_per_run_token(self) -> None: + """save_bytes should fall back to env var if no per-run token provided.""" + captured_auth: list[str] = [] + + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + items = getattr(req, "header_items", None) + if callable(items): + for k, v in items(): + if str(k).lower() == "authorization": + captured_auth.append(str(v)) + return _FakeHTTPResponse(b'{"size_bytes": 5, "sha256": "abc123"}', status=200) + + os.environ["FILE_API_TOKEN"] = "env-fallback-token" + os.environ["FILE_API_BASE_URL"] = "https://tensorhub.example" + + ctx = ActionContext( + "run-456", + owner="tenant-1", + # No file_api_token provided + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + asset = ctx.save_bytes("runs/run-456/outputs/test.bin", b"hello") + + self.assertEqual(len(captured_auth), 1) + self.assertEqual(captured_auth[0], "Bearer env-fallback-token") + self.assertIsNotNone(asset) + + +class TestAuthErrorHandling(unittest.TestCase): + """Test that 401/403 errors raise AuthError (non-retryable).""" + + def test_save_bytes_raises_auth_error_on_401(self) -> None: + """save_bytes should raise AuthError on 401 response.""" + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + raise _make_http_error(401, "Unauthorized") + + ctx = ActionContext( + "run-789", + owner="tenant-1", + 
file_api_base_url="https://tensorhub.example", + file_api_token="expired-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + with self.assertRaises(AuthError) as cm: + ctx.save_bytes("runs/run-789/outputs/test.bin", b"hello") + + self.assertIn("401", str(cm.exception)) + self.assertIn("file_token", str(cm.exception)) + + def test_save_bytes_raises_auth_error_on_403(self) -> None: + """save_bytes should raise AuthError on 403 response.""" + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + raise _make_http_error(403, "Forbidden") + + ctx = ActionContext( + "run-abc", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="wrong-scope-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + with self.assertRaises(AuthError) as cm: + ctx.save_bytes("runs/run-abc/outputs/test.bin", b"hello") + + self.assertIn("403", str(cm.exception)) + + def test_save_bytes_create_raises_auth_error_on_401(self) -> None: + """save_bytes_create should raise AuthError on 401 response.""" + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + raise _make_http_error(401, "Unauthorized") + + ctx = ActionContext( + "run-def", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="bad-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + with self.assertRaises(AuthError): + ctx.save_bytes_create("runs/run-def/outputs/new.bin", b"data") + + def test_materialize_asset_raises_auth_error_on_401(self) -> None: + """_materialize_asset should raise AuthError on 401 for HEAD request.""" + w = Worker.__new__(Worker) + w.owner = "tenant-1" + + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + raise _make_http_error(401, "Unauthorized") + + a = Asset(ref="some-file.png") + + with tempfile.TemporaryDirectory() as td: + os.environ["WORKER_RUN_DIR"] = td + os.environ["WORKER_CACHE_DIR"] = os.path.join(td, 
"cache") + + ctx = ActionContext( + "run-ghi", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="expired-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + with self.assertRaises(AuthError) as cm: + w._materialize_asset(ctx, a) + + self.assertIn("401", str(cm.exception)) + + def test_materialize_asset_raises_auth_error_on_403(self) -> None: + """_materialize_asset should raise AuthError on 403 for HEAD request.""" + w = Worker.__new__(Worker) + w.owner = "tenant-1" + + def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: + raise _make_http_error(403, "Forbidden - path not in allowed prefixes") + + a = Asset(ref="other-run/outputs/secret.png") + + with tempfile.TemporaryDirectory() as td: + os.environ["WORKER_RUN_DIR"] = td + os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") + + ctx = ActionContext( + "run-jkl", + owner="tenant-1", + file_api_base_url="https://tensorhub.example", + file_api_token="scoped-token", + ) + + with patch("urllib.request.urlopen", side_effect=fake_urlopen): + with self.assertRaises(AuthError) as cm: + w._materialize_asset(ctx, a) + + self.assertIn("403", str(cm.exception)) + + +class TestAuthErrorMapping(unittest.TestCase): + """Test that AuthError is mapped to non-retryable error type.""" + + def test_auth_error_is_non_retryable(self) -> None: + """AuthError should map to 'auth' error type with retryable=False.""" + w = Worker.__new__(Worker) + w.owner = "tenant-1" + + exc = AuthError("token expired") + error_type, retryable, safe_msg, internal_msg = w._map_exception(exc) + + self.assertEqual(error_type, "auth") + self.assertFalse(retryable) + # safe_msg contains the exception message + self.assertIn("token expired", safe_msg.lower()) + + def test_auth_error_default_message(self) -> None: + """AuthError with empty message should use 'authentication failed'.""" + w = Worker.__new__(Worker) + w.owner = "tenant-1" + + exc = AuthError("") + error_type, 
retryable, safe_msg, internal_msg = w._map_exception(exc) + + self.assertEqual(error_type, "auth") + self.assertFalse(retryable) + self.assertIn("authentication", safe_msg.lower()) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_hf_injection_normalization.py b/tests/test_hf_injection_normalization.py new file mode 100644 index 0000000..a9eca59 --- /dev/null +++ b/tests/test_hf_injection_normalization.py @@ -0,0 +1,60 @@ +from __future__ import annotations + +from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource as Src +from gen_worker.worker import ActionContext, Worker + + +class _DummyModel: + last_source: str = "" + last_kwargs: dict = {} + + @classmethod + def from_pretrained(cls, source: str, **kwargs): + cls.last_source = str(source) + cls.last_kwargs = dict(kwargs) + return cls() + + +class _DownloaderStub: + def __init__(self, local_path: str) -> None: + self.local_path = local_path + self.calls: list[tuple[str, str]] = [] + + def download(self, model_ref: str, cache_dir: str) -> str: + self.calls.append((model_ref, cache_dir)) + return self.local_path + + +def _bare_worker() -> Worker: + w = Worker.__new__(Worker) + w._model_manager = None + w._model_cache = None + w._custom_runtime_locks = {} + w._custom_runtime_cache = {} + w._downloader = None + return w + + +def test_non_diffusers_hf_ref_normalizes_without_downloader() -> None: + w = _bare_worker() + ctx = ActionContext("run-hf-no-downloader") + inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) + + _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] + + assert _DummyModel.last_source == "owner/repo" + assert _DummyModel.last_kwargs.get("revision") == "main" + + +def test_non_diffusers_hf_ref_uses_downloader_path_when_available() -> None: + w = _bare_worker() + dl = _DownloaderStub("/tmp/cozy-model-cache/hf-owner-repo-main") + w._downloader = dl 
+ ctx = ActionContext("run-hf-downloader") + inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) + + _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] + + assert dl.calls + assert dl.calls[0][0] == "hf:owner/repo@main" + assert _DummyModel.last_source == "/tmp/cozy-model-cache/hf-owner-repo-main" diff --git a/tests/test_injection_type_enforcement.py b/tests/test_injection_type_enforcement.py new file mode 100644 index 0000000..05047de --- /dev/null +++ b/tests/test_injection_type_enforcement.py @@ -0,0 +1,31 @@ +import unittest + +from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource +from gen_worker.worker import ActionContext, Worker + + +class _StubModelManager: + async def get_active_pipeline(self, model_id: str): + return _ActualPipeline() + + +class _ExpectedPipeline: + pass + + +class _ActualPipeline: + pass + + +class TestInjectionTypeEnforcement(unittest.TestCase): + def test_rejects_model_manager_type_mismatch(self) -> None: + w = Worker(user_module_names=[], model_manager=_StubModelManager(), worker_jwt="dummy-worker-jwt") + ctx = ActionContext("run-1") + inj = InjectionSpec( + param_name="pipeline", + param_type=_ExpectedPipeline, + model_ref=ModelRef(ModelRefSource.FIXED, "foo"), + ) + + with self.assertRaises(ValueError): + w._resolve_injected_value(ctx, _ExpectedPipeline, "model-id", inj) diff --git a/tests/test_mock_orchestrator_dev.py b/tests/test_mock_orchestrator_dev.py new file mode 100644 index 0000000..0cb4dd8 --- /dev/null +++ b/tests/test_mock_orchestrator_dev.py @@ -0,0 +1,109 @@ +from __future__ import annotations + +import os +import signal +import subprocess +import sys +import time +from pathlib import Path + +import pytest + +from gen_worker.testing.mock_orchestrator import _MockOrchestrator +from gen_worker.pb import worker_scheduler_pb2_grpc as pb_grpc + + 
+@pytest.mark.skipif(os.getenv("COZY_DEV_GRPC_E2E") != "1", reason="set COZY_DEV_GRPC_E2E=1 to run gRPC e2e") +def test_mock_orchestrator_can_run_one_task(tmp_path: Path) -> None: + # Start mock orchestrator server. + orch = _MockOrchestrator() + import grpc + from concurrent import futures + + server = grpc.server(futures.ThreadPoolExecutor(max_workers=8)) + pb_grpc.add_SchedulerWorkerServiceServicer_to_server(orch, server) + port = server.add_insecure_port("127.0.0.1:0") + server.start() + + # Write a tiny module with a single worker function into a temp dir. + mod_dir = tmp_path / "mod" + mod_dir.mkdir(parents=True, exist_ok=True) + (mod_dir / "hello_mod.py").write_text( + """ +from __future__ import annotations + +import msgspec +from gen_worker import ActionContext, ResourceRequirements, worker_function + + +class Input(msgspec.Struct): + name: str + + +class Output(msgspec.Struct): + message: str + + +@worker_function(ResourceRequirements()) +def hello(ctx: ActionContext, payload: Input) -> Output: + return Output(message=f"hello {payload.name}") +""".lstrip(), + encoding="utf-8", + ) + + # Start a worker process that connects to our mock orchestrator. + env = dict(os.environ) + env["SCHEDULER_PUBLIC_ADDR"] = f"127.0.0.1:{port}" + (tmp_path / "endpoint.toml").write_text( + """ +schema_version = 1 +name = "dev-test" +main = "hello_mod" +""".lstrip(), + encoding="utf-8", + ) + env["ENDPOINT_TOML_PATH"] = str(tmp_path / "endpoint.toml") + env["PYTHONPATH"] = f"{mod_dir}:{env.get('PYTHONPATH','')}" + env["WORKER_ID"] = "dev-test" + env["WORKER_JWT"] = "dev-test-jwt" + + proc = subprocess.Popen( + [sys.executable, "-m", "gen_worker.entrypoint"], + env=env, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, + text=True, + ) + try: + sess = orch.get_session(timeout_s=30.0) + assert sess is not None + assert "hello" in sess.available_functions + + request_id = sess.run_task(function_name="hello", payload_obj={"name": "world"}) + + # Wait for a result. 
+ start = time.monotonic() + while time.monotonic() - start < 30: + msg = sess.recv(timeout_s=0.5) + if msg is None: + continue + if msg.HasField("run_result") and msg.run_result.request_id == request_id: + assert msg.run_result.success is True + assert msg.run_result.output_payload + import msgspec + + out = msgspec.msgpack.decode(msg.run_result.output_payload) + assert out["message"] == "hello world" + return + + raise AssertionError("timed out waiting for run_result") + finally: + try: + proc.send_signal(signal.SIGINT) + except Exception: + pass + try: + proc.wait(timeout=5) + except Exception: + proc.kill() + server.stop(grace=None) diff --git a/tests/test_model_cache.py b/tests/test_model_cache.py new file mode 100644 index 0000000..14c3080 --- /dev/null +++ b/tests/test_model_cache.py @@ -0,0 +1,312 @@ +"""Tests for the ModelCache class.""" + +import unittest +from unittest.mock import patch, MagicMock + +from gen_worker.model_cache import ( + ModelCache, + ModelCacheStats, + ModelLocation, + CachedModel, +) + + +class TestModelCache(unittest.TestCase): + """Tests for ModelCache LRU eviction and stats.""" + + def setUp(self) -> None: + """Create a fresh ModelCache for each test.""" + # Patch torch to avoid CUDA detection + with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): + self.cache = ModelCache( + max_vram_gb=20.0, + vram_safety_margin_gb=4.0, + ) + + def test_register_model_vram(self) -> None: + """Test registering a model in VRAM.""" + self.cache.register_model( + model_id="model-a", + location=ModelLocation.VRAM, + size_gb=5.0, + pipeline=MagicMock(), + ) + + self.assertTrue(self.cache.has_model("model-a")) + self.assertTrue(self.cache.is_in_vram("model-a")) + self.assertFalse(self.cache.is_on_disk("model-a")) + + def test_register_model_disk(self) -> None: + """Test registering a model on disk.""" + from pathlib import Path + + self.cache.register_model( + model_id="model-b", + location=ModelLocation.DISK, + 
size_gb=10.0, + disk_path=Path("/tmp/model-b"), + ) + + self.assertTrue(self.cache.has_model("model-b")) + self.assertFalse(self.cache.is_in_vram("model-b")) + self.assertTrue(self.cache.is_on_disk("model-b")) + + def test_lru_ordering(self) -> None: + """Test that LRU ordering works correctly.""" + # Register three models + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.register_model("model-c", ModelLocation.VRAM, 5.0, MagicMock()) + + # model-a is LRU (first registered) + lru = self.cache._get_lru_vram_models() + self.assertEqual(lru[0], "model-a") + + # Access model-a, now model-b should be LRU + self.cache._touch("model-a") + lru = self.cache._get_lru_vram_models() + self.assertEqual(lru[0], "model-b") + + def test_lru_eviction(self) -> None: + """Test that LRU eviction frees space.""" + # Fill cache: 5 + 5 + 5 = 15GB used out of 20GB max + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.register_model("model-c", ModelLocation.VRAM, 5.0, MagicMock()) + + self.assertEqual(self.cache._vram_used_gb, 15.0) + + # Try to add 10GB model - should evict LRU models + # Need to evict 5GB (15 + 10 - 20 = 5) + freed = self.cache._evict_lru_for_space(10.0) + + # model-a (5GB) should be evicted + self.assertEqual(freed, 5.0) + self.assertEqual(self.cache._vram_used_gb, 10.0) + self.assertFalse(self.cache.is_in_vram("model-a")) + + def test_get_stats(self) -> None: + """Test stats generation for heartbeat.""" + self.cache.register_model("model-vram-1", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.register_model("model-vram-2", ModelLocation.VRAM, 3.0, MagicMock()) + from pathlib import Path + self.cache.register_model("model-disk-1", ModelLocation.DISK, 10.0, disk_path=Path("/tmp/m")) + self.cache.mark_downloading("model-dl-1", 0.5) + + 
stats = self.cache.get_stats() + + self.assertIsInstance(stats, ModelCacheStats) + self.assertEqual(len(stats.vram_models), 2) + self.assertIn("model-vram-1", stats.vram_models) + self.assertIn("model-vram-2", stats.vram_models) + self.assertEqual(len(stats.disk_models), 1) + self.assertIn("model-disk-1", stats.disk_models) + self.assertEqual(len(stats.downloading_models), 1) + self.assertIn("model-dl-1", stats.downloading_models) + self.assertEqual(stats.vram_used_gb, 8.0) + self.assertEqual(stats.vram_model_count, 2) + self.assertEqual(stats.disk_model_count, 1) + self.assertEqual(stats.total_models, 4) + + def test_stats_to_dict(self) -> None: + """Test stats serialization to dict.""" + self.cache.register_model("model-a", ModelLocation.VRAM, 5.5, MagicMock()) + stats = self.cache.get_stats() + d = stats.to_dict() + + self.assertIsInstance(d, dict) + self.assertIn("vram_models", d) + self.assertIn("vram_used_gb", d) + self.assertEqual(d["vram_used_gb"], 5.5) + + def test_unload_model(self) -> None: + """Test unloading a model from cache.""" + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + self.assertTrue(self.cache.has_model("model-a")) + self.assertEqual(self.cache._vram_used_gb, 5.0) + + result = self.cache.unload_model("model-a") + self.assertTrue(result) + self.assertFalse(self.cache.has_model("model-a")) + self.assertEqual(self.cache._vram_used_gb, 0.0) + + def test_unload_nonexistent(self) -> None: + """Test unloading a model that doesn't exist.""" + result = self.cache.unload_model("nonexistent") + self.assertFalse(result) + + def test_mark_loaded_to_vram(self) -> None: + """Test marking a model as loaded to VRAM.""" + self.cache.mark_loaded_to_vram("model-a", MagicMock(), 8.0) + + self.assertTrue(self.cache.is_in_vram("model-a")) + self.assertEqual(self.cache._vram_used_gb, 8.0) + + def test_mark_cached_to_disk(self) -> None: + """Test marking a model as cached to disk.""" + from pathlib import Path + + # First load to VRAM 
+ self.cache.mark_loaded_to_vram("model-a", MagicMock(), 8.0) + self.assertTrue(self.cache.is_in_vram("model-a")) + + # Then mark as disk-cached (offloaded) + self.cache.mark_cached_to_disk("model-a", Path("/tmp/model-a"), 8.0) + + self.assertFalse(self.cache.is_in_vram("model-a")) + self.assertTrue(self.cache.is_on_disk("model-a")) + self.assertEqual(self.cache._vram_used_gb, 0.0) + + def test_can_fit_in_vram(self) -> None: + """Test checking if model can fit in VRAM.""" + # Empty cache with 20GB max + self.assertTrue(self.cache.can_fit_in_vram(10.0)) + self.assertTrue(self.cache.can_fit_in_vram(20.0)) + self.assertFalse(self.cache.can_fit_in_vram(25.0)) + + # With some models loaded + self.cache.register_model("model-a", ModelLocation.VRAM, 15.0, MagicMock()) + self.assertTrue(self.cache.can_fit_in_vram(5.0)) # 15 + 5 = 20 + self.assertTrue(self.cache.can_fit_in_vram(20.0)) # Can evict model-a + self.assertFalse(self.cache.can_fit_in_vram(25.0)) # Too big even after eviction + + def test_get_pipeline(self) -> None: + """Test getting a pipeline from cache.""" + pipeline = MagicMock() + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, pipeline) + + retrieved = self.cache.get_pipeline("model-a") + self.assertIs(retrieved, pipeline) + + # Getting pipeline should update LRU + self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.get_pipeline("model-a") + lru = self.cache._get_lru_vram_models() + self.assertEqual(lru[0], "model-b") # model-b is now LRU + + def test_get_pipeline_disk_model(self) -> None: + """Test getting pipeline for disk-cached model returns None.""" + from pathlib import Path + self.cache.register_model("model-a", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) + + retrieved = self.cache.get_pipeline("model-a") + self.assertIsNone(retrieved) + + def test_download_progress(self) -> None: + """Test download progress tracking.""" + self.cache.mark_downloading("model-a", 0.0) + model = 
self.cache._models.get("model-a") + self.assertIsNotNone(model) + self.assertEqual(model.location, ModelLocation.DOWNLOADING) + self.assertEqual(model.download_progress, 0.0) + + self.cache.update_download_progress("model-a", 0.5) + self.assertEqual(model.download_progress, 0.5) + + self.cache.update_download_progress("model-a", 1.0) + self.assertEqual(model.download_progress, 1.0) + + +class TestModelCacheEnvironment(unittest.TestCase): + """Test ModelCache configuration from environment.""" + + def test_env_config(self) -> None: + """Test that environment variables configure the cache.""" + import os + + with patch.dict(os.environ, { + "WORKER_MAX_VRAM_GB": "16", + "WORKER_VRAM_SAFETY_MARGIN_GB": "2.5", + }): + with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): + cache = ModelCache() + self.assertEqual(cache._max_vram_gb, 16.0) + self.assertEqual(cache._vram_safety_margin, 2.5) + + +class TestProgressiveAvailability(unittest.TestCase): + """Tests for progressive model availability.""" + + def setUp(self) -> None: + """Create a fresh ModelCache for each test.""" + with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): + self.cache = ModelCache(max_vram_gb=20.0) + + def test_are_models_available_all_ready(self) -> None: + """Test that available check passes when all models are ready.""" + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + from pathlib import Path + self.cache.register_model("model-b", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) + + # Both VRAM and disk models should be considered available + self.assertTrue(self.cache.are_models_available(["model-a"])) + self.assertTrue(self.cache.are_models_available(["model-b"])) + self.assertTrue(self.cache.are_models_available(["model-a", "model-b"])) + + def test_are_models_available_downloading(self) -> None: + """Test that available check fails when model is downloading.""" + 
self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.mark_downloading("model-b", 0.5) + + # model-a is available, model-b is not + self.assertTrue(self.cache.are_models_available(["model-a"])) + self.assertFalse(self.cache.are_models_available(["model-b"])) + self.assertFalse(self.cache.are_models_available(["model-a", "model-b"])) + + def test_are_models_available_missing(self) -> None: + """Test that available check fails for unknown models.""" + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + + self.assertTrue(self.cache.are_models_available(["model-a"])) + self.assertFalse(self.cache.are_models_available(["model-a", "model-unknown"])) + self.assertFalse(self.cache.are_models_available(["model-unknown"])) + + def test_get_available_models(self) -> None: + """Test getting list of available models.""" + self.cache.register_model("model-vram", ModelLocation.VRAM, 5.0, MagicMock()) + from pathlib import Path + self.cache.register_model("model-disk", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) + self.cache.mark_downloading("model-dl", 0.5) + + available = self.cache.get_available_models() + self.assertEqual(len(available), 2) + self.assertIn("model-vram", available) + self.assertIn("model-disk", available) + self.assertNotIn("model-dl", available) + + def test_get_downloading_models(self) -> None: + """Test getting list of downloading models.""" + self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) + self.cache.mark_downloading("model-b", 0.3) + self.cache.mark_downloading("model-c", 0.7) + + downloading = self.cache.get_downloading_models() + self.assertEqual(len(downloading), 2) + self.assertIn("model-b", downloading) + self.assertIn("model-c", downloading) + + def test_get_download_progress(self) -> None: + """Test getting download progress for a model.""" + self.cache.mark_downloading("model-a", 0.5) + self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, 
MagicMock()) + + self.assertEqual(self.cache.get_download_progress("model-a"), 0.5) + self.assertIsNone(self.cache.get_download_progress("model-b")) + self.assertIsNone(self.cache.get_download_progress("unknown")) + + def test_max_concurrent_downloads_config(self) -> None: + """Test max concurrent downloads configuration.""" + import os + + # Default value + self.assertEqual(self.cache.get_max_concurrent_downloads(), 2) + + # From environment + with patch.dict(os.environ, {"WORKER_MAX_CONCURRENT_DOWNLOADS": "4"}): + with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): + cache = ModelCache() + self.assertEqual(cache.get_max_concurrent_downloads(), 4) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_model_refs.py b/tests/test_model_refs.py index d829fd5..7cad5e9 100644 --- a/tests/test_model_refs.py +++ b/tests/test_model_refs.py @@ -52,18 +52,3 @@ def test_parse_hf_revision() -> None: def test_parse_invalid_hf() -> None: with pytest.raises(ValueError): parse_model_ref("hf:justonepart") - - -def test_cozy_canonical_is_unprefixed_repo_ref() -> None: - p = parse_model_ref("cozy:fancyfeast/llama-joycaption-beta-one-hf-llava@blake3:abcd") - assert p.scheme == "cozy" - assert p.cozy is not None - # Regression guard: canonical cozy refs must not prepend "cozy:". 
- assert p.canonical() == "fancyfeast/llama-joycaption-beta-one-hf-llava@blake3:abcd" - - -def test_parse_hf_double_prefix_tolerated() -> None: - p = parse_model_ref("hf:hf:owner/repo@main") - assert p.scheme == "hf" - assert p.hf is not None - assert p.canonical() == "hf:owner/repo@main" diff --git a/tests/test_payload_model_selection.py b/tests/test_payload_model_selection.py new file mode 100644 index 0000000..7fdf444 --- /dev/null +++ b/tests/test_payload_model_selection.py @@ -0,0 +1,78 @@ +import unittest + +import msgspec + +from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource +from gen_worker.worker import Worker + + +class _Payload(msgspec.Struct): + model: str + + +class TestPayloadModelSelection(unittest.TestCase): + def test_fixed_key_must_exist_in_mapping(self) -> None: + w = Worker( + user_module_names=[], + worker_jwt="dummy-worker-jwt", + manifest={ + "models": { + "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, + } + }, + ) + inj = InjectionSpec( + param_name="pipeline", + param_type=object, + model_ref=ModelRef(ModelRefSource.FIXED, "missing"), + ) + payload = _Payload(model="sd15") + with self.assertRaises(ValueError) as ctx: + w._resolve_model_id_for_injection("generate", inj, payload) + self.assertIn("unknown fixed model key", str(ctx.exception).lower()) + + def test_payload_key_must_exist_in_function_keyspace(self) -> None: + w = Worker( + user_module_names=[], + worker_jwt="dummy-worker-jwt", + manifest={ + "models_by_function": { + "generate": { + "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, + "flux": {"ref": "demo/flux", "dtypes": ["bf16"]}, + } + } + }, + ) + inj = InjectionSpec( + param_name="pipeline", + param_type=object, + model_ref=ModelRef(ModelRefSource.PAYLOAD, "model"), + ) + payload = _Payload(model="does-not-exist") + with self.assertRaises(ValueError) as ctx: + w._resolve_model_id_for_injection("generate", inj, payload) + self.assertIn("unknown model key", str(ctx.exception).lower()) + 
self.assertIn("sd15", str(ctx.exception)) + + def test_payload_key_resolves_to_repo_ref(self) -> None: + w = Worker( + user_module_names=[], + worker_jwt="dummy-worker-jwt", + manifest={ + "models_by_function": { + "generate": { + "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, + } + } + }, + ) + inj = InjectionSpec( + param_name="pipeline", + param_type=object, + model_ref=ModelRef(ModelRefSource.PAYLOAD, "model"), + ) + payload = _Payload(model="sd15") + out, key = w._resolve_model_id_for_injection("generate", inj, payload) + self.assertEqual(out, "cozy:demo/sd15:latest") + self.assertEqual(key, "sd15") diff --git a/tests/test_pipeline_thread_safety.py b/tests/test_pipeline_thread_safety.py new file mode 100644 index 0000000..661fa6d --- /dev/null +++ b/tests/test_pipeline_thread_safety.py @@ -0,0 +1,355 @@ +""" +Tests for thread-safe pipeline access in PipelineLoader. + +These tests verify that get_for_inference() properly creates thread-safe +pipeline copies with fresh schedulers to avoid concurrent access issues. 
+""" +import threading +import time +from typing import Any, Dict, List, Optional +from unittest.mock import MagicMock, patch, PropertyMock + +import pytest + +# Check if torch is available for skip markers +try: + import torch + TORCH_AVAILABLE = True +except ImportError: + TORCH_AVAILABLE = False + + +class MockSchedulerConfig: + """Mock scheduler config for testing.""" + def __init__(self) -> None: + self.num_train_timesteps = 1000 + self.beta_start = 0.0001 + self.beta_end = 0.02 + + +class MockScheduler: + """Mock scheduler that tracks instance creation.""" + _instance_count = 0 + + def __init__(self) -> None: + MockScheduler._instance_count += 1 + self.instance_id = MockScheduler._instance_count + self.config = MockSchedulerConfig() + # Simulate internal state that would cause issues if shared + self._timesteps: List[int] = [] + self._step_index: Optional[int] = None + + @classmethod + def from_config(cls, config: MockSchedulerConfig) -> "MockScheduler": + """Create new scheduler from config (like diffusers does).""" + scheduler = cls() + scheduler.config = config + return scheduler + + @classmethod + def reset_instance_count(cls) -> None: + cls._instance_count = 0 + + +class MockPipeline: + """Mock pipeline for testing thread-safe access.""" + _from_pipe_calls = 0 + + def __init__(self, scheduler: Optional[MockScheduler] = None) -> None: + self.scheduler = scheduler or MockScheduler() + self.unet = MagicMock() # Simulates heavy component + self.vae = MagicMock() # Simulates heavy component + + @classmethod + def from_pipe(cls, base_pipeline: "MockPipeline", scheduler: MockScheduler) -> "MockPipeline": + """Create new pipeline sharing components but with fresh scheduler.""" + MockPipeline._from_pipe_calls += 1 + new_pipeline = cls(scheduler=scheduler) + # Share heavy components + new_pipeline.unet = base_pipeline.unet + new_pipeline.vae = base_pipeline.vae + return new_pipeline + + @classmethod + def reset_call_count(cls) -> None: + cls._from_pipe_calls = 0 + 
+ +class MockLoadedPipeline: + """Mock LoadedPipeline container.""" + def __init__(self, pipeline: MockPipeline, model_id: str) -> None: + self.pipeline = pipeline + self.model_id = model_id + self.pipeline_class = "MockPipeline" + self.dtype = "float16" + self.size_gb = 10.0 + self.load_format = "safetensors" + + +class TestGetForInferenceLogic: + """ + Tests for get_for_inference() logic without requiring torch. + Uses direct method testing with mocked dependencies. + """ + + def setup_method(self) -> None: + """Reset mocks before each test.""" + MockScheduler.reset_instance_count() + MockPipeline.reset_call_count() + + def test_creates_fresh_scheduler_logic(self) -> None: + """Test that get_for_inference creates a fresh scheduler.""" + # Test the logic directly without PipelineLoader instantiation + base_scheduler = MockScheduler() + base_pipeline = MockPipeline(scheduler=base_scheduler) + + initial_count = MockScheduler._instance_count + + # Simulate what get_for_inference does + fresh_scheduler = base_pipeline.scheduler.from_config( + base_pipeline.scheduler.config + ) + task_pipeline = MockPipeline.from_pipe(base_pipeline, scheduler=fresh_scheduler) + + # Verify new scheduler was created + assert MockScheduler._instance_count == initial_count + 1 + assert task_pipeline.scheduler.instance_id != base_scheduler.instance_id + + def test_shares_heavy_components_logic(self) -> None: + """Test that heavy components are shared.""" + base_pipeline = MockPipeline() + original_unet = base_pipeline.unet + original_vae = base_pipeline.vae + + # Simulate what get_for_inference does + fresh_scheduler = base_pipeline.scheduler.from_config( + base_pipeline.scheduler.config + ) + task_pipeline = MockPipeline.from_pipe(base_pipeline, scheduler=fresh_scheduler) + + # Heavy components should be shared + assert task_pipeline.unet is original_unet + assert task_pipeline.vae is original_vae + # Scheduler should be different + assert task_pipeline.scheduler is not 
base_pipeline.scheduler + + +class TestConcurrentAccess: + """Tests for concurrent pipeline access patterns.""" + + def setup_method(self) -> None: + """Reset mocks before each test.""" + MockScheduler.reset_instance_count() + MockPipeline.reset_call_count() + + def test_concurrent_scheduler_creation_is_safe(self) -> None: + """Multiple concurrent scheduler creations should be independent.""" + base_pipeline = MockPipeline() + results: List[MockPipeline] = [] + errors: List[Exception] = [] + lock = threading.Lock() + + def create_task_pipeline() -> None: + try: + # Simulate what get_for_inference does + fresh_scheduler = base_pipeline.scheduler.from_config( + base_pipeline.scheduler.config + ) + task_pipeline = MockPipeline.from_pipe( + base_pipeline, scheduler=fresh_scheduler + ) + with lock: + results.append(task_pipeline) + except Exception as e: + with lock: + errors.append(e) + + # Spawn multiple threads + threads = [threading.Thread(target=create_task_pipeline) for _ in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + # All should succeed + assert len(errors) == 0, f"Got errors: {errors}" + assert len(results) == 10 + + # Each result should have a unique scheduler + scheduler_ids = [r.scheduler.instance_id for r in results] + assert len(set(scheduler_ids)) == 10 # All unique + + def test_concurrent_access_simulated_inference(self) -> None: + """ + Simulate concurrent inference to verify no state corruption. + + This test verifies that separate schedulers prevent the + 'IndexError: index N is out of bounds' that occurs when + multiple threads share a scheduler's internal state. 
+ """ + base_pipeline = MockPipeline() + errors: List[Exception] = [] + completed = 0 + lock = threading.Lock() + + def simulate_inference() -> None: + nonlocal completed + try: + # Create thread-safe pipeline (what get_for_inference does) + fresh_scheduler = base_pipeline.scheduler.from_config( + base_pipeline.scheduler.config + ) + task_pipeline = MockPipeline.from_pipe( + base_pipeline, scheduler=fresh_scheduler + ) + + # Simulate scheduler state modification during inference + task_pipeline.scheduler._timesteps = list(range(1000)) + task_pipeline.scheduler._step_index = 0 + + # Simulate stepping through inference + for i in range(50): + idx = task_pipeline.scheduler._step_index + if idx is not None and idx < len(task_pipeline.scheduler._timesteps): + # This would cause IndexError if scheduler is shared + _ = task_pipeline.scheduler._timesteps[idx] + task_pipeline.scheduler._step_index = i + 1 + time.sleep(0.001) # Simulate work + + with lock: + completed += 1 + + except Exception as e: + with lock: + errors.append(e) + + # Spawn concurrent inference threads + threads = [threading.Thread(target=simulate_inference) for _ in range(5)] + for t in threads: + t.start() + for t in threads: + t.join() + + # All should complete without errors + assert len(errors) == 0, f"Got errors: {errors}" + assert completed == 5 + + +class TestModelManagementInterface: + """Tests for ModelManagementInterface.get_for_inference()""" + + def test_default_implementation_calls_get_active_pipeline(self) -> None: + """Default get_for_inference should fall back to get_active_pipeline.""" + from gen_worker.model_interface import ModelManagementInterface + + class TestManager(ModelManagementInterface): + def __init__(self) -> None: + self.get_active_pipeline_called = False + + async def process_supported_models_config( + self, supported_model_ids: List[str], downloader_instance: Any + ) -> None: + pass + + async def load_model_into_vram(self, model_id: str) -> bool: + return True + + async 
def get_active_pipeline(self, model_id: str) -> Any: + self.get_active_pipeline_called = True + return MockPipeline() + + async def get_active_model_bundle(self, model_id: str) -> Any: + return None + + def get_vram_loaded_models(self) -> List[str]: + return [] + + manager = TestManager() + result = manager.get_for_inference("test-model") + + assert manager.get_active_pipeline_called + assert result is not None + + +@pytest.mark.skipif(not TORCH_AVAILABLE, reason="torch not installed") +class TestPipelineLoaderIntegration: + """Integration tests that require torch.""" + + def setup_method(self) -> None: + """Reset mocks before each test.""" + MockScheduler.reset_instance_count() + MockPipeline.reset_call_count() + + def test_get_for_inference_with_real_loader(self) -> None: + """Test get_for_inference with actual PipelineLoader.""" + from gen_worker.pipeline_loader import PipelineLoader + + loader = PipelineLoader() + + # Create mock loaded pipeline + base_pipeline = MockPipeline() + loaded = MockLoadedPipeline(base_pipeline, "test-model") + loader._loaded_pipelines = {"test-model": loaded} + + result = loader.get_for_inference("test-model") + + assert result is not None + # Should have different scheduler + assert result.scheduler is not base_pipeline.scheduler + # Should share heavy components + assert result.unet is base_pipeline.unet + + def test_get_for_inference_returns_none_for_missing(self) -> None: + """Test get_for_inference returns None for unloaded models.""" + from gen_worker.pipeline_loader import PipelineLoader + + loader = PipelineLoader() + loader._loaded_pipelines = {} + + result = loader.get_for_inference("nonexistent") + + assert result is None + + def test_concurrent_get_for_inference_with_loader(self) -> None: + """Test concurrent get_for_inference calls with real PipelineLoader.""" + from gen_worker.pipeline_loader import PipelineLoader + + loader = PipelineLoader() + + # Create mock loaded pipeline + base_pipeline = MockPipeline() + loaded = 
MockLoadedPipeline(base_pipeline, "test-model") + loader._loaded_pipelines = {"test-model": loaded} + + results: List[Any] = [] + errors: List[Exception] = [] + lock = threading.Lock() + + def get_pipeline() -> None: + try: + result = loader.get_for_inference("test-model") + with lock: + results.append(result) + except Exception as e: + with lock: + errors.append(e) + + # Spawn concurrent threads + threads = [threading.Thread(target=get_pipeline) for _ in range(10)] + for t in threads: + t.start() + for t in threads: + t.join() + + assert len(errors) == 0, f"Got errors: {errors}" + assert len(results) == 10 + + # Each should have unique scheduler but share heavy components + scheduler_ids = set() + for r in results: + assert r is not None + scheduler_ids.add(r.scheduler.instance_id) + assert r.unet is base_pipeline.unet # Shared + + # All schedulers should be unique + assert len(scheduler_ids) == 10 diff --git a/tests/test_realtime_socket.py b/tests/test_realtime_socket.py new file mode 100644 index 0000000..66a9067 --- /dev/null +++ b/tests/test_realtime_socket.py @@ -0,0 +1,83 @@ +import time +import unittest + +from gen_worker.worker import ActionContext, RealtimeSocket, Worker, pb +from gen_worker.decorators import worker_websocket, ResourceRequirements + + +@worker_websocket(ResourceRequirements()) +async def echo_ws(ctx: ActionContext, sock: RealtimeSocket) -> None: + await sock.send_json({"status": "ready"}) + async for data in sock.iter_bytes(): + await sock.send_bytes(data) + + +class TestRealtimeSocket(unittest.TestCase): + def _make_worker(self) -> Worker: + w = Worker.__new__(Worker) + import threading + + w.owner = "tenant-1" + w._emit_progress_event = lambda e: None # type: ignore[method-assign] + w._runtime_loaders = {} + w._custom_runtime_cache = {} + w._custom_runtime_locks = {} + w._fixed_model_id_by_key = {} + w._payload_model_id_by_key_by_function = {} + w._fixed_model_spec_by_key = {} + w._payload_model_spec_by_key_by_function = {} + 
w._release_allowed_model_ids = None + w._model_manager = None + w._realtime_sessions = {} + w._realtime_lock = threading.Lock() + w._sent = [] + w._send_message = lambda msg: w._sent.append(msg) # type: ignore[method-assign] + w._ws_specs = {} + w._discovered_resources = {} + w._inspect_websocket_spec = Worker._inspect_websocket_spec.__get__(w, Worker) # type: ignore[attr-defined] + w._resolve_injected_value = Worker._resolve_injected_value.__get__(w, Worker) # type: ignore[attr-defined] + w._handle_realtime_open_cmd = Worker._handle_realtime_open_cmd.__get__(w, Worker) # type: ignore[attr-defined] + w._handle_realtime_frame = Worker._handle_realtime_frame.__get__(w, Worker) # type: ignore[attr-defined] + w._handle_realtime_close_cmd = Worker._handle_realtime_close_cmd.__get__(w, Worker) # type: ignore[attr-defined] + return w + + def test_realtime_echo(self) -> None: + w = self._make_worker() + spec = w._inspect_websocket_spec(echo_ws) # type: ignore[arg-type] + w._ws_specs[spec.name] = spec + + w._handle_realtime_open_cmd( + pb.RealtimeOpenCommand(session_id="s1", function_name=spec.name, owner="tenant-1") + ) + + # Wait for ready JSON frame. + deadline = time.time() + 2.0 + ready = False + while time.time() < deadline: + for m in list(w._sent): + if getattr(m, "realtime_frame", None) and m.realtime_frame.is_text: + if b"ready" in m.realtime_frame.data: + ready = True + break + if ready: + break + time.sleep(0.01) + self.assertTrue(ready) + + # Send binary bytes frame and expect an echoed binary frame back. 
+ w._handle_realtime_frame(pb.RealtimeFrame(session_id="s1", data=b"abc", is_text=False)) + + deadline = time.time() + 2.0 + echoed = False + while time.time() < deadline: + for m in list(w._sent): + if getattr(m, "realtime_frame", None) and not m.realtime_frame.is_text: + if m.realtime_frame.data == b"abc": + echoed = True + break + if echoed: + break + time.sleep(0.01) + self.assertTrue(echoed) + + w._handle_realtime_close_cmd(pb.RealtimeCloseCommand(session_id="s1", reason="end")) diff --git a/tests/test_runtime_batching_config_cmd.py b/tests/test_runtime_batching_config_cmd.py new file mode 100644 index 0000000..256a189 --- /dev/null +++ b/tests/test_runtime_batching_config_cmd.py @@ -0,0 +1,113 @@ +import json +from types import SimpleNamespace + +from gen_worker.decorators import ResourceRequirements +from gen_worker.pb import worker_scheduler_pb2 as pb +from gen_worker.worker import Worker + + +def test_runtime_batching_config_cmd_applies_and_acks(monkeypatch) -> None: + w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") + w._task_specs["caption"] = SimpleNamespace(output_mode="incremental") + + sent = [] + monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) + + cmd = pb.RuntimeBatchingConfigCommand( + config=pb.RuntimeBatchingConfig( + function_name="caption", + batch_size_target=6, + batch_size_min=2, + batch_size_max=8, + prefetch_depth=3, + max_wait_ms=120, + version=3, + ) + ) + w._handle_runtime_batching_config_cmd(cmd) + + cfg = w._runtime_batching_cfg_for_function("caption") + assert cfg["function_name"] == "caption" + assert cfg["batch_size_target"] == 6 + assert cfg["batch_size_min"] == 2 + assert cfg["batch_size_max"] == 8 + assert cfg["prefetch_depth"] == 3 + assert cfg["max_wait_ms"] == 120 + assert cfg["version"] == 3 + + results = [m.runtime_batching_config_result for m in sent if m.HasField("runtime_batching_config_result")] + assert len(results) == 1 + assert results[0].function_name == "caption" + assert 
results[0].version == 3 + assert results[0].success is True + assert results[0].error_message == "" + + +def test_runtime_batching_config_cmd_stale_version_is_ignored(monkeypatch) -> None: + w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") + w._task_specs["caption"] = SimpleNamespace(output_mode="single") + w._runtime_batching_config_by_function["caption"] = { + "function_name": "caption", + "batch_size_target": 5, + "batch_size_min": 1, + "batch_size_max": 6, + "prefetch_depth": 2, + "max_wait_ms": 100, + "version": 4, + } + + sent = [] + monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) + + msg = pb.WorkerSchedulerMessage( + runtime_batching_config_cmd=pb.RuntimeBatchingConfigCommand( + config=pb.RuntimeBatchingConfig( + function_name="caption", + batch_size_target=2, + batch_size_min=1, + batch_size_max=2, + prefetch_depth=1, + max_wait_ms=50, + version=3, # stale + ) + ) + ) + w._process_message(msg) + + cfg = w._runtime_batching_cfg_for_function("caption") + assert cfg["version"] == 4 + assert cfg["batch_size_target"] == 5 + + results = [m.runtime_batching_config_result for m in sent if m.HasField("runtime_batching_config_result")] + assert len(results) == 1 + assert results[0].success is True + assert results[0].version == 3 + + +def test_function_capabilities_event_emits_when_changed(monkeypatch) -> None: + w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") + w._discovered_resources["caption"] = ResourceRequirements( + batch_size_min=1, + batch_size_target=4, + batch_size_max=8, + prefetch_depth=2, + max_wait_ms=150, + memory_hint_mb=12288, + stage_profile="io_gpu_disaggregated", + stage_traits=["decode_prefetch", "gpu_decode_overlap"], + ) + w._task_specs["caption"] = SimpleNamespace(output_mode="incremental") + + sent = [] + monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) + + w._emit_function_capabilities_event() + w._emit_function_capabilities_event() # second send should be deduped + + events 
= [m.worker_event for m in sent if m.HasField("worker_event")] + assert len(events) == 1 + assert events[0].event_type == "worker.function_capabilities" + payload = json.loads(bytes(events[0].payload_json or b"{}").decode("utf-8")) + fns = list(payload.get("functions") or []) + assert len(fns) == 1 + assert "max_inflight_requests" not in fns[0] diff --git a/tests/test_scheduler_model_scope.py b/tests/test_scheduler_model_scope.py new file mode 100644 index 0000000..2e38982 --- /dev/null +++ b/tests/test_scheduler_model_scope.py @@ -0,0 +1,36 @@ +import msgspec + +from gen_worker.pb import worker_scheduler_pb2 as pb +from gen_worker.worker import Worker + + +class _Payload(msgspec.Struct): + model: str + + +def test_scheduler_cannot_widen_manifest_model_scope(monkeypatch) -> None: + # Tenant-declared scope via baked manifest mapping. + w = Worker( + user_module_names=[], + worker_jwt="dummy-worker-jwt", + manifest={ + "models": { + "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, + } + }, + ) + + # Avoid background download threads in this unit test. + monkeypatch.setattr(w, "_start_startup_prefetch", lambda *_args, **_kwargs: None) + + # Scheduler tries to widen scope (should be ignored / intersected away). + msg = pb.WorkerSchedulerMessage( + endpoint_config=pb.EndpointConfig( + supported_repo_refs=["cozy:evil/evil:latest"], + required_variant_refs=[], + ) + ) + w._process_message(msg) + + # Worker must not widen scope outside the tenant manifest mapping. 
+ assert w._release_allowed_model_ids == {"cozy:demo/sd15:latest"} diff --git a/tests/test_signature_contract_and_incremental.py b/tests/test_signature_contract_and_incremental.py new file mode 100644 index 0000000..33d2e5c --- /dev/null +++ b/tests/test_signature_contract_and_incremental.py @@ -0,0 +1,178 @@ +import json +import unittest +from typing import Annotated, Iterator + +import msgspec + +from gen_worker.injection import ModelRef, ModelRefSource as Src +from gen_worker.worker import ActionContext, Worker + + +class Input(msgspec.Struct): + text: str + + +class Delta(msgspec.Struct): + delta: str + + +class InputWithModel(msgspec.Struct): + text: str + model_key: str + + +class Output(msgspec.Struct): + model_id: str + + +class FakeModel: + def __init__(self, model_id: str) -> None: + self.model_id = model_id + + @classmethod + def from_pretrained(cls, model_id: str) -> "FakeModel": + return cls(model_id) + + +def _make_worker() -> Worker: + w = Worker.__new__(Worker) + import threading + + w._gpu_busy_lock = threading.Lock() + w._is_gpu_busy = False + w._has_gpu = False + w.max_output_bytes = 0 + w._model_manager = None + w._runtime_loaders = {} + w._custom_runtime_cache = {} + w._custom_runtime_locks = {} + w._fixed_model_id_by_key = {} + w._payload_model_id_by_key_by_function = {} + w._fixed_model_spec_by_key = {} + w._payload_model_spec_by_key_by_function = {} + w._release_allowed_model_ids = None + w._active_tasks_lock = threading.Lock() + w._active_tasks = {} + w._request_batch_context_lock = threading.Lock() + w._request_batch_context = {} + w._send_message = lambda msg: w._sent.append(msg) # type: ignore[method-assign] + w._sent = [] + w._stop_event = threading.Event() + w._running = True + w._materialize_assets = lambda ctx, obj: None # type: ignore[method-assign] + w._discovered_resources = {} + w._model_cache = None + return w + + +class TestContractAndIncremental(unittest.TestCase): + def test_rejects_missing_return_annotation(self) -> None: + 
def bad(ctx: ActionContext, payload: Input): # type: ignore[no-untyped-def] + return Delta(delta="x") + + w = _make_worker() + with self.assertRaises(ValueError): + w._inspect_task_spec(bad) # type: ignore[arg-type] + + def test_incremental_output_emits_deltas_and_completed(self) -> None: + def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: + yield Delta(delta=payload.text) + yield Delta(delta="!") + + w = _make_worker() + spec = w._inspect_task_spec(stream) # type: ignore[arg-type] + self.assertEqual(spec.output_mode, "incremental") + + ctx = ActionContext("run-1", emitter=lambda _e: None) + payload = Input(text="hi") + b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) + w._execute_task(ctx, spec, b) + + # Capture incremental output messages (typed token events preferred, legacy worker_event fallback). + events = [] + for m in w._sent: + if not hasattr(m, "WhichOneof"): + continue + msg_type = m.WhichOneof("msg") + if msg_type == "incremental_token_delta": + events.append(("output.delta", m.incremental_token_delta.payload_json)) + continue + if msg_type == "incremental_token_stream_done": + events.append(("output.completed", b"{}")) + continue + if msg_type == "worker_event": + evt = m.worker_event + # Ignore non-output events (e.g. metrics.*). 
+ if not str(evt.event_type or "").startswith("output."): + continue + events.append((evt.event_type, evt.payload_json)) + + self.assertGreaterEqual(len(events), 3) + self.assertEqual(events[0][0], "output.delta") + self.assertEqual(json.loads(events[0][1].decode("utf-8"))["delta"], "hi") + self.assertEqual(events[1][0], "output.delta") + self.assertEqual(json.loads(events[1][1].decode("utf-8"))["delta"], "!") + self.assertEqual(events[2][0], "output.completed") + + def test_incremental_output_emits_typed_token_messages(self) -> None: + def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: + yield Delta(delta=payload.text) + + w = _make_worker() + spec = w._inspect_task_spec(stream) # type: ignore[arg-type] + + ctx = ActionContext("run-typed-1", emitter=lambda _e: None) + payload = Input(text="hello") + b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) + w._execute_task(ctx, spec, b) + + msg_types = [m.WhichOneof("msg") for m in w._sent if hasattr(m, "WhichOneof")] + self.assertIn("incremental_token_delta", msg_types) + self.assertIn("incremental_token_stream_done", msg_types) + + def test_payload_model_key_resolves_via_endpoint_map(self) -> None: + def fn( + ctx: ActionContext, + model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], + payload: InputWithModel, + ) -> Output: + return Output(model_id=model.model_id) + + w = _make_worker() + w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo"}} + w._release_allowed_model_ids = {"google/foo"} + spec = w._inspect_task_spec(fn) # type: ignore[arg-type] + ctx = ActionContext("run-2", emitter=lambda _e: None) + payload = InputWithModel(text="x", model_key="a") + b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) + w._execute_task(ctx, spec, b) + + run_results = [m.run_result for m in w._sent if hasattr(m, "WhichOneof") and m.WhichOneof("msg") == "run_result"] + self.assertEqual(len(run_results), 1) + rr = run_results[0] + self.assertTrue(rr.success) + out = 
msgspec.msgpack.decode(rr.output_payload, type=dict) + self.assertEqual(out["model_id"], "google/foo") + + def test_payload_model_key_rejects_not_allowlisted(self) -> None: + def fn( + ctx: ActionContext, + model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], + payload: InputWithModel, + ) -> Output: + return Output(model_id=model.model_id) + + w = _make_worker() + w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo", "b": "google/bar"}} + w._release_allowed_model_ids = {"google/foo"} + spec = w._inspect_task_spec(fn) # type: ignore[arg-type] + ctx = ActionContext("run-3", emitter=lambda _e: None) + payload = InputWithModel(text="x", model_key="b") + b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) + w._execute_task(ctx, spec, b) + + run_results = [m.run_result for m in w._sent if hasattr(m, "WhichOneof") and m.WhichOneof("msg") == "run_result"] + self.assertEqual(len(run_results), 1) + rr = run_results[0] + self.assertFalse(rr.success) + self.assertEqual(rr.error_type, "validation") diff --git a/tests/test_startup_model_prefetch.py b/tests/test_startup_model_prefetch.py index 354be6d..94be301 100644 --- a/tests/test_startup_model_prefetch.py +++ b/tests/test_startup_model_prefetch.py @@ -43,7 +43,6 @@ def test_startup_prefetch_warms_disk_and_reports_disk_models(tmp_path: Path, mon server.start() variant_ref = "cozy:demo/repo@blake3:snap-1" - variant_ref_canon = "demo/repo@blake3:snap-1" w = Worker( scheduler_addr=f"127.0.0.1:{grpc_port}", @@ -57,7 +56,7 @@ def test_startup_prefetch_warms_disk_and_reports_disk_models(tmp_path: Path, mon sess = orch.get_session(timeout_s=30.0) assert sess is not None - cfg = pb.ReleaseArtifactConfig( + cfg = pb.EndpointConfig( supported_repo_refs=[variant_ref], required_variant_refs=[variant_ref], resolved_cozy_models_by_variant_ref={ @@ -74,14 +73,13 @@ def test_startup_prefetch_warms_disk_and_reports_disk_models(tmp_path: Path, mon ) }, ) - 
sess.send(pb.WorkerSchedulerMessage(release_artifact_config=cfg)) + sess.send(pb.WorkerSchedulerMessage(endpoint_config=cfg)) # The worker prefetch thread triggers an immediate registration update after caching. start = time.monotonic() saw_started = False saw_completed = False disk_ready = False - saw_cache_only_supports_model_loading_false = False while time.monotonic() - start < 30: msg = sess.recv(timeout_s=0.5) if msg is None: @@ -94,16 +92,12 @@ def test_startup_prefetch_warms_disk_and_reports_disk_models(tmp_path: Path, mon saw_completed = True if not msg.HasField("worker_registration"): if disk_ready and saw_started and saw_completed: - assert saw_cache_only_supports_model_loading_false return continue - if not msg.worker_registration.resources.supports_model_loading: - saw_cache_only_supports_model_loading_false = True disk_models = list(msg.worker_registration.resources.disk_models) - if variant_ref_canon in disk_models: + if variant_ref in disk_models: disk_ready = True if disk_ready and saw_started and saw_completed: - assert saw_cache_only_supports_model_loading_false return raise AssertionError("timed out waiting for worker to report disk_models after startup prefetch") diff --git a/tests/test_tensorhub_toml.py b/tests/test_tensorhub_toml.py index 85ebc40..04ad01b 100644 --- a/tests/test_tensorhub_toml.py +++ b/tests/test_tensorhub_toml.py @@ -92,23 +92,6 @@ def test_invalid_dtype_rejected(self) -> None: with self.assertRaises(ValueError): load_tensorhub_toml(p) - def test_nvfp4_dtype_accepted(self) -> None: - with tempfile.TemporaryDirectory() as td: - p = Path(td) / "endpoint.toml" - p.write_text( - """ -schema_version = 1 -name = "x" -main = "x.main" - -[models] -m = { ref = "o/r", dtypes = ["nvfp4"] } -""".lstrip(), - encoding="utf-8", - ) - cfg = load_tensorhub_toml(p) - self.assertEqual(cfg.models["m"].dtypes, ("nvfp4",)) - def test_invalid_cuda_constraint_rejected(self) -> None: with tempfile.TemporaryDirectory() as td: p = Path(td) / 
"endpoint.toml" diff --git a/tests/test_trainer_checkpointing.py b/tests/test_trainer_checkpointing.py new file mode 100644 index 0000000..8ed9782 --- /dev/null +++ b/tests/test_trainer_checkpointing.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + + +def test_save_and_load_trainable_module_checkpoint_roundtrip(tmp_path: Path) -> None: + torch = pytest.importorskip("torch") + import torch.nn as nn + + from gen_worker.trainer import load_trainable_module_checkpoint, save_trainable_module_checkpoint + + module = nn.Linear(4, 2, bias=True) + optimizer = torch.optim.AdamW(module.parameters(), lr=1e-3) + + # Warm optimizer state so optimizer checkpointing path is exercised. + x = torch.randn(3, 4) + y = torch.randn(3, 2) + loss = ((module(x) - y) ** 2).mean() + loss.backward() + optimizer.step() + optimizer.zero_grad(set_to_none=True) + + checkpoint_dir = tmp_path / "step-0001" + meta = save_trainable_module_checkpoint( + module=module, + optimizer=optimizer, + output_dir=str(checkpoint_dir), + step=1, + final=False, + model_name_or_path="runwayml/stable-diffusion-v1-5", + ) + + assert "primary_path" in meta + primary_path = Path(str(meta["primary_path"])) + assert primary_path.exists() + assert (checkpoint_dir / "checkpoint_meta.json").exists() + + saved_weight = module.weight.detach().clone() + saved_bias = module.bias.detach().clone() + + # Corrupt params then verify load restores trainable tensors. 
+ with torch.no_grad(): + module.weight.add_(10.0) + module.bias.sub_(10.0) + + load_trainable_module_checkpoint( + module=module, + optimizer=optimizer, + checkpoint_dir=str(checkpoint_dir), + ) + assert torch.allclose(module.weight.detach(), saved_weight) + assert torch.allclose(module.bias.detach(), saved_bias) diff --git a/tests/test_trainer_runtime_orchestrated.py b/tests/test_trainer_runtime_orchestrated.py new file mode 100644 index 0000000..b7da66b --- /dev/null +++ b/tests/test_trainer_runtime_orchestrated.py @@ -0,0 +1,448 @@ +from __future__ import annotations + +import io +import json +from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer +from pathlib import Path +import sys +import threading +import time +import types +from typing import Any + +import pytest + +from gen_worker.trainer.orchestrated import StartupContractError +from gen_worker.trainer.runtime import run_training_runtime_from_env + + +def _register_trainer_module(monkeypatch: pytest.MonkeyPatch, module_name: str, class_name: str) -> str: + mod = types.ModuleType(module_name) + + class _Trainer: + def setup(self, ctx) -> None: + _ = ctx + + def configure(self, ctx) -> dict[str, object]: + _ = ctx + return {"loaded": False} + + def prepare_batch(self, raw_batch: object, state: dict[str, object], ctx) -> object: + _ = state + _ = ctx + return raw_batch + + def train_step(self, prepared_batch: object, state: dict[str, object], ctx): + _ = prepared_batch + _ = ctx + loaded = 1.0 if bool(state.get("loaded")) else 0.0 + from gen_worker.trainer import StepResult + + return StepResult(metrics={"train/loss": loaded}) + + def state_dict(self, state: dict[str, object]) -> dict[str, object]: + return dict(state) + + def load_state_dict(self, state: dict[str, object], payload: dict[str, object], ctx) -> None: + _ = ctx + state.update(payload) + + _Trainer.__name__ = class_name + setattr(mod, class_name, _Trainer) + monkeypatch.setitem(sys.modules, module_name, mod) + return 
f"{module_name}:{class_name}" + + +def _parquet_bytes() -> bytes: + pa = pytest.importorskip("pyarrow") + pq = pytest.importorskip("pyarrow.parquet") + table = pa.table({"image_ref": ["a", "b"], "caption": ["x", "y"]}) + buf = io.BytesIO() + pq.write_table(table, buf) + return buf.getvalue() + + +class _TestHTTPHandler(BaseHTTPRequestHandler): + auth_token = "" + dataset_bytes = b"" + resume_payload = b"{}" + fail_paths: set[str] = set() + posts: list[dict[str, Any]] = [] + puts: list[dict[str, Any]] = [] + + def log_message(self, format: str, *args: object) -> None: # noqa: A003 + return + + def _authorized(self) -> bool: + token = str(self.auth_token or "").strip() + if not token: + return True + got = str(self.headers.get("Authorization") or "").strip() + return got == f"Bearer {token}" + + def do_GET(self) -> None: # noqa: N802 + if not self._authorized(): + self.send_response(401) + self.end_headers() + return + if self.path == "/inputs/train.parquet": + body = self.dataset_bytes + elif self.path == "/inputs/resume.json": + body = self.resume_payload + else: + self.send_response(404) + self.end_headers() + return + self.send_response(200) + self.send_header("Content-Type", "application/octet-stream") + self.send_header("Content-Length", str(len(body))) + self.end_headers() + self.wfile.write(body) + + def do_POST(self) -> None: # noqa: N802 + if not self._authorized(): + self.send_response(401) + self.end_headers() + return + length = int(self.headers.get("Content-Length") or "0") + raw = self.rfile.read(length) if length > 0 else b"{}" + payload = json.loads(raw.decode("utf-8") or "{}") + self.posts.append({"path": self.path, "payload": payload}) + if self.path in self.fail_paths: + self.send_response(500) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(b'{"error":"forced failure"}') + return + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.end_headers() + 
self.wfile.write(b'{"ok":true}') + + def do_PUT(self) -> None: # noqa: N802 + if not self._authorized(): + self.send_response(401) + self.end_headers() + return + length = int(self.headers.get("Content-Length") or "0") + body = self.rfile.read(length) if length > 0 else b"" + self.puts.append( + { + "path": self.path, + "size": len(body), + "content_type": str(self.headers.get("Content-Type") or ""), + } + ) + if self.path in self.fail_paths: + self.send_response(500) + self.send_header("Content-Type", "application/json") + self.end_headers() + self.wfile.write(b'{"error":"forced failure"}') + return + payload = { + "ref": self.path.removeprefix("/api/v1/file/"), + "sha256": "testsha256", + "size_bytes": len(body), + } + raw = json.dumps(payload).encode("utf-8") + self.send_response(200) + self.send_header("Content-Type", "application/json") + self.send_header("Content-Length", str(len(raw))) + self.end_headers() + self.wfile.write(raw) + + +def _start_test_server(token: str, dataset_bytes: bytes, resume_payload: bytes, fail_paths: set[str] | None = None) -> tuple[ThreadingHTTPServer, str]: + _TestHTTPHandler.auth_token = token + _TestHTTPHandler.dataset_bytes = dataset_bytes + _TestHTTPHandler.resume_payload = resume_payload + _TestHTTPHandler.fail_paths = set(fail_paths or set()) + _TestHTTPHandler.posts = [] + _TestHTTPHandler.puts = [] + server = ThreadingHTTPServer(("127.0.0.1", 0), _TestHTTPHandler) + thread = threading.Thread(target=server.serve_forever, daemon=True) + thread.start() + host, port = server.server_address + return server, f"http://{host}:{port}" + + +def test_trainer_runtime_startup_requires_capability_token(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_startup_mod", "StartupTrainer") + spec = {"request_id": "run-orch-startup", "trainer": trainer_import, "max_steps": 1, "mock_batches": [1]} + spec_path = tmp_path / "trainer_job.json" + 
spec_path.write_text(json.dumps(spec), encoding="utf-8") + + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") + monkeypatch.delenv("TRAINER_CAPABILITY_TOKEN", raising=False) + + with pytest.raises(StartupContractError, match="startup.missing_capability_token"): + run_training_runtime_from_env() + + +def test_trainer_runtime_orchestrated_happy_path_with_materialize_resume_and_uploads( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_happy_mod", "HappyTrainer") + token = "cap-123" + resume_payload = json.dumps({"state": {"loaded": True}}).encode("utf-8") + server, base = _start_test_server(token=token, dataset_bytes=_parquet_bytes(), resume_payload=resume_payload) + try: + events = tmp_path / "events.jsonl" + ckpt = tmp_path / "ckpt" + samples = tmp_path / "samples" + spec = { + "request_id": "run-orch-happy", + "trainer": trainer_import, + "trainer_api_version": "v1", + "max_steps": 2, + "metric_every": 1, + "checkpoint_every": 1, + "sample_every": 1, + "dataset": {"batch_size": 1, "readahead": 1, "columns": ["image_ref", "caption"]}, + "inputs": { + "dataset_parquet_refs": [f"{base}/inputs/train.parquet"], + "resume_checkpoint_ref": f"{base}/inputs/resume.json", + }, + } + spec_path = tmp_path / "trainer_job_happy.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + + monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") + monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) + monkeypatch.setenv("TRAINER_SAMPLES_DIR", str(samples)) + monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") + monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") + monkeypatch.setenv("TRAINER_UPLOAD_SAMPLE_URL", 
f"{base}/upload/sample") + monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") + + assert run_training_runtime_from_env() == 0 + lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] + assert any(x.get("event") == "started" for x in lines) + assert any(x.get("event") == "metric" and x.get("name") == "train/loss" and float(x.get("value", -1)) == 1.0 for x in lines) + assert any(x.get("event") == "completed" for x in lines) + assert all(x.get("schema_version") == "trainer_event.v1" for x in lines) + + posted_paths = [x["path"] for x in _TestHTTPHandler.posts] + assert "/upload/metrics" in posted_paths + assert "/upload/checkpoint" in posted_paths + assert "/upload/sample" in posted_paths + assert "/upload/terminal" in posted_paths + terminal = [x["payload"] for x in _TestHTTPHandler.posts if x["path"] == "/upload/terminal"][-1] + assert terminal["status"] == "completed" + finally: + server.shutdown() + server.server_close() + + +def test_trainer_runtime_orchestrated_uploads_checkpoint_bytes_to_tensorhub( + monkeypatch: pytest.MonkeyPatch, tmp_path: Path +) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_upload_bytes_mod", "UploadBytesTrainer") + token = "cap-bytes-1" + server, base = _start_test_server(token=token, dataset_bytes=_parquet_bytes(), resume_payload=b"{}") + try: + events = tmp_path / "events.jsonl" + ckpt = tmp_path / "ckpt" + samples = tmp_path / "samples" + spec = { + "request_id": "run-orch-upload-bytes", + "owner": "00000000-0000-0000-0000-000000000001", + "trainer": trainer_import, + "trainer_api_version": "v1", + "max_steps": 1, + "metric_every": 1, + "checkpoint_every": 1, + "sample_every": 0, + "mock_batches": [1], + } + spec_path = tmp_path / "trainer_job_upload_bytes.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + + monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") + monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) + 
monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) + monkeypatch.setenv("TRAINER_SAMPLES_DIR", str(samples)) + monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") + monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") + monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") + monkeypatch.setenv("TENSORHUB_URL", base) + + assert run_training_runtime_from_env() == 0 + + assert _TestHTTPHandler.puts, "expected PUT uploads to tensorhub file API" + assert any("/api/v1/file/v1/00000000-0000-0000-0000-000000000001/runs/run-orch-upload-bytes/checkpoints/" in x["path"] for x in _TestHTTPHandler.puts) + + terminal = [x["payload"] for x in _TestHTTPHandler.posts if x["path"] == "/upload/terminal"][-1] + assert terminal["status"] == "completed" + assert terminal["final_checkpoint_ref"] != "" + assert terminal["final_checkpoint_sha256"] == "testsha256" + finally: + server.shutdown() + server.server_close() + + +def test_trainer_runtime_cancel_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_cancel_mod", "CancelTrainer") + spec = {"request_id": "run-orch-cancel", "trainer": trainer_import, "max_steps": 3, "metric_every": 1, "mock_batches": [1, 2, 3]} + spec_path = tmp_path / "trainer_job_cancel.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + events = tmp_path / "events.jsonl" + cancel_file = tmp_path / "cancel.flag" + cancel_file.write_text("1", encoding="utf-8") + + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_CANCEL_FILE", str(cancel_file)) + + with pytest.raises(Exception, match="canceled"): + run_training_runtime_from_env() + + lines = [json.loads(x) for x in 
events.read_text(encoding="utf-8").splitlines() if x.strip()] + failed = [x for x in lines if x.get("event") == "failed"] + assert failed + assert "canceled" in str(failed[-1].get("error", "")).lower() + + +def test_trainer_runtime_upload_failure_reports_upload_category(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_upload_fail_mod", "UploadFailTrainer") + token = "cap-up-1" + server, base = _start_test_server( + token=token, + dataset_bytes=_parquet_bytes(), + resume_payload=json.dumps({"state": {"loaded": True}}).encode("utf-8"), + fail_paths={"/upload/sample"}, + ) + try: + spec = { + "request_id": "run-orch-upload-fail", + "trainer": trainer_import, + "max_steps": 1, + "metric_every": 1, + "checkpoint_every": 1, + "sample_every": 1, + "mock_batches": [1], + } + spec_path = tmp_path / "trainer_job_upload_fail.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + events = tmp_path / "events.jsonl" + + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) + monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") + monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") + monkeypatch.setenv("TRAINER_UPLOAD_SAMPLE_URL", f"{base}/upload/sample") + + with pytest.raises(Exception, match="upload"): + run_training_runtime_from_env() + + lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] + failed = [x for x in lines if x.get("event") == "failed"] + assert failed + assert str(failed[-1].get("error", "")).startswith("upload:") + finally: + server.shutdown() + server.server_close() + + +def test_trainer_runtime_timeout_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + mod = types.ModuleType("tmp_orch_timeout_mod") + + class SlowTrainer: + def setup(self, ctx) -> None: 
+ _ = ctx + + def configure(self, ctx) -> dict[str, object]: + _ = ctx + return {} + + def prepare_batch(self, raw_batch: object, state: dict[str, object], ctx) -> object: + _ = state + _ = ctx + return raw_batch + + def train_step(self, prepared_batch: object, state: dict[str, object], ctx): + _ = prepared_batch + _ = state + _ = ctx + time.sleep(1.1) + from gen_worker.trainer import StepResult + + return StepResult(metrics={"train/loss": 0.1}) + + def state_dict(self, state: dict[str, object]) -> dict[str, object]: + return dict(state) + + def load_state_dict(self, state: dict[str, object], payload: dict[str, object], ctx) -> None: + _ = ctx + state.update(payload) + + mod.SlowTrainer = SlowTrainer + monkeypatch.setitem(sys.modules, "tmp_orch_timeout_mod", mod) + + spec = { + "request_id": "run-orch-timeout", + "trainer": "tmp_orch_timeout_mod:SlowTrainer", + "max_steps": 2, + "metric_every": 1, + "mock_batches": [1, 2, 3], + } + spec_path = tmp_path / "trainer_job_timeout.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + events = tmp_path / "events.jsonl" + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_MAX_RUNTIME_SECONDS", "1") + + with pytest.raises(Exception, match="timeout"): + run_training_runtime_from_env() + + lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] + failed = [x for x in lines if x.get("event") == "failed"] + assert failed + assert "timeout" in str(failed[-1].get("error", "")).lower() + + +def test_trainer_runtime_resume_idempotent_skips_when_final_exists(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: + trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_resume_skip_mod", "ResumeSkipTrainer") + token = "cap-up-2" + server, base = _start_test_server( + token=token, + dataset_bytes=_parquet_bytes(), + resume_payload=json.dumps({"state": {"loaded": 
True}}).encode("utf-8"), + ) + try: + ckpt = tmp_path / "checkpoints" + ckpt.mkdir(parents=True, exist_ok=True) + (ckpt / "final.json").write_text(json.dumps({"state": {"loaded": True}}), encoding="utf-8") + events = tmp_path / "events.jsonl" + spec = { + "request_id": "run-orch-resume-skip", + "trainer": trainer_import, + "max_steps": 5, + "resume_from_latest": True, + "mock_batches": [1, 2, 3], + } + spec_path = tmp_path / "trainer_job_resume_skip.json" + spec_path.write_text(json.dumps(spec), encoding="utf-8") + + monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) + monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) + monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) + monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) + monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") + + assert run_training_runtime_from_env() == 0 + lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] + assert any(x.get("event") == "completed" for x in lines) + posted_paths = [x["path"] for x in _TestHTTPHandler.posts] + assert "/upload/terminal" not in posted_paths + finally: + server.shutdown() + server.server_close() diff --git a/tests/test_training_endpoints_contract_smoke.py b/tests/test_training_endpoints_contract_smoke.py new file mode 100644 index 0000000..8a60b89 --- /dev/null +++ b/tests/test_training_endpoints_contract_smoke.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +import os +import sys +from pathlib import Path +from dataclasses import dataclass, field + +import pytest + +from gen_worker.trainer import StepContext, TrainingJobSpec, load_trainer_plugin, run_training_loop + + +class _Batch: + def __init__(self, values: dict[str, list[object]]) -> None: + self._values = values + + def to_pydict(self) -> dict[str, list[object]]: + return self._values + + +class _T2IModel: + def train_t2i_step( + self, *, image_refs: list[str], prompts: list[str], hyperparams: dict[str, object] 
+ ) -> float: + _ = hyperparams + if len(image_refs) != len(prompts): + raise ValueError("length mismatch") + return 1.0 + + +class _EditModel: + def train_edit_step( + self, + *, + source_refs: list[str], + target_refs: list[str], + instructions: list[str], + mask_refs: list[str | None], + hyperparams: dict[str, object], + ) -> float: + _ = hyperparams + if not (len(source_refs) == len(target_refs) == len(instructions) == len(mask_refs)): + raise ValueError("length mismatch") + return 2.0 + + +@dataclass +class _Reporter: + metrics: list[tuple[str, float, int]] = field(default_factory=list) + checkpoints: list[tuple[str, int]] = field(default_factory=list) + samples: list[tuple[str, int]] = field(default_factory=list) + completed_calls: list[tuple[int, str | None]] = field(default_factory=list) + failed_calls: list[tuple[int, str]] = field(default_factory=list) + + def started(self, *, request_id: str) -> None: + _ = request_id + + def metric(self, *, name: str, value: float, step: int) -> None: + self.metrics.append((name, value, step)) + + def checkpoint(self, *, path: str, step: int) -> None: + self.checkpoints.append((path, step)) + + def sample(self, *, path: str, step: int) -> None: + self.samples.append((path, step)) + + def completed(self, *, step: int, final_checkpoint: str | None) -> None: + self.completed_calls.append((step, final_checkpoint)) + + def failed(self, *, step: int, error: str) -> None: + self.failed_calls.append((step, error)) + + def is_canceled(self) -> bool: + return False + + +@dataclass +class _Writer: + checkpoints: list[tuple[int, dict[str, object]]] = field(default_factory=list) + final_payloads: list[dict[str, object]] = field(default_factory=list) + + def write_checkpoint( + self, + *, + step: int, + state_payload: dict[str, object], + trainer: object, + state: object, + ctx: StepContext, + ) -> str: + _ = trainer + _ = state + _ = ctx + self.checkpoints.append((step, state_payload)) + return f"/tmp/ckpt-{step}.json" + + def 
write_samples(self, *, step: int, state: object, ctx: StepContext) -> list[str]: + _ = state + _ = ctx + return [f"/tmp/sample-{step}.json"] + + def finalize( + self, + *, + state_payload: dict[str, object], + trainer: object, + state: object, + ctx: StepContext, + ) -> str | None: + _ = trainer + _ = state + _ = ctx + self.final_payloads.append(state_payload) + return "/tmp/final.json" + + +def _training_endpoints_root() -> Path: + env_override = os.getenv("TRAINING_ENDPOINTS_ROOT") + if env_override: + return Path(env_override).resolve() + return Path(__file__).resolve().parents[2] / "training-endpoints" + + +@pytest.mark.skipif(not _training_endpoints_root().exists(), reason="training-endpoints repo not present") +def test_training_endpoints_examples_run_in_gen_worker_loop() -> None: + root = _training_endpoints_root() + t2i_src = root / "t2i_three_prompts" / "src" + edit_src = root / "img2img_edit_optional_prompt_mask" / "src" + sys.path.insert(0, str(t2i_src)) + sys.path.insert(0, str(edit_src)) + try: + t2i_trainer = load_trainer_plugin("t2i_three_prompts.main:ThreePromptT2ITrainer") + t2i_job = TrainingJobSpec(request_id="t2i-run", max_steps=1, metric_every=1, checkpoint_every=1, sample_every=1) + t2i_ctx = StepContext( + job=t2i_job, + model_handles={"model": _T2IModel()}, + ) + t2i_reporter = _Reporter() + t2i_writer = _Writer() + t2i_terminal = run_training_loop( + job=t2i_job, + ctx=t2i_ctx, + trainer=t2i_trainer, + batches=[_Batch({"image_ref": ["a"], "caption_short": ["short"]})], + reporter=t2i_reporter, + artifact_writer=t2i_writer, + ) + assert t2i_terminal == 1 + assert any(name == "train/loss" for (name, _value, _step) in t2i_reporter.metrics) + assert t2i_reporter.checkpoints and t2i_writer.final_payloads + assert t2i_reporter.samples + + edit_trainer = load_trainer_plugin("img2img_edit_optional_prompt_mask.main:Img2ImgEditTrainer") + edit_job = TrainingJobSpec(request_id="edit-run", max_steps=1, metric_every=1, checkpoint_every=1, 
sample_every=1) + edit_ctx = StepContext( + job=edit_job, + model_handles={"model": _EditModel()}, + ) + edit_reporter = _Reporter() + edit_writer = _Writer() + edit_terminal = run_training_loop( + job=edit_job, + ctx=edit_ctx, + trainer=edit_trainer, + batches=[ + _Batch( + { + "source_image_ref": ["source"], + "target_image_ref": ["target"], + "edit_type": ["mosaic"], + } + ) + ], + reporter=edit_reporter, + artifact_writer=edit_writer, + ) + assert edit_terminal == 1 + assert any(name == "train/loss" for (name, _value, _step) in edit_reporter.metrics) + assert edit_reporter.checkpoints and edit_writer.final_payloads + assert edit_reporter.samples + finally: + if str(t2i_src) in sys.path: + sys.path.remove(str(t2i_src)) + if str(edit_src) in sys.path: + sys.path.remove(str(edit_src)) diff --git a/tests/test_worker_jwt_rotation.py b/tests/test_worker_jwt_rotation.py index d5252f9..f74ca38 100644 --- a/tests/test_worker_jwt_rotation.py +++ b/tests/test_worker_jwt_rotation.py @@ -74,7 +74,7 @@ def hello(ctx: ActionContext, payload: Input) -> Output: # Send rotation signal over the stream. Worker stores it for next reconnect. payload = json.dumps({"worker_jwt": "jwt-2"}, separators=(",", ":"), sort_keys=True).encode("utf-8") - sess.send(pb.WorkerSchedulerMessage(worker_event=pb.WorkerEvent(run_id="", event_type="worker.jwt.rotate", payload_json=payload))) + sess.send(pb.WorkerSchedulerMessage(worker_event=pb.WorkerEvent(request_id="", event_type="worker.jwt.rotate", payload_json=payload))) # Ensure the worker processed the rotation signal before we force a reconnect. start = time.monotonic() @@ -84,27 +84,19 @@ def hello(ctx: ActionContext, payload: Input) -> Output: time.sleep(0.05) assert w.worker_jwt == "jwt-2" - # Trigger reconnect via real transport disruption: stop old server, then restart. 
- server.stop(grace=None) - orch2 = _MockOrchestrator() - server2 = grpc.server(futures.ThreadPoolExecutor(max_workers=8)) - pb_grpc.add_SchedulerWorkerServiceServicer_to_server(orch2, server2) - rebound = server2.add_insecure_port(f"127.0.0.1:{port}") - assert rebound == port - server2.start() - - # Wait for a new connection to establish with the rotated token on the restarted server. + # Force a reconnect without killing the server. This simulates network interruption. + w._handle_connection_error() + + # Wait for a new connection to establish with the rotated token. start = time.monotonic() while time.monotonic() - start < 30: - sess2 = orch2.get_session(timeout_s=0.5) + sess2 = orch.get_session(timeout_s=0.5) if sess2 is None: continue if _extract_bearer(sess2.metadata) == "jwt-2": - server2.stop(grace=None) return time.sleep(0.1) - server2.stop(grace=None) raise AssertionError("timed out waiting for reconnect with rotated WORKER_JWT") finally: try: diff --git a/tests/test_worker_leader_redirect.py b/tests/test_worker_leader_redirect.py new file mode 100644 index 0000000..b0104d6 --- /dev/null +++ b/tests/test_worker_leader_redirect.py @@ -0,0 +1,19 @@ +import unittest + +from gen_worker.worker import Worker + + +class TestWorkerLeaderRedirect(unittest.TestCase): + def test_extract_leader_addr(self): + self.assertEqual(Worker._extract_leader_addr("not_leader:127.0.0.1:8080"), "127.0.0.1:8080") + self.assertIsNone(Worker._extract_leader_addr("not_leader:")) + self.assertIsNone(Worker._extract_leader_addr("other_error")) + self.assertIsNone(Worker._extract_leader_addr(None)) + + def test_normalize_scheduler_addrs(self): + addrs = Worker._normalize_scheduler_addrs("a:1", ["b:2", "a:1", " ", "c:3"]) + self.assertEqual(addrs, ["a:1", "b:2", "c:3"]) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_worker_model_keyspace.py b/tests/test_worker_model_keyspace.py new file mode 100644 index 0000000..f6775eb --- /dev/null +++ 
b/tests/test_worker_model_keyspace.py @@ -0,0 +1,53 @@ +import unittest + +from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource +from gen_worker.worker import Worker + + +class _Payload: + def __init__(self, **kwargs): + for k, v in kwargs.items(): + setattr(self, k, v) + + +class TestWorkerModelKeyspace(unittest.TestCase): + def test_payload_model_selection_uses_function_mapping(self) -> None: + w = Worker.__new__(Worker) + w._payload_model_id_by_key_by_function = { + "generate": {"sdxl": "stabilityai/stable-diffusion-xl-base-1.0"} + } + w._fixed_model_id_by_key = {} + w._release_allowed_model_ids = None + + inj = InjectionSpec( + param_name="pipe", + param_type=object, + model_ref=ModelRef(ModelRefSource.PAYLOAD, "model_key"), + ) + payload = _Payload(model_key="sdxl") + + got, key = Worker._resolve_model_id_for_injection(w, "generate", inj, payload) # type: ignore[arg-type] + self.assertEqual(got, "stabilityai/stable-diffusion-xl-base-1.0") + self.assertEqual(key, "sdxl") + + def test_payload_model_selection_rejects_unknown_key(self) -> None: + w = Worker.__new__(Worker) + w._payload_model_id_by_key_by_function = { + "generate": {"sdxl": "stabilityai/stable-diffusion-xl-base-1.0"} + } + w._fixed_model_id_by_key = {} + w._release_allowed_model_ids = None + + inj = InjectionSpec( + param_name="pipe", + param_type=object, + model_ref=ModelRef(ModelRefSource.PAYLOAD, "model_key"), + ) + payload = _Payload(model_key="nope") + + with self.assertRaises(ValueError): + Worker._resolve_model_id_for_injection(w, "generate", inj, payload) # type: ignore[arg-type] + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_worker_startup_visibility.py b/tests/test_worker_startup_visibility.py new file mode 100644 index 0000000..ace2c46 --- /dev/null +++ b/tests/test_worker_startup_visibility.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import json +import sys +import threading +import time +import types +from pathlib import 
Path +from typing import Any + +import pytest + +from gen_worker.worker import Worker + + +def test_registration_watchdog_emits_timeout_and_sets_stop() -> None: + w = Worker.__new__(Worker) + w.worker_id = "w-1" + w.scheduler_addr = "127.0.0.1:8080" + w._process_started_monotonic = time.monotonic() - 0.5 + w._registered_event = threading.Event() + w._running = True + w._stop_event = threading.Event() + w._startup_timeout_triggered = False + + startup_events: list[tuple[str, dict[str, Any]]] = [] + worker_events: list[tuple[str, dict[str, Any]]] = [] + w._emit_startup_phase = lambda phase, **kw: startup_events.append((phase, kw)) # type: ignore[method-assign] + w._emit_worker_event_bytes = ( # type: ignore[method-assign] + lambda request_id, event_type, payload_json: worker_events.append( + (event_type, json.loads(payload_json.decode("utf-8"))) + ) + ) + w._close_connection = lambda: None # type: ignore[method-assign] + + w._registration_watchdog_loop(timeout_s=0.02) + + assert w._startup_timeout_triggered is True + assert w._stop_event.is_set() is True + assert any(name == "startup_timeout_unregistered" for name, _ in startup_events) + assert any(name == "worker.startup_timeout_unregistered" for name, _ in worker_events) + + +def test_task_phase_watchdog_emits_stuck_event() -> None: + w = Worker.__new__(Worker) + seen: list[tuple[str, dict[str, Any]]] = [] + w._emit_worker_event_bytes = ( # type: ignore[method-assign] + lambda request_id, event_type, payload_json: seen.append((event_type, json.loads(payload_json.decode("utf-8")))) + ) + + timer = w._start_task_phase_watchdog( + request_id="run-1", + phase="inference", + warn_after_s=0.02, + payload={"function_name": "generate"}, + ) + time.sleep(0.06) + if timer is not None: + timer.cancel() + + assert any(name == "task.inference.stuck" for name, _ in seen) + ev_payload = next(p for name, p in seen if name == "task.inference.stuck") + assert ev_payload["function_name"] == "generate" + assert 
ev_payload["elapsed_ms"] >= 20 + + +def test_emit_worker_fatal_includes_traceback_metadata() -> None: + w = Worker.__new__(Worker) + w.worker_id = "w-2" + w.scheduler_addr = "scheduler:8080" + w._process_started_monotonic = time.monotonic() - 1.0 + seen: list[tuple[str, dict[str, Any]]] = [] + w._emit_worker_event_bytes = ( # type: ignore[method-assign] + lambda request_id, event_type, payload_json: seen.append((event_type, json.loads(payload_json.decode("utf-8")))) + ) + + try: + raise RuntimeError("boom") + except Exception as exc: + w._emit_worker_fatal("startup", exc, exit_code=7) + + assert seen + event_type, payload = seen[-1] + assert event_type == "worker.fatal" + assert payload["phase"] == "startup" + assert payload["exception_class"] == "RuntimeError" + assert "boom" in payload["exception_message"] + assert "RuntimeError" in payload["traceback"] + assert payload["exit_code"] == 7 + + +def test_run_raises_when_registration_timeout_reached(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: + mod_dir = tmp_path / "mod" + mod_dir.mkdir(parents=True, exist_ok=True) + (mod_dir / "tiny_mod.py").write_text( + """ +from __future__ import annotations +import msgspec +from gen_worker.decorators import worker_function +from gen_worker.worker import ActionContext + +class Input(msgspec.Struct): + name: str + +class Output(msgspec.Struct): + ok: bool + +@worker_function() +def tiny(ctx: ActionContext, payload: Input) -> Output: + return Output(ok=True) +""".lstrip(), + encoding="utf-8", + ) + sys.path.insert(0, str(mod_dir)) + + w = Worker( + scheduler_addr="127.0.0.1:1", + user_module_names=["tiny_mod"], + worker_jwt="jwt-test", + reconnect_delay=0, + max_reconnect_attempts=0, + ) + monkeypatch.setattr(w, "connect", types.MethodType(lambda self: False, w)) + w._register_timeout_s = 1 + w._reconnect_delay_base = 0 + w._reconnect_delay_max = 0 + w._reconnect_jitter = 0 + + with pytest.raises(RuntimeError, match="startup_timeout_unregistered"): + w.run() + diff 
--git a/tests/test_worker_telemetry_issue67.py b/tests/test_worker_telemetry_issue67.py new file mode 100644 index 0000000..0308aee --- /dev/null +++ b/tests/test_worker_telemetry_issue67.py @@ -0,0 +1,187 @@ +from __future__ import annotations + +import asyncio +import threading +import time +from pathlib import Path +from typing import Any + +import msgspec +import pytest + +from gen_worker.model_cache import ModelCache +from gen_worker.worker import ActionContext, Worker, pb, _TaskSpec +from gen_worker.decorators import ResourceRequirements + + +class _Input(msgspec.Struct): + sleep_s: float + + +class _Output(msgspec.Struct): + ok: bool + + +def _sleep_fn(ctx: ActionContext, payload: _Input) -> _Output: + time.sleep(payload.sleep_s) + return _Output(ok=True) + + +def test_gpu_is_busy_refcount_overlapping_inference() -> None: + w = Worker.__new__(Worker) + w._has_gpu = True + w._gpu_busy_lock = threading.Lock() + w._gpu_busy_refcount = 0 + w._is_gpu_busy = False + w._active_model_use_lock = threading.Lock() + w._active_model_use_counts = {} + w._active_tasks_lock = threading.Lock() + w._active_tasks = {} + w._active_function_counts = {} + w._send_message = lambda msg: None # type: ignore[method-assign] + w._send_task_result = lambda *args, **kwargs: None # type: ignore[method-assign] + w._materialize_assets = lambda ctx, obj: None # type: ignore[method-assign] + + spec = _TaskSpec( + name="sleep", + func=_sleep_fn, + resources=ResourceRequirements(), + ctx_param="ctx", + payload_param="payload", + payload_type=_Input, + output_mode="single", + output_type=_Output, + injections=(), + ) + + ctx1 = ActionContext("r1", emitter=lambda e: None) + ctx2 = ActionContext("r2", emitter=lambda e: None) + payload = msgspec.msgpack.encode({"sleep_s": 0.25}) + + t1 = threading.Thread(target=w._execute_task, args=(ctx1, spec, payload), daemon=True) + t2 = threading.Thread(target=w._execute_task, args=(ctx2, spec, payload), daemon=True) + + t1.start() + t2.start() + + # Wait 
until at least one entered busy. + deadline = time.time() + 2.0 + while time.time() < deadline and not w._get_gpu_busy_status(): + time.sleep(0.01) + assert w._get_gpu_busy_status() is True + + # While at least one task is running, busy must remain true. + while t1.is_alive() or t2.is_alive(): + assert w._get_gpu_busy_status() is True + time.sleep(0.01) + + assert w._get_gpu_busy_status() is False + + +class _StubModelManager: + def __init__(self) -> None: + self._loaded: set[str] = set() + self.model_sizes: dict[str, float] = {} + + async def load_model_into_vram(self, model_id: str) -> bool: + await asyncio.sleep(0.15) + self._loaded.add(model_id) + return True + + def unload(self, model_id: str) -> None: + self._loaded.discard(model_id) + + def get_vram_loaded_models(self) -> list[str]: + return sorted(self._loaded) + + +def test_load_model_emits_events_and_updates_vram_models_and_busy(tmp_path: Path) -> None: + w = Worker.__new__(Worker) + w._has_gpu = True + w._gpu_busy_lock = threading.Lock() + w._gpu_busy_refcount = 0 + w._is_gpu_busy = False + w._active_model_use_lock = threading.Lock() + w._active_model_use_counts = {} + w._model_init_done_event = threading.Event() + w._model_init_done_event.set() + w._model_manager = _StubModelManager() + w._model_cache = ModelCache(model_cache_dir=str(tmp_path / "cache")) + w._task_specs = {} + w._ws_specs = {} + w._discovered_resources = {} + w._function_schemas = {} + w.max_concurrency = 0 + w.runpod_pod_id = "" + + sent: list[Any] = [] + w._send_message = lambda msg: sent.append(msg) # type: ignore[method-assign] + + cmd = pb.LoadModelCommand(model_id="cozy:demo/repo@sha256:snap-1") + + th = threading.Thread(target=w._handle_load_model_cmd, args=(cmd,), daemon=True) + th.start() + + # Busy should become true during the async sleep. 
+ deadline = time.time() + 2.0 + saw_busy = False + while time.time() < deadline and th.is_alive(): + if w._get_gpu_busy_status(): + saw_busy = True + break + time.sleep(0.01) + assert saw_busy is True + + th.join(timeout=5) + assert w._get_gpu_busy_status() is False + + # Must emit load.started + load.completed/failed. + event_types = [ + m.worker_event.event_type + for m in sent + if getattr(m, "worker_event", None) and m.HasField("worker_event") + ] + assert "model.load.started" in event_types + assert "model.load.completed" in event_types + + # LoadModelResult must succeed. + results = [m.load_model_result for m in sent if m.HasField("load_model_result")] + assert results and results[-1].success is True + + # The immediate registration update should reflect vram_models after the load. + regs = [m.worker_registration for m in sent if m.HasField("worker_registration")] + assert regs + assert "cozy:demo/repo@sha256:snap-1" in list(regs[-1].resources.vram_models) + + +def test_unload_model_rejected_when_in_use(tmp_path: Path) -> None: + w = Worker.__new__(Worker) + w._has_gpu = True + w._gpu_busy_lock = threading.Lock() + w._gpu_busy_refcount = 0 + w._is_gpu_busy = False + w._active_model_use_lock = threading.Lock() + w._active_model_use_counts = {} + w._model_manager = _StubModelManager() + w._model_cache = ModelCache(model_cache_dir=str(tmp_path / "cache")) + w._task_specs = {} + w._ws_specs = {} + w._discovered_resources = {} + w._function_schemas = {} + w.max_concurrency = 0 + w.runpod_pod_id = "" + + sent: list[Any] = [] + w._send_message = lambda msg: sent.append(msg) # type: ignore[method-assign] + + model_id = "cozy:demo/repo@sha256:snap-1" + w._model_use_enter(model_id) + try: + w._handle_unload_model_cmd(pb.UnloadModelCommand(model_id=model_id)) + finally: + w._model_use_exit(model_id) + + res = [m.unload_model_result for m in sent if m.HasField("unload_model_result")] + assert res and res[-1].success is False + assert "model_in_use" in 
(res[-1].error_message or "") + diff --git a/tests/test_worker_wire_protocol.py b/tests/test_worker_wire_protocol.py new file mode 100644 index 0000000..f235287 --- /dev/null +++ b/tests/test_worker_wire_protocol.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +from gen_worker.wire_protocol import WIRE_PROTOCOL_MAJOR, WIRE_PROTOCOL_MINOR +from gen_worker.worker import Worker + + +def test_registration_advertises_wire_protocol(monkeypatch) -> None: + sent = [] + + w = Worker( + scheduler_addr="127.0.0.1:65535", + user_module_names=[], + worker_jwt="test-jwt", + reconnect_delay=0, + ) + monkeypatch.setattr(w, "_send_message", lambda message: sent.append(message)) + + w._register_worker(is_heartbeat=False) + + assert sent, "expected at least one outgoing registration message" + reg = sent[0].worker_registration + assert reg.protocol_major == WIRE_PROTOCOL_MAJOR + assert reg.protocol_minor == WIRE_PROTOCOL_MINOR + + +def test_detects_protocol_incompatibility_marker() -> None: + assert Worker._is_protocol_incompatibility("unsupported_worker_protocol:1.0 supported=1:1-9999") + assert not Worker._is_protocol_incompatibility("not_leader:127.0.0.1:50051") diff --git a/uv.lock b/uv.lock index 056c247..7bdf473 100644 --- a/uv.lock +++ b/uv.lock @@ -671,7 +671,7 @@ wheels = [ [[package]] name = "gen-worker" -version = "0.3.9" +version = "0.3.11" source = { editable = "." 
} dependencies = [ { name = "aiohttp" }, From 898c0302949e3564744dc3bd84d7049e8ddd49ba Mon Sep 17 00:00:00 2001 From: arpbansal Date: Tue, 17 Mar 2026 19:43:33 +0000 Subject: [PATCH 2/4] changed to RequestContext --- README.md | 22 ++++++------ .../src/firered_image_edit/main.py | 4 +-- .../flux2-klein-4b/src/flux2_klein_4b/main.py | 16 ++++----- .../flux2-klein-9b/src/flux2_klein_9b/main.py | 16 ++++----- examples/image-gen/src/image_gen/main.py | 8 ++--- .../src/medasr_transcribe/main.py | 4 +-- .../src/multi_sdxl_checkpoints/main.py | 4 +-- .../src/openai_codex_worker/main.py | 4 +-- .../src/qwen_image_2512/main.py | 4 +-- examples/sd15/src/sd15/main.py | 10 +++--- examples/smoke-test/src/smoke_test/example.py | 12 +++---- .../z-image-lora/src/z_image_lora/main.py | 4 +-- src/gen_worker/__init__.py | 4 +-- src/gen_worker/discover.py | 8 ++--- src/gen_worker/testing/http_runner.py | 4 +-- .../testing/test_output_save_contract.py | 8 ++--- src/gen_worker/worker.py | 34 +++++++++---------- tests/test_asset_materialization.py | 8 ++--- tests/test_dev_http_runner.py | 4 +-- tests/test_discover_models.py | 22 ++++++------ tests/test_discover_names.py | 14 ++++---- tests/test_file_token_scoping.py | 18 +++++----- tests/test_function_compute_capability.py | 4 +-- tests/test_hf_injection_normalization.py | 6 ++-- tests/test_injection_type_enforcement.py | 4 +-- tests/test_mock_orchestrator_dev.py | 4 +-- tests/test_realtime_socket.py | 4 +-- ... 
=> test_request_context_local_outputs.py} | 4 +-- ...n.py => test_request_context_partition.py} | 10 +++--- ...test_signature_contract_and_incremental.py | 20 +++++------ tests/test_worker_jwt_rotation.py | 4 +-- tests/test_worker_startup_visibility.py | 4 +-- tests/test_worker_telemetry_issue67.py | 8 ++--- 33 files changed, 152 insertions(+), 152 deletions(-) rename tests/{test_action_context_local_outputs.py => test_request_context_local_outputs.py} (87%) rename tests/{test_action_context_partition.py => test_request_context_partition.py} (82%) diff --git a/README.md b/README.md index 16a4836..dc19fb2 100644 --- a/README.md +++ b/README.md @@ -50,7 +50,7 @@ uv add gen-worker[torch] ```python import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function class Input(msgspec.Struct): prompt: str @@ -59,7 +59,7 @@ class Output(msgspec.Struct): text: str @worker_function() -def generate(ctx: ActionContext, payload: Input) -> Output: +def generate(ctx: RequestContext, payload: Input) -> Output: return Output(text=f"Hello, {payload.prompt}!") ``` @@ -69,7 +69,7 @@ def generate(ctx: ActionContext, payload: Input) -> Output: - **Schema generation** - Input/output schemas extracted from msgspec types - **Model injection** - Dependency injection for ML models with caching - **Streaming output** - Support for incremental/streaming responses -- **Progress reporting** - Built-in progress events via `ActionContext` +- **Progress reporting** - Built-in progress events via `RequestContext` - **Perf metrics** - Best-effort per-run metrics emitted to gen-orchestrator (`metrics.*` worker events) - **Trainer runtime mode** - SDK-native trainer loop via `WORKER_MODE=trainer` - **File handling** - Upload/download assets via Cozy hub file API @@ -81,7 +81,7 @@ def generate(ctx: ActionContext, payload: Input) -> Output: ```python import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import 
RequestContext, worker_function class Input(msgspec.Struct): prompt: str @@ -90,7 +90,7 @@ class Output(msgspec.Struct): result: str @worker_function() -def my_function(ctx: ActionContext, payload: Input) -> Output: +def my_function(ctx: RequestContext, payload: Input) -> Output: return Output(result=f"Processed: {payload.prompt}") ``` @@ -103,7 +103,7 @@ class Delta(msgspec.Struct): chunk: str @worker_function() -def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: +def stream(ctx: RequestContext, payload: Input) -> Iterator[Delta]: for word in payload.prompt.split(): if ctx.is_canceled(): raise InterruptedError("canceled") @@ -126,7 +126,7 @@ from gen_worker.injection import ModelRef, ModelRefSource as Src @worker_function() def generate( - ctx: ActionContext, + ctx: RequestContext, pipe: Annotated[DiffusionPipeline, ModelRef(Src.FIXED, "sd15")], payload: Input, ) -> Output: @@ -149,7 +149,7 @@ flux = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["bf16"] } from typing import Annotated import msgspec from diffusers import DiffusionPipeline -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src class Input(msgspec.Struct): @@ -158,7 +158,7 @@ class Input(msgspec.Struct): @worker_function() def generate( - ctx: ActionContext, + ctx: RequestContext, pipe: Annotated[DiffusionPipeline, ModelRef(Src.PAYLOAD, "model")], payload: Input, ): @@ -173,7 +173,7 @@ arbitrary repo refs in the payload. 
```python @worker_function() -def process(ctx: ActionContext, payload: Input) -> Output: +def process(ctx: RequestContext, payload: Input) -> Output: # Save bytes and get asset reference asset = ctx.save_bytes(f"runs/{ctx.request_id}/outputs/output.png", image_bytes) return Output(result=asset.ref) @@ -451,7 +451,7 @@ cache.get_vram_models() # ["model-a"] from gen_worker.errors import RetryableError, ValidationError, FatalError @worker_function() -def process(ctx: ActionContext, payload: Input) -> Output: +def process(ctx: RequestContext, payload: Input) -> Output: if not payload.prompt: raise ValidationError("prompt is required") # 400, no retry diff --git a/examples/firered-image-edit/src/firered_image_edit/main.py b/examples/firered-image-edit/src/firered_image_edit/main.py index 0b66024..f1668c4 100644 --- a/examples/firered-image-edit/src/firered_image_edit/main.py +++ b/examples/firered-image-edit/src/firered_image_edit/main.py @@ -10,7 +10,7 @@ from diffusers import QwenImageEditPlusPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -47,7 +47,7 @@ class EditOutput(msgspec.Struct): @worker_function(_firered_resources) def edit( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ QwenImageEditPlusPipeline, ModelRef(Src.FIXED, "firered_image_edit"), diff --git a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py index 6f1823c..3db189b 100644 --- a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py +++ b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py @@ -11,7 +11,7 @@ from diffusers import Flux2KleinPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, 
ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -52,7 +52,7 @@ def _should_enable_seq_offload() -> bool: def _generate( - ctx: ActionContext, + ctx: RequestContext, pipeline, payload: GenerateInput, model_key: str, @@ -100,7 +100,7 @@ def _generate( @worker_function(_flux_resources) def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-4b"), @@ -112,7 +112,7 @@ def generate( @worker_function(_flux_resources) def generate_fp8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-4b_fp8"), @@ -130,7 +130,7 @@ def generate_fp8( @worker_function(_flux_resources) def generate_9b( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b"), @@ -142,7 +142,7 @@ def generate_9b( @worker_function(_flux_resources) def generate_9b_fp8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b_fp8"), @@ -154,7 +154,7 @@ def generate_9b_fp8( @worker_function(_flux_resources) def generate_int8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-4b_int8"), @@ -172,7 +172,7 @@ def generate_int8( @worker_function(_flux_resources) def generate_int4( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-4b_int4"), diff --git a/examples/flux2-klein-9b/src/flux2_klein_9b/main.py b/examples/flux2-klein-9b/src/flux2_klein_9b/main.py index 1a14cf0..023ab93 100644 --- a/examples/flux2-klein-9b/src/flux2_klein_9b/main.py +++ b/examples/flux2-klein-9b/src/flux2_klein_9b/main.py @@ -11,7 +11,7 @@ from diffusers import Flux2KleinPipeline from PIL import Image -from gen_worker import ActionContext, 
ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -53,7 +53,7 @@ def _should_enable_seq_offload() -> bool: def _generate( - ctx: ActionContext, + ctx: RequestContext, pipeline, payload: GenerateInput, model_key: str, @@ -101,7 +101,7 @@ def _generate( @worker_function(_flux_resources) def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-base"), @@ -113,7 +113,7 @@ def generate( @worker_function(_flux_resources) def generate_turbo( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-turbo"), @@ -125,7 +125,7 @@ def generate_turbo( @worker_function(_flux_resources) def generate_fp8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-base_fp8"), @@ -137,7 +137,7 @@ def generate_fp8( @worker_function(_flux_resources) def generate_turbo_fp8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-turbo_fp8"), @@ -149,7 +149,7 @@ def generate_turbo_fp8( @worker_function(_nvfp4_resources) def generate_nvfp4( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-base_nvfp4"), @@ -161,7 +161,7 @@ def generate_nvfp4( @worker_function(_nvfp4_resources) def generate_turbo_nvfp4( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, ModelRef(Src.FIXED, "flux2-klein-9b-turbo_nvfp4"), diff --git a/examples/image-gen/src/image_gen/main.py b/examples/image-gen/src/image_gen/main.py index 65999e4..f9de15d 100644 --- a/examples/image-gen/src/image_gen/main.py +++ b/examples/image-gen/src/image_gen/main.py @@ -9,7 +9,7 @@ from diffusers 
import StableDiffusionXLPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -46,7 +46,7 @@ class GenerateOutput(msgspec.Struct): @worker_function(sdxl_resources) def generate_image( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ StableDiffusionXLPipeline, ModelRef(Src.FIXED, "sdxl"), @@ -78,7 +78,7 @@ def generate_image( # @worker_function(resources=sdxl_resources) -# def generate_image(ctx: ActionContext, pipeline: DiffusionPipeline = None, prompt_details: dict = None) -> bytes: +# def generate_image(ctx: RequestContext, pipeline: DiffusionPipeline = None, prompt_details: dict = None) -> bytes: # """ # Legacy function: Generates an image and returns raw bytes. # Consider using generate_and_upload_image for complete workflows. @@ -115,7 +115,7 @@ def generate_image( # s3_upload_resources = ResourceRequirements() # @worker_function(resources=s3_upload_resources) -# def upload_image_to_s3(ctx: ActionContext, upload_details: dict) -> Dict[str, str]: +# def upload_image_to_s3(ctx: RequestContext, upload_details: dict) -> Dict[str, str]: # """ # Legacy function: Uploads image bytes to S3. # Consider using generate_and_upload_image for complete workflows. 
diff --git a/examples/medasr-transcribe/src/medasr_transcribe/main.py b/examples/medasr-transcribe/src/medasr_transcribe/main.py index 7b3b7f0..3956372 100644 --- a/examples/medasr-transcribe/src/medasr_transcribe/main.py +++ b/examples/medasr-transcribe/src/medasr_transcribe/main.py @@ -9,7 +9,7 @@ import torch from transformers import AutoModelForCTC, AutoProcessor -from gen_worker import ActionContext, Asset, ResourceRequirements, worker_function +from gen_worker import RequestContext, Asset, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src _MODEL_KEY = "medasr" @@ -25,7 +25,7 @@ class MedASROutput(msgspec.Struct): @worker_function(ResourceRequirements()) def medasr_transcribe( - ctx: ActionContext, + ctx: RequestContext, model: Annotated[AutoModelForCTC, ModelRef(Src.FIXED, _MODEL_KEY)], processor: Annotated[AutoProcessor, ModelRef(Src.FIXED, _MODEL_KEY)], payload: MedASRInput, diff --git a/examples/multi-sdxl-checkpoints/src/multi_sdxl_checkpoints/main.py b/examples/multi-sdxl-checkpoints/src/multi_sdxl_checkpoints/main.py index cd29ed7..9bae6c5 100644 --- a/examples/multi-sdxl-checkpoints/src/multi_sdxl_checkpoints/main.py +++ b/examples/multi-sdxl-checkpoints/src/multi_sdxl_checkpoints/main.py @@ -22,7 +22,7 @@ from diffusers import DiffusionPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -82,7 +82,7 @@ def _set_seed_and_perf(seed: Optional[int]) -> None: @worker_function(_sdxl_router_resources) def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ DiffusionPipeline, ModelRef(Src.PAYLOAD, "model_key") # Model key comes from payload.model_key diff --git a/examples/openai-codex/src/openai_codex_worker/main.py 
b/examples/openai-codex/src/openai_codex_worker/main.py index a834f6c..4f357df 100644 --- a/examples/openai-codex/src/openai_codex_worker/main.py +++ b/examples/openai-codex/src/openai_codex_worker/main.py @@ -5,7 +5,7 @@ import msgspec -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function class CodexExecInput(msgspec.Struct): @@ -20,7 +20,7 @@ class CodexEventDelta(msgspec.Struct): @worker_function(ResourceRequirements()) -def codex_exec(ctx: ActionContext, payload: CodexExecInput) -> Iterator[CodexEventDelta]: +def codex_exec(ctx: RequestContext, payload: CodexExecInput) -> Iterator[CodexEventDelta]: """ Run Codex in headless mode and stream JSONL events as incremental deltas. diff --git a/examples/qwen-image-2512/src/qwen_image_2512/main.py b/examples/qwen-image-2512/src/qwen_image_2512/main.py index 89d8c9b..e96b585 100644 --- a/examples/qwen-image-2512/src/qwen_image_2512/main.py +++ b/examples/qwen-image-2512/src/qwen_image_2512/main.py @@ -11,7 +11,7 @@ from diffusers import DiffusionPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -93,7 +93,7 @@ def _try_compile_transformer(pipeline: DiffusionPipeline) -> None: @worker_function(qwen_resources) def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ DiffusionPipeline, ModelRef(Src.FIXED, "qwen_image"), diff --git a/examples/sd15/src/sd15/main.py b/examples/sd15/src/sd15/main.py index e5c5081..aa56eae 100644 --- a/examples/sd15/src/sd15/main.py +++ b/examples/sd15/src/sd15/main.py @@ -9,7 +9,7 @@ from diffusers import StableDiffusionPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from 
gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -46,7 +46,7 @@ class GenerateOutput(msgspec.Struct): @worker_function(_sd15_resources) def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ StableDiffusionPipeline, ModelRef(Src.FIXED, "sd15"), @@ -96,7 +96,7 @@ def generate( @worker_function(_sd15_resources) def generate_fp8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ StableDiffusionPipeline, ModelRef(Src.FIXED, "sd15_fp8"), @@ -114,7 +114,7 @@ def generate_fp8( @worker_function(_sd15_resources) def generate_int8( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ StableDiffusionPipeline, ModelRef(Src.FIXED, "sd15_int8"), @@ -132,7 +132,7 @@ def generate_int8( @worker_function(_sd15_resources) def generate_int4( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ StableDiffusionPipeline, ModelRef(Src.FIXED, "sd15_int4"), diff --git a/examples/smoke-test/src/smoke_test/example.py b/examples/smoke-test/src/smoke_test/example.py index fc32006..fdf003d 100644 --- a/examples/smoke-test/src/smoke_test/example.py +++ b/examples/smoke-test/src/smoke_test/example.py @@ -4,7 +4,7 @@ import msgspec -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.types import Asset @@ -30,7 +30,7 @@ class ImageGenOutput(msgspec.Struct): @worker_function(ResourceRequirements()) -def image_gen_action(ctx: ActionContext, data: ImageGenInput) -> ImageGenOutput: +def image_gen_action(ctx: RequestContext, data: ImageGenInput) -> ImageGenOutput: """Example image generation function that returns real output Assets. This is a smoke test: it does not run ML inference. 
It validates: @@ -58,7 +58,7 @@ class AddOutput(msgspec.Struct): @worker_function(ResourceRequirements()) -def add_numbers(ctx: ActionContext, data: AddInput) -> AddOutput: +def add_numbers(ctx: RequestContext, data: AddInput) -> AddOutput: """Example function that adds two numbers.""" if ctx.is_canceled(): raise InterruptedError("Task cancelled") @@ -76,7 +76,7 @@ class MultiplyOutput(msgspec.Struct): @worker_function(ResourceRequirements()) -def multiply_numbers(ctx: ActionContext, data: MultiplyInput) -> MultiplyOutput: +def multiply_numbers(ctx: RequestContext, data: MultiplyInput) -> MultiplyOutput: """Example function that multiplies two numbers.""" if ctx.is_canceled(): raise InterruptedError("Task cancelled") @@ -94,7 +94,7 @@ class StreamInput(msgspec.Struct): @worker_function(ResourceRequirements()) -def token_stream(ctx: ActionContext, data: StreamInput) -> Iterator[TokenDelta]: +def token_stream(ctx: RequestContext, data: StreamInput) -> Iterator[TokenDelta]: """Example incremental-output function (LLM-style token deltas).""" for ch in data.text: if ctx.is_canceled(): @@ -122,7 +122,7 @@ class PromptBatchOutput(msgspec.Struct): @worker_function(ResourceRequirements()) -def caption_prompts(ctx: ActionContext, data: PromptBatchInput) -> PromptBatchOutput: +def caption_prompts(ctx: RequestContext, data: PromptBatchInput) -> PromptBatchOutput: """Example multi-item request handler for input.prompts[]. 
This function is intentionally simple and deterministic so it can be used in diff --git a/examples/z-image-lora/src/z_image_lora/main.py b/examples/z-image-lora/src/z_image_lora/main.py index 16bb47a..b24d060 100644 --- a/examples/z-image-lora/src/z_image_lora/main.py +++ b/examples/z-image-lora/src/z_image_lora/main.py @@ -19,7 +19,7 @@ from diffusers import DiffusionPipeline from PIL import Image -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src from gen_worker.types import Asset @@ -67,7 +67,7 @@ class GenerateOutput(msgspec.Struct): @worker_function(_z_image_resources) def generate_with_loras( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ DiffusionPipeline, ModelRef(Src.FIXED, "z-image-turbo"), diff --git a/src/gen_worker/__init__.py b/src/gen_worker/__init__.py index 6fc73af..84f37bc 100644 --- a/src/gen_worker/__init__.py +++ b/src/gen_worker/__init__.py @@ -1,7 +1,7 @@ # Make src/gen_worker a Python package from .decorators import ResourceRequirements, worker_function, worker_websocket from .injection import ModelRef, ModelRefSource -from .worker import ActionContext, RealtimeSocket +from .worker import RequestContext, RealtimeSocket from .errors import AuthError, RetryableError, FatalError from .types import Asset from .model_interface import ModelManager @@ -38,7 +38,7 @@ "ResourceRequirements", "ModelRef", "ModelRefSource", - "ActionContext", + "RequestContext", "RealtimeSocket", "AuthError", "RetryableError", diff --git a/src/gen_worker/discover.py b/src/gen_worker/discover.py index 2779f94..c90861f 100644 --- a/src/gen_worker/discover.py +++ b/src/gen_worker/discover.py @@ -25,7 +25,7 @@ import msgspec -from gen_worker import ActionContext +from gen_worker import RequestContext from gen_worker.injection import ModelRef from gen_worker.tensorhub_toml import ( @@ 
-195,12 +195,12 @@ def _extract_function_metadata(func: Any, module_name: str) -> Dict[str, Any]: if len(params) < 2: raise ValueError( - f"{func.__name__}: must accept (ctx: ActionContext, payload: msgspec.Struct, ...)" + f"{func.__name__}: must accept (ctx: RequestContext, payload: msgspec.Struct, ...)" ) ctx_name = params[0].name - if hints.get(ctx_name) is not ActionContext: - raise ValueError(f"{func.__name__}: first param must be ctx: ActionContext") + if hints.get(ctx_name) is not RequestContext: + raise ValueError(f"{func.__name__}: first param must be ctx: RequestContext") payload_type = None payload_param = None diff --git a/src/gen_worker/testing/http_runner.py b/src/gen_worker/testing/http_runner.py index b6d905a..c92253f 100644 --- a/src/gen_worker/testing/http_runner.py +++ b/src/gen_worker/testing/http_runner.py @@ -17,7 +17,7 @@ from gen_worker.cache_paths import worker_model_cache_dir from gen_worker.pb import worker_scheduler_pb2 as pb -from gen_worker.worker import ActionContext, Worker +from gen_worker.worker import RequestContext, Worker def _load_manifest(path: Path) -> dict[str, Any]: @@ -123,7 +123,7 @@ def run_task_sync( ) # Mirror _handle_run_request's ctx construction, but use local output backend. 
- ctx = ActionContext( + ctx = RequestContext( rid, emitter=self._emit_progress_event, owner=str(owner or "") or None, diff --git a/src/gen_worker/testing/test_output_save_contract.py b/src/gen_worker/testing/test_output_save_contract.py index 94d7dec..3d7944f 100644 --- a/src/gen_worker/testing/test_output_save_contract.py +++ b/src/gen_worker/testing/test_output_save_contract.py @@ -7,7 +7,7 @@ import unittest from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from gen_worker.worker import ActionContext +from gen_worker.worker import RequestContext class _UploadHandler(BaseHTTPRequestHandler): @@ -43,7 +43,7 @@ def do_PUT(self) -> None: # noqa: N802 class OutputSaveContractTest(unittest.TestCase): def test_save_bytes_local_output_accepts_path_agnostic_refs(self) -> None: with tempfile.TemporaryDirectory() as td: - ctx = ActionContext( + ctx = RequestContext( request_id="run-1", owner="alice", local_output_dir=td, @@ -56,7 +56,7 @@ def test_save_bytes_local_output_accepts_path_agnostic_refs(self) -> None: def test_save_bytes_rejects_url_refs(self) -> None: with tempfile.TemporaryDirectory() as td: - ctx = ActionContext(request_id="run-2", local_output_dir=td) + ctx = RequestContext(request_id="run-2", local_output_dir=td) with self.assertRaises(ValueError): ctx.save_bytes("https://example.test/out.bin", b"x") @@ -66,7 +66,7 @@ def test_save_bytes_uploads_to_tensorhub_file_api(self) -> None: t.start() try: base = f"http://127.0.0.1:{srv.server_address[1]}" - ctx = ActionContext( + ctx = RequestContext( request_id="run-3", owner="alice", file_api_base_url=base, diff --git a/src/gen_worker/worker.py b/src/gen_worker/worker.py index 80f12a7..8630081 100644 --- a/src/gen_worker/worker.py +++ b/src/gen_worker/worker.py @@ -143,7 +143,7 @@ async def close(self) -> None: # pragma: no cover - interface class _RealtimeSessionState: session_id: str spec: _WebsocketSpec - ctx: ActionContext + ctx: RequestContext loop: asyncio.AbstractEventLoop in_q: 
"asyncio.Queue[Optional[bytes]]" closed: threading.Event @@ -377,7 +377,7 @@ def get_key(self, kid: Optional[str]) -> Optional[Any]: return self._keys[kid] return None -class ActionContext: +class RequestContext: """Context object passed to action functions, allowing cancellation.""" def __init__( self, @@ -478,7 +478,7 @@ def _resolve_local_output_path(self, ref: str) -> Optional[str]: """ Dev-only local output backend. - When local_output_dir is set, ActionContext.save_* will write outputs to disk + When local_output_dir is set, RequestContext.save_* will write outputs to disk instead of using Cozy Hub's file API. """ base = (self._local_output_dir or "").strip() @@ -867,7 +867,7 @@ def __init__( self._task_specs: Dict[str, _TaskSpec] = {} self._ws_specs: Dict[str, _WebsocketSpec] = {} - self._active_tasks: Dict[str, ActionContext] = {} + self._active_tasks: Dict[str, RequestContext] = {} self._active_tasks_lock = threading.Lock() self._request_batch_context: Dict[str, Tuple[str, str]] = {} # request_id -> (batch_id, item_id) self._request_batch_context_lock = threading.Lock() @@ -1612,12 +1612,12 @@ def _inspect_task_spec(self, func: Callable[..., Any]) -> _TaskSpec: sig = inspect.signature(func) params = list(sig.parameters.values()) if not params: - raise ValueError("must accept ctx: ActionContext as first arg") + raise ValueError("must accept ctx: RequestContext as first arg") ctx_name = params[0].name ctx_type = hints.get(ctx_name) - if ctx_type is not ActionContext: - raise ValueError("first argument must be ctx: ActionContext") + if ctx_type is not RequestContext: + raise ValueError("first argument must be ctx: RequestContext") injections: list[InjectionSpec] = [] payload_type: Optional[type[msgspec.Struct]] = None @@ -1733,11 +1733,11 @@ def _inspect_websocket_spec(self, func: Callable[..., Any]) -> _WebsocketSpec: sig = inspect.signature(func) params = list(sig.parameters.values()) if len(params) < 2: - raise ValueError("websocket handler must accept 
(ctx: ActionContext, sock: RealtimeSocket, ...)") + raise ValueError("websocket handler must accept (ctx: RequestContext, sock: RealtimeSocket, ...)") ctx_name = params[0].name - if hints.get(ctx_name) is not ActionContext: - raise ValueError("first argument must be ctx: ActionContext") + if hints.get(ctx_name) is not RequestContext: + raise ValueError("first argument must be ctx: RequestContext") # We do not enforce a concrete socket type here; it is worker-owned and may # be provided by the runtime. We only validate that the param exists. @@ -1835,7 +1835,7 @@ def _send_message(self, message: WorkerSchedulerMessage) -> None: else: logger.warning("Attempted to send message while worker is stopping or stopped.") - def _materialize_assets(self, ctx: ActionContext, obj: Any) -> None: + def _materialize_assets(self, ctx: RequestContext, obj: Any) -> None: if isinstance(obj, Asset): self._materialize_asset(ctx, obj) return @@ -1855,7 +1855,7 @@ def _materialize_assets(self, ctx: ActionContext, obj: Any) -> None: except Exception: continue - def _materialize_asset(self, ctx: ActionContext, asset: Asset) -> None: + def _materialize_asset(self, ctx: RequestContext, asset: Asset) -> None: if asset.local_path: return ref = (asset.ref or "").strip() @@ -3470,7 +3470,7 @@ def _handle_run_request(self, request: TaskExecutionRequest) -> None: except Exception: pass - ctx = ActionContext( + ctx = RequestContext( request_id, emitter=self._emit_progress_event, owner=owner or None, @@ -3603,7 +3603,7 @@ def _handle_realtime_open_cmd(self, cmd: Any) -> None: materialized_input_urls[ks] = vs except Exception: pass - ctx = ActionContext( + ctx = RequestContext( session_id, emitter=self._emit_progress_event, owner=owner or None, @@ -3725,7 +3725,7 @@ def _handle_realtime_close_cmd(self, cmd: Any) -> None: def _execute_task( self, - ctx: ActionContext, + ctx: RequestContext, spec: _TaskSpec, input_payload: bytes, ) -> None: @@ -3746,7 +3746,7 @@ def _execute_task( 
required_models=list(getattr(ctx, "required_models", []) or []), resolved_cozy_models_by_id=resolved_map, ) - # Attach to ctx so ActionContext.save_* and injection paths can accumulate. + # Attach to ctx so RequestContext.save_* and injection paths can accumulate. try: setattr(ctx, "_run_metrics", rm) except Exception: @@ -4285,7 +4285,7 @@ def _shared_disk_volume_info(self, path: Optional[Path] = None) -> Dict[str, Any except Exception: return {} - def _resolve_injected_value(self, ctx: ActionContext, requested_type: Any, model_id: str, inj: InjectionSpec) -> Any: + def _resolve_injected_value(self, ctx: RequestContext, requested_type: Any, model_id: str, inj: InjectionSpec) -> Any: qn = type_qualname(requested_type) rm: Optional[RunMetricsV1] = getattr(ctx, "_run_metrics", None) diff --git a/tests/test_asset_materialization.py b/tests/test_asset_materialization.py index 77755c9..cf4c801 100644 --- a/tests/test_asset_materialization.py +++ b/tests/test_asset_materialization.py @@ -6,7 +6,7 @@ from unittest.mock import patch from gen_worker.types import Asset -from gen_worker.worker import ActionContext, Worker +from gen_worker.worker import RequestContext, Worker class _FakeHeaders(dict): @@ -59,7 +59,7 @@ def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: return _FakeHTTPResponse(data) with patch("urllib.request.build_opener", return_value=_Opener()) as _mock: - w._materialize_asset(ActionContext("run-1", owner=w.owner), a) + w._materialize_asset(RequestContext("run-1", owner=w.owner), a) self.assertGreaterEqual(_mock.call_count, 1) self.assertIsNotNone(a.local_path) @@ -88,7 +88,7 @@ def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: with patch("urllib.request.build_opener", return_value=_Opener()): with self.assertRaises(Exception): - w._materialize_asset(ActionContext("run-1", owner=w.owner), a) + w._materialize_asset(RequestContext("run-1", owner=w.owner), a) def test_materialize_tensorhub_ref(self) -> None: w = 
self._worker(owner="tenant-1") @@ -127,7 +127,7 @@ def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: os.environ["WORKER_MAX_INPUT_FILE_BYTES"] = "9999999" with patch("urllib.request.urlopen", side_effect=fake_urlopen): - ctx = ActionContext( + ctx = RequestContext( "run-1", owner="tenant-1", file_api_base_url="https://tensorhub.example", diff --git a/tests/test_dev_http_runner.py b/tests/test_dev_http_runner.py index 4bb0948..399f732 100644 --- a/tests/test_dev_http_runner.py +++ b/tests/test_dev_http_runner.py @@ -27,7 +27,7 @@ def test_dev_http_runner_writes_outputs(tmp_path: Path, monkeypatch) -> None: """ import msgspec from gen_worker.decorators import worker_function -from gen_worker.worker import ActionContext +from gen_worker.worker import RequestContext class In(msgspec.Struct): prompt: str @@ -36,7 +36,7 @@ class Out(msgspec.Struct): ref: str @worker_function() -def generate(ctx: ActionContext, payload: In) -> Out: +def generate(ctx: RequestContext, payload: In) -> Out: ref = f"runs/{ctx.request_id}/outputs/out.txt" ctx.save_bytes(ref, (payload.prompt + "\\n").encode("utf-8")) return Out(ref=ref) diff --git a/tests/test_discover_models.py b/tests/test_discover_models.py index b021177..fe5f95b 100644 --- a/tests/test_discover_models.py +++ b/tests/test_discover_models.py @@ -56,7 +56,7 @@ def test_discovery_emits_fixed_and_payload_keyspaces(self) -> None: """ from typing import Annotated import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src class Input(msgspec.Struct): @@ -71,7 +71,7 @@ class MockPipeline: @worker_function() def generate_fixed( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[MockPipeline, ModelRef(Src.FIXED, "sdxl")], payload: Input, ) -> Output: @@ -79,7 +79,7 @@ def generate_fixed( @worker_function() def generate_dynamic( - ctx: ActionContext, + ctx: RequestContext, 
pipeline: Annotated[MockPipeline, ModelRef(Src.PAYLOAD, "model_key")], payload: Input, ) -> Output: @@ -150,7 +150,7 @@ def test_missing_fixed_key_fails_discovery(self) -> None: """ from typing import Annotated import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src class Input(msgspec.Struct): @@ -164,7 +164,7 @@ class MockPipeline: @worker_function() def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[MockPipeline, ModelRef(Src.FIXED, "sdxl")], payload: Input, ) -> Output: @@ -216,7 +216,7 @@ def test_missing_payload_keyspace_fails_discovery(self) -> None: """ from typing import Annotated import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function from gen_worker.injection import ModelRef, ModelRefSource as Src class Input(msgspec.Struct): @@ -231,7 +231,7 @@ class MockPipeline: @worker_function() def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[MockPipeline, ModelRef(Src.PAYLOAD, "model_key")], payload: Input, ) -> Output: @@ -285,7 +285,7 @@ def test_batch_dimension_emitted(self) -> None: (src_dir / "__init__.py").write_text( """ import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function class Input(msgspec.Struct): items: list[str] @@ -294,7 +294,7 @@ class Output(msgspec.Struct): ok: bool @worker_function() -def caption(ctx: ActionContext, payload: Input) -> Output: +def caption(ctx: RequestContext, payload: Input) -> Output: return Output(ok=True) """.lstrip(), encoding="utf-8", @@ -346,7 +346,7 @@ def test_inline_ref_in_modelref_rejected(self) -> None: """ from typing import Annotated import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function from gen_worker.injection 
import ModelRef, ModelRefSource as Src class Input(msgspec.Struct): @@ -360,7 +360,7 @@ class MockPipeline: @worker_function() def generate( - ctx: ActionContext, + ctx: RequestContext, pipeline: Annotated[ MockPipeline, ModelRef(Src.FIXED, "sdxl", ref="stabilityai/stable-diffusion-xl-base-1.0"), diff --git a/tests/test_discover_names.py b/tests/test_discover_names.py index 615fb4b..56b9db8 100644 --- a/tests/test_discover_names.py +++ b/tests/test_discover_names.py @@ -49,7 +49,7 @@ def test_function_name_slugified_from_python_name(self) -> None: (src_dir / "main.py").write_text( """ import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function class Input(msgspec.Struct): x: int @@ -58,7 +58,7 @@ class Output(msgspec.Struct): y: int @worker_function() -def medasr_transcribe(ctx: ActionContext, payload: Input) -> Output: +def medasr_transcribe(ctx: RequestContext, payload: Input) -> Output: return Output(y=payload.x) """ ) @@ -106,7 +106,7 @@ def test_endpoint_slug_collision_fails(self) -> None: (src_dir / "main.py").write_text( """ import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function class Input(msgspec.Struct): x: int @@ -115,11 +115,11 @@ class Output(msgspec.Struct): y: int @worker_function() -def image_worker(ctx: ActionContext, payload: Input) -> Output: +def image_worker(ctx: RequestContext, payload: Input) -> Output: return Output(y=payload.x) @worker_function() -def image__worker(ctx: ActionContext, payload: Input) -> Output: +def image__worker(ctx: RequestContext, payload: Input) -> Output: return Output(y=payload.x) """ ) @@ -164,7 +164,7 @@ def test_endpoint_name_slugified_from_tensorhub_toml(self) -> None: (src_dir / "main.py").write_text( """ import msgspec -from gen_worker import ActionContext, worker_function +from gen_worker import RequestContext, worker_function class Input(msgspec.Struct): x: int @@ -173,7 
+173,7 @@ class Output(msgspec.Struct): y: int @worker_function() -def generate(ctx: ActionContext, payload: Input) -> Output: +def generate(ctx: RequestContext, payload: Input) -> Output: return Output(y=payload.x) """ ) diff --git a/tests/test_file_token_scoping.py b/tests/test_file_token_scoping.py index 0632223..12a1019 100644 --- a/tests/test_file_token_scoping.py +++ b/tests/test_file_token_scoping.py @@ -8,7 +8,7 @@ from gen_worker.errors import AuthError from gen_worker.types import Asset -from gen_worker.worker import ActionContext, Worker +from gen_worker.worker import RequestContext, Worker class _FakeHeaders(dict): @@ -55,7 +55,7 @@ class TestFileTokenScoping(unittest.TestCase): """Test that per-run file tokens are used instead of env vars.""" def test_save_bytes_uses_per_run_token(self) -> None: - """save_bytes should use the token from ActionContext, not env.""" + """save_bytes should use the token from RequestContext, not env.""" captured_auth: list[str] = [] def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: @@ -70,7 +70,7 @@ def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: os.environ["FILE_API_TOKEN"] = "env-token-should-not-be-used" os.environ["FILE_API_BASE_URL"] = "https://should-not-be-used.example" - ctx = ActionContext( + ctx = RequestContext( "run-123", owner="tenant-1", file_api_base_url="https://tensorhub.example", @@ -99,7 +99,7 @@ def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: os.environ["FILE_API_TOKEN"] = "env-fallback-token" os.environ["FILE_API_BASE_URL"] = "https://tensorhub.example" - ctx = ActionContext( + ctx = RequestContext( "run-456", owner="tenant-1", # No file_api_token provided @@ -121,7 +121,7 @@ def test_save_bytes_raises_auth_error_on_401(self) -> None: def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: raise _make_http_error(401, "Unauthorized") - ctx = ActionContext( + ctx = RequestContext( "run-789", owner="tenant-1", 
file_api_base_url="https://tensorhub.example", @@ -140,7 +140,7 @@ def test_save_bytes_raises_auth_error_on_403(self) -> None: def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: raise _make_http_error(403, "Forbidden") - ctx = ActionContext( + ctx = RequestContext( "run-abc", owner="tenant-1", file_api_base_url="https://tensorhub.example", @@ -158,7 +158,7 @@ def test_save_bytes_create_raises_auth_error_on_401(self) -> None: def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: raise _make_http_error(401, "Unauthorized") - ctx = ActionContext( + ctx = RequestContext( "run-def", owner="tenant-1", file_api_base_url="https://tensorhub.example", @@ -183,7 +183,7 @@ def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: os.environ["WORKER_RUN_DIR"] = td os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - ctx = ActionContext( + ctx = RequestContext( "run-ghi", owner="tenant-1", file_api_base_url="https://tensorhub.example", @@ -210,7 +210,7 @@ def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: os.environ["WORKER_RUN_DIR"] = td os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - ctx = ActionContext( + ctx = RequestContext( "run-jkl", owner="tenant-1", file_api_base_url="https://tensorhub.example", diff --git a/tests/test_function_compute_capability.py b/tests/test_function_compute_capability.py index 3ecd452..d8066cb 100644 --- a/tests/test_function_compute_capability.py +++ b/tests/test_function_compute_capability.py @@ -57,7 +57,7 @@ def test_discovery_emits_function_compute_capability_requirement(self) -> None: (src_dir / "main.py").write_text( """ import msgspec -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function class Input(msgspec.Struct): x: int @@ -66,7 +66,7 @@ class Output(msgspec.Struct): y: int @worker_function(resources=ResourceRequirements(compute_capability_min=10)) -def 
generate_nvfp4(ctx: ActionContext, payload: Input) -> Output: +def generate_nvfp4(ctx: RequestContext, payload: Input) -> Output: return Output(y=payload.x) """.lstrip(), encoding="utf-8", diff --git a/tests/test_hf_injection_normalization.py b/tests/test_hf_injection_normalization.py index a9eca59..4f8adc4 100644 --- a/tests/test_hf_injection_normalization.py +++ b/tests/test_hf_injection_normalization.py @@ -1,7 +1,7 @@ from __future__ import annotations from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource as Src -from gen_worker.worker import ActionContext, Worker +from gen_worker.worker import RequestContext, Worker class _DummyModel: @@ -37,7 +37,7 @@ def _bare_worker() -> Worker: def test_non_diffusers_hf_ref_normalizes_without_downloader() -> None: w = _bare_worker() - ctx = ActionContext("run-hf-no-downloader") + ctx = RequestContext("run-hf-no-downloader") inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] @@ -50,7 +50,7 @@ def test_non_diffusers_hf_ref_uses_downloader_path_when_available() -> None: w = _bare_worker() dl = _DownloaderStub("/tmp/cozy-model-cache/hf-owner-repo-main") w._downloader = dl - ctx = ActionContext("run-hf-downloader") + ctx = RequestContext("run-hf-downloader") inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] diff --git a/tests/test_injection_type_enforcement.py b/tests/test_injection_type_enforcement.py index 05047de..7b35d37 100644 --- a/tests/test_injection_type_enforcement.py +++ b/tests/test_injection_type_enforcement.py @@ -1,7 +1,7 @@ import unittest from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource -from gen_worker.worker import ActionContext, Worker 
+from gen_worker.worker import RequestContext, Worker class _StubModelManager: @@ -20,7 +20,7 @@ class _ActualPipeline: class TestInjectionTypeEnforcement(unittest.TestCase): def test_rejects_model_manager_type_mismatch(self) -> None: w = Worker(user_module_names=[], model_manager=_StubModelManager(), worker_jwt="dummy-worker-jwt") - ctx = ActionContext("run-1") + ctx = RequestContext("run-1") inj = InjectionSpec( param_name="pipeline", param_type=_ExpectedPipeline, diff --git a/tests/test_mock_orchestrator_dev.py b/tests/test_mock_orchestrator_dev.py index 0cb4dd8..47c1918 100644 --- a/tests/test_mock_orchestrator_dev.py +++ b/tests/test_mock_orchestrator_dev.py @@ -33,7 +33,7 @@ def test_mock_orchestrator_can_run_one_task(tmp_path: Path) -> None: from __future__ import annotations import msgspec -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function class Input(msgspec.Struct): @@ -45,7 +45,7 @@ class Output(msgspec.Struct): @worker_function(ResourceRequirements()) -def hello(ctx: ActionContext, payload: Input) -> Output: +def hello(ctx: RequestContext, payload: Input) -> Output: return Output(message=f"hello {payload.name}") """.lstrip(), encoding="utf-8", diff --git a/tests/test_realtime_socket.py b/tests/test_realtime_socket.py index 66a9067..06dcccf 100644 --- a/tests/test_realtime_socket.py +++ b/tests/test_realtime_socket.py @@ -1,12 +1,12 @@ import time import unittest -from gen_worker.worker import ActionContext, RealtimeSocket, Worker, pb +from gen_worker.worker import RequestContext, RealtimeSocket, Worker, pb from gen_worker.decorators import worker_websocket, ResourceRequirements @worker_websocket(ResourceRequirements()) -async def echo_ws(ctx: ActionContext, sock: RealtimeSocket) -> None: +async def echo_ws(ctx: RequestContext, sock: RealtimeSocket) -> None: await sock.send_json({"status": "ready"}) async for data in sock.iter_bytes(): await 
sock.send_bytes(data) diff --git a/tests/test_action_context_local_outputs.py b/tests/test_request_context_local_outputs.py similarity index 87% rename from tests/test_action_context_local_outputs.py rename to tests/test_request_context_local_outputs.py index 19b33e8..f65f6ec 100644 --- a/tests/test_action_context_local_outputs.py +++ b/tests/test_request_context_local_outputs.py @@ -2,11 +2,11 @@ from pathlib import Path -from gen_worker.worker import ActionContext +from gen_worker.worker import RequestContext def test_action_context_local_output_backend(tmp_path: Path) -> None: - ctx = ActionContext( + ctx = RequestContext( "rid1", local_output_dir=str(tmp_path), owner="o1", diff --git a/tests/test_action_context_partition.py b/tests/test_request_context_partition.py similarity index 82% rename from tests/test_action_context_partition.py rename to tests/test_request_context_partition.py index dd662d3..3d02703 100644 --- a/tests/test_action_context_partition.py +++ b/tests/test_request_context_partition.py @@ -2,11 +2,11 @@ import pytest -from gen_worker.worker import ActionContext +from gen_worker.worker import RequestContext def test_partition_context_exposes_lineage_and_item_fields() -> None: - ctx = ActionContext( + ctx = RequestContext( "req-1", parent_request_id="parent-1", child_request_id="child-1", @@ -26,16 +26,16 @@ def test_partition_context_exposes_lineage_and_item_fields() -> None: def test_item_output_ref_prefers_item_id_when_present() -> None: - ctx = ActionContext("req-2", item_id="item-custom", item_index=7) + ctx = RequestContext("req-2", item_id="item-custom", item_index=7) ref = ctx.item_output_ref("/result.json") assert ref == "runs/req-2/outputs/items/item-custom/result.json" def test_item_output_ref_falls_back_to_index_then_default() -> None: - with_index = ActionContext("req-3", item_index=7) + with_index = RequestContext("req-3", item_index=7) assert with_index.item_output_ref("out.bin") == "runs/req-3/outputs/items/item-000007/out.bin" - 
without_index = ActionContext("req-4") + without_index = RequestContext("req-4") assert without_index.item_output_ref("out.bin") == "runs/req-4/outputs/items/item-000000/out.bin" with pytest.raises(ValueError): diff --git a/tests/test_signature_contract_and_incremental.py b/tests/test_signature_contract_and_incremental.py index 33d2e5c..98b81ca 100644 --- a/tests/test_signature_contract_and_incremental.py +++ b/tests/test_signature_contract_and_incremental.py @@ -5,7 +5,7 @@ import msgspec from gen_worker.injection import ModelRef, ModelRefSource as Src -from gen_worker.worker import ActionContext, Worker +from gen_worker.worker import RequestContext, Worker class Input(msgspec.Struct): @@ -67,7 +67,7 @@ def _make_worker() -> Worker: class TestContractAndIncremental(unittest.TestCase): def test_rejects_missing_return_annotation(self) -> None: - def bad(ctx: ActionContext, payload: Input): # type: ignore[no-untyped-def] + def bad(ctx: RequestContext, payload: Input): # type: ignore[no-untyped-def] return Delta(delta="x") w = _make_worker() @@ -75,7 +75,7 @@ def bad(ctx: ActionContext, payload: Input): # type: ignore[no-untyped-def] w._inspect_task_spec(bad) # type: ignore[arg-type] def test_incremental_output_emits_deltas_and_completed(self) -> None: - def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: + def stream(ctx: RequestContext, payload: Input) -> Iterator[Delta]: yield Delta(delta=payload.text) yield Delta(delta="!") @@ -83,7 +83,7 @@ def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: spec = w._inspect_task_spec(stream) # type: ignore[arg-type] self.assertEqual(spec.output_mode, "incremental") - ctx = ActionContext("run-1", emitter=lambda _e: None) + ctx = RequestContext("run-1", emitter=lambda _e: None) payload = Input(text="hi") b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) w._execute_task(ctx, spec, b) @@ -115,13 +115,13 @@ def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: 
self.assertEqual(events[2][0], "output.completed") def test_incremental_output_emits_typed_token_messages(self) -> None: - def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: + def stream(ctx: RequestContext, payload: Input) -> Iterator[Delta]: yield Delta(delta=payload.text) w = _make_worker() spec = w._inspect_task_spec(stream) # type: ignore[arg-type] - ctx = ActionContext("run-typed-1", emitter=lambda _e: None) + ctx = RequestContext("run-typed-1", emitter=lambda _e: None) payload = Input(text="hello") b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) w._execute_task(ctx, spec, b) @@ -132,7 +132,7 @@ def stream(ctx: ActionContext, payload: Input) -> Iterator[Delta]: def test_payload_model_key_resolves_via_endpoint_map(self) -> None: def fn( - ctx: ActionContext, + ctx: RequestContext, model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], payload: InputWithModel, ) -> Output: @@ -142,7 +142,7 @@ def fn( w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo"}} w._release_allowed_model_ids = {"google/foo"} spec = w._inspect_task_spec(fn) # type: ignore[arg-type] - ctx = ActionContext("run-2", emitter=lambda _e: None) + ctx = RequestContext("run-2", emitter=lambda _e: None) payload = InputWithModel(text="x", model_key="a") b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) w._execute_task(ctx, spec, b) @@ -156,7 +156,7 @@ def fn( def test_payload_model_key_rejects_not_allowlisted(self) -> None: def fn( - ctx: ActionContext, + ctx: RequestContext, model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], payload: InputWithModel, ) -> Output: @@ -166,7 +166,7 @@ def fn( w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo", "b": "google/bar"}} w._release_allowed_model_ids = {"google/foo"} spec = w._inspect_task_spec(fn) # type: ignore[arg-type] - ctx = ActionContext("run-3", emitter=lambda _e: None) + ctx = RequestContext("run-3", emitter=lambda _e: None) payload = InputWithModel(text="x", 
model_key="b") b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) w._execute_task(ctx, spec, b) diff --git a/tests/test_worker_jwt_rotation.py b/tests/test_worker_jwt_rotation.py index f74ca38..b026696 100644 --- a/tests/test_worker_jwt_rotation.py +++ b/tests/test_worker_jwt_rotation.py @@ -42,7 +42,7 @@ def test_worker_jwt_rotation_updates_next_reconnect(tmp_path: Path, monkeypatch: from __future__ import annotations import msgspec -from gen_worker import ActionContext, ResourceRequirements, worker_function +from gen_worker import RequestContext, ResourceRequirements, worker_function class Input(msgspec.Struct): name: str @@ -51,7 +51,7 @@ class Output(msgspec.Struct): message: str @worker_function(ResourceRequirements()) -def hello(ctx: ActionContext, payload: Input) -> Output: +def hello(ctx: RequestContext, payload: Input) -> Output: return Output(message=f"hello {payload.name}") """.lstrip(), encoding="utf-8", diff --git a/tests/test_worker_startup_visibility.py b/tests/test_worker_startup_visibility.py index ace2c46..003a1cf 100644 --- a/tests/test_worker_startup_visibility.py +++ b/tests/test_worker_startup_visibility.py @@ -97,7 +97,7 @@ def test_run_raises_when_registration_timeout_reached(tmp_path: Path, monkeypatc from __future__ import annotations import msgspec from gen_worker.decorators import worker_function -from gen_worker.worker import ActionContext +from gen_worker.worker import RequestContext class Input(msgspec.Struct): name: str @@ -106,7 +106,7 @@ class Output(msgspec.Struct): ok: bool @worker_function() -def tiny(ctx: ActionContext, payload: Input) -> Output: +def tiny(ctx: RequestContext, payload: Input) -> Output: return Output(ok=True) """.lstrip(), encoding="utf-8", diff --git a/tests/test_worker_telemetry_issue67.py b/tests/test_worker_telemetry_issue67.py index 0308aee..ea54041 100644 --- a/tests/test_worker_telemetry_issue67.py +++ b/tests/test_worker_telemetry_issue67.py @@ -10,7 +10,7 @@ import pytest from 
gen_worker.model_cache import ModelCache -from gen_worker.worker import ActionContext, Worker, pb, _TaskSpec +from gen_worker.worker import RequestContext, Worker, pb, _TaskSpec from gen_worker.decorators import ResourceRequirements @@ -22,7 +22,7 @@ class _Output(msgspec.Struct): ok: bool -def _sleep_fn(ctx: ActionContext, payload: _Input) -> _Output: +def _sleep_fn(ctx: RequestContext, payload: _Input) -> _Output: time.sleep(payload.sleep_s) return _Output(ok=True) @@ -54,8 +54,8 @@ def test_gpu_is_busy_refcount_overlapping_inference() -> None: injections=(), ) - ctx1 = ActionContext("r1", emitter=lambda e: None) - ctx2 = ActionContext("r2", emitter=lambda e: None) + ctx1 = RequestContext("r1", emitter=lambda e: None) + ctx2 = RequestContext("r2", emitter=lambda e: None) payload = msgspec.msgpack.encode({"sleep_s": 0.25}) t1 = threading.Thread(target=w._execute_task, args=(ctx1, spec, payload), daemon=True) From 1e8db037d2cabf0a631a15f6c39fb6761bab9e8e Mon Sep 17 00:00:00 2001 From: arpbansal Date: Wed, 18 Mar 2026 08:36:37 +0000 Subject: [PATCH 3/4] delete older test files --- tests/test_asset_materialization.py | 148 ------ tests/test_entrypoint_cache_preflight.py | 42 -- tests/test_entrypoint_worker_mode.py | 17 - tests/test_error_mapping.py | 59 --- tests/test_file_token_scoping.py | 257 ---------- tests/test_hf_injection_normalization.py | 60 --- tests/test_injection_type_enforcement.py | 31 -- tests/test_mock_orchestrator_dev.py | 109 ----- tests/test_model_cache.py | 312 ------------ tests/test_payload_model_selection.py | 78 --- tests/test_pipeline_thread_safety.py | 355 -------------- tests/test_realtime_socket.py | 83 ---- tests/test_runtime_batching_config_cmd.py | 113 ----- tests/test_scheduler_model_scope.py | 36 -- ...test_signature_contract_and_incremental.py | 178 ------- tests/test_trainer_checkpointing.py | 54 --- tests/test_trainer_runtime_orchestrated.py | 448 ------------------ .../test_training_endpoints_contract_smoke.py | 185 -------- 
tests/test_worker_leader_redirect.py | 19 - tests/test_worker_model_keyspace.py | 53 --- tests/test_worker_startup_visibility.py | 131 ----- tests/test_worker_telemetry_issue67.py | 187 -------- tests/test_worker_wire_protocol.py | 28 -- 23 files changed, 2983 deletions(-) delete mode 100644 tests/test_asset_materialization.py delete mode 100644 tests/test_entrypoint_cache_preflight.py delete mode 100644 tests/test_entrypoint_worker_mode.py delete mode 100644 tests/test_error_mapping.py delete mode 100644 tests/test_file_token_scoping.py delete mode 100644 tests/test_hf_injection_normalization.py delete mode 100644 tests/test_injection_type_enforcement.py delete mode 100644 tests/test_mock_orchestrator_dev.py delete mode 100644 tests/test_model_cache.py delete mode 100644 tests/test_payload_model_selection.py delete mode 100644 tests/test_pipeline_thread_safety.py delete mode 100644 tests/test_realtime_socket.py delete mode 100644 tests/test_runtime_batching_config_cmd.py delete mode 100644 tests/test_scheduler_model_scope.py delete mode 100644 tests/test_signature_contract_and_incremental.py delete mode 100644 tests/test_trainer_checkpointing.py delete mode 100644 tests/test_trainer_runtime_orchestrated.py delete mode 100644 tests/test_training_endpoints_contract_smoke.py delete mode 100644 tests/test_worker_leader_redirect.py delete mode 100644 tests/test_worker_model_keyspace.py delete mode 100644 tests/test_worker_startup_visibility.py delete mode 100644 tests/test_worker_telemetry_issue67.py delete mode 100644 tests/test_worker_wire_protocol.py diff --git a/tests/test_asset_materialization.py b/tests/test_asset_materialization.py deleted file mode 100644 index cf4c801..0000000 --- a/tests/test_asset_materialization.py +++ /dev/null @@ -1,148 +0,0 @@ -import hashlib -import os -import tempfile -import unittest -from typing import Any, Dict, Optional -from unittest.mock import patch - -from gen_worker.types import Asset -from gen_worker.worker import 
RequestContext, Worker - - -class _FakeHeaders(dict): - def get(self, key: str, default: Optional[str] = None) -> Optional[str]: # type: ignore[override] - return super().get(key, default) - - -class _FakeHTTPResponse: - def __init__(self, body: bytes, status: int = 200, headers: Optional[Dict[str, str]] = None) -> None: - self._body = body - self._pos = 0 - self.status = status - self.headers: Any = _FakeHeaders(headers or {}) - - def read(self, n: int = -1) -> bytes: - if self._pos >= len(self._body): - return b"" - if n is None or n < 0: - n = len(self._body) - self._pos - chunk = self._body[self._pos : self._pos + n] - self._pos += len(chunk) - return chunk - - def __enter__(self) -> "_FakeHTTPResponse": - return self - - def __exit__(self, exc_type, exc, tb) -> None: - return None - - -class TestAssetMaterialization(unittest.TestCase): - def _worker(self, owner: str = "tenant-1") -> Worker: - w = Worker.__new__(Worker) - w.owner = owner - return w - - def test_materialize_external_url(self) -> None: - w = self._worker() - # Use a literal public IP so SSRF/DNS resolution doesn't depend on DNS working. 
- a = Asset(ref="https://1.1.1.1/a.png") - data = b"\x89PNG\r\n\x1a\nhello" - - with tempfile.TemporaryDirectory() as td: - os.environ["WORKER_RUN_DIR"] = td - os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - - class _Opener: - def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: - _ = req, timeout - return _FakeHTTPResponse(data) - - with patch("urllib.request.build_opener", return_value=_Opener()) as _mock: - w._materialize_asset(RequestContext("run-1", owner=w.owner), a) - self.assertGreaterEqual(_mock.call_count, 1) - - self.assertIsNotNone(a.local_path) - assert a.local_path is not None - with open(a.local_path, "rb") as f: - self.assertEqual(f.read(), data) - - self.assertEqual(a.size_bytes, len(data)) - self.assertEqual(a.sha256, hashlib.sha256(data).hexdigest()) - self.assertEqual(a.mime_type, "image/png") - - def test_materialize_external_url_size_cap(self) -> None: - w = self._worker() - a = Asset(ref="https://1.1.1.1/a.bin") - data = b"1234" - - with tempfile.TemporaryDirectory() as td: - os.environ["WORKER_RUN_DIR"] = td - os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - os.environ["WORKER_MAX_INPUT_FILE_BYTES"] = "1" - - class _Opener: - def open(self, req: Any, timeout: int = 0) -> _FakeHTTPResponse: - _ = req, timeout - return _FakeHTTPResponse(data) - - with patch("urllib.request.build_opener", return_value=_Opener()): - with self.assertRaises(Exception): - w._materialize_asset(RequestContext("run-1", owner=w.owner), a) - - def test_materialize_tensorhub_ref(self) -> None: - w = self._worker(owner="tenant-1") - a = Asset(ref="my-uploads/cat.png") - body = b"\x89PNG\r\n\x1a\ncat" - sha = hashlib.sha256(body).hexdigest() - - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - # HEAD request (urllib.request.Request) - method = getattr(req, "method", None) - items = getattr(req, "header_items", None) - hdrs: Dict[str, str] = {} - if callable(items): - for k, v in items(): - hdrs[str(k).lower()] = 
str(v) - if method == "HEAD": - self.assertEqual(hdrs.get("authorization"), "Bearer tok") - self.assertEqual(hdrs.get("x-cozy-owner"), "tenant-1") - return _FakeHTTPResponse( - b"", - status=200, - headers={ - "X-Cozy-Size-Bytes": str(len(body)), - "X-Cozy-SHA256": sha, - "X-Cozy-Mime-Type": "image/png", - }, - ) - # GET request - self.assertEqual(hdrs.get("authorization"), "Bearer tok") - self.assertEqual(hdrs.get("x-cozy-owner"), "tenant-1") - return _FakeHTTPResponse(body, status=200, headers={"Content-Type": "image/png"}) - - with tempfile.TemporaryDirectory() as td: - os.environ["WORKER_RUN_DIR"] = td - os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - os.environ["WORKER_MAX_INPUT_FILE_BYTES"] = "9999999" - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - ctx = RequestContext( - "run-1", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="tok", - ) - w._materialize_asset(ctx, a) - - self.assertIsNotNone(a.local_path) - assert a.local_path is not None - with open(a.local_path, "rb") as f: - self.assertEqual(f.read(), body) - self.assertEqual(a.mime_type, "image/png") - self.assertEqual(a.size_bytes, len(body)) - self.assertEqual(a.sha256, sha) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_entrypoint_cache_preflight.py b/tests/test_entrypoint_cache_preflight.py deleted file mode 100644 index 06a90b2..0000000 --- a/tests/test_entrypoint_cache_preflight.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -import pytest - -from gen_worker import entrypoint - - -def test_preflight_cache_dirs_fails_without_fallback(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - monkeypatch.setenv("TENSORHUB_CACHE_DIR", str(tmp_path / "primary-root")) - monkeypatch.delenv("WORKER_LOCAL_MODEL_CACHE_DIR", raising=False) - - def _fail(_: Path) -> None: - raise PermissionError("permission denied") - - monkeypatch.setattr(entrypoint, 
"_probe_cache_path_writable", _fail) - - with pytest.raises(RuntimeError, match="tensorhub CAS path"): - entrypoint._preflight_cache_dirs() - - -def test_preflight_cache_dirs_uses_tensorhub_cache_dir_root( - monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: - root = tmp_path / "cache-root" - primary = root / "cas" - monkeypatch.setenv("TENSORHUB_CACHE_DIR", str(root)) - monkeypatch.delenv("WORKER_LOCAL_MODEL_CACHE_DIR", raising=False) - - def _probe(path: Path) -> None: - path.mkdir(parents=True, exist_ok=True) - probe = path / ".probe" - probe.write_bytes(b"ok") - probe.unlink() - - monkeypatch.setattr(entrypoint, "_probe_cache_path_writable", _probe) - - cfg = entrypoint._preflight_cache_dirs() - assert cfg["model_cache_dir"] == str(primary) - assert cfg["local_model_cache_dir"] == "" - assert primary.exists() diff --git a/tests/test_entrypoint_worker_mode.py b/tests/test_entrypoint_worker_mode.py deleted file mode 100644 index 9c4a2fc..0000000 --- a/tests/test_entrypoint_worker_mode.py +++ /dev/null @@ -1,17 +0,0 @@ -from __future__ import annotations - -import pytest - -from gen_worker import entrypoint -from gen_worker.trainer import runtime as trainer_runtime - - -def test_entrypoint_routes_to_trainer_mode(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.setenv("WORKER_MODE", "trainer") - monkeypatch.setattr(trainer_runtime, "run_training_runtime_from_env", lambda: 13) - assert entrypoint._run_main() == 13 - - -def test_entrypoint_rejects_invalid_worker_mode(monkeypatch: pytest.MonkeyPatch) -> None: - monkeypatch.setenv("WORKER_MODE", "wat") - assert entrypoint._run_main() == 1 diff --git a/tests/test_error_mapping.py b/tests/test_error_mapping.py deleted file mode 100644 index d45956c..0000000 --- a/tests/test_error_mapping.py +++ /dev/null @@ -1,59 +0,0 @@ -import unittest - -from gen_worker.errors import CanceledError, FatalError, ResourceError, RetryableError, ValidationError -from gen_worker.worker import Worker - - -class 
TestErrorMapping(unittest.TestCase): - def _worker(self) -> Worker: - # Avoid running Worker.__init__ (network/env-heavy); these helpers don't depend on init state. - return Worker.__new__(Worker) - - def test_sanitize_safe_message_redacts_tokens_urls_paths(self) -> None: - w = self._worker() - msg = "Bearer abc.def.ghi https://example.com/secret /home/user/token.txt" - out = w._sanitize_safe_message(msg) - self.assertNotIn("abc.def.ghi", out) - self.assertNotIn("https://example.com/secret", out) - self.assertNotIn("/home/user/token.txt", out) - - def test_map_exception_validation(self) -> None: - w = self._worker() - error_type, retryable, safe, internal = w._map_exception(ValidationError("bad input")) - self.assertEqual(error_type, "validation") - self.assertFalse(retryable) - self.assertIn("bad input", safe) - self.assertIn("ValidationError", internal) - - def test_map_exception_retryable(self) -> None: - w = self._worker() - error_type, retryable, safe, _ = w._map_exception(RetryableError("temporary")) - self.assertEqual(error_type, "retryable") - self.assertTrue(retryable) - self.assertIn("temporary", safe) - - def test_map_exception_fatal(self) -> None: - w = self._worker() - error_type, retryable, safe, _ = w._map_exception(FatalError("no")) - self.assertEqual(error_type, "fatal") - self.assertFalse(retryable) - self.assertIn("no", safe) - - def test_map_exception_resource(self) -> None: - w = self._worker() - error_type, retryable, safe, _ = w._map_exception(ResourceError("oom")) - self.assertEqual(error_type, "resource") - self.assertFalse(retryable) - self.assertIn("oom", safe) - - def test_map_exception_canceled(self) -> None: - w = self._worker() - error_type, retryable, safe, _ = w._map_exception(CanceledError("stop")) - self.assertEqual(error_type, "canceled") - self.assertFalse(retryable) - self.assertEqual(safe, "canceled") - - -if __name__ == "__main__": - unittest.main() - diff --git a/tests/test_file_token_scoping.py 
b/tests/test_file_token_scoping.py deleted file mode 100644 index 12a1019..0000000 --- a/tests/test_file_token_scoping.py +++ /dev/null @@ -1,257 +0,0 @@ -"""Tests for per-run file token scoping (issue #50).""" -import os -import tempfile -import unittest -from typing import Any, Dict, Optional -from unittest.mock import patch -import urllib.error - -from gen_worker.errors import AuthError -from gen_worker.types import Asset -from gen_worker.worker import RequestContext, Worker - - -class _FakeHeaders(dict): - def get(self, key: str, default: Optional[str] = None) -> Optional[str]: # type: ignore[override] - return super().get(key, default) - - -class _FakeHTTPResponse: - def __init__(self, body: bytes, status: int = 200, headers: Optional[Dict[str, str]] = None) -> None: - self._body = body - self._pos = 0 - self.status = status - self.headers: Any = _FakeHeaders(headers or {}) - - def read(self, n: int = -1) -> bytes: - if self._pos >= len(self._body): - return b"" - if n is None or n < 0: - n = len(self._body) - self._pos - chunk = self._body[self._pos : self._pos + n] - self._pos += len(chunk) - return chunk - - def __enter__(self) -> "_FakeHTTPResponse": - return self - - def __exit__(self, exc_type, exc, tb) -> None: - return None - - -def _make_http_error(code: int, msg: str = "error") -> urllib.error.HTTPError: - """Create a real HTTPError for testing.""" - import io - return urllib.error.HTTPError( - url="https://example.com", - code=code, - msg=msg, - hdrs={}, # type: ignore - fp=io.BytesIO(b""), - ) - - -class TestFileTokenScoping(unittest.TestCase): - """Test that per-run file tokens are used instead of env vars.""" - - def test_save_bytes_uses_per_run_token(self) -> None: - """save_bytes should use the token from RequestContext, not env.""" - captured_auth: list[str] = [] - - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - items = getattr(req, "header_items", None) - if callable(items): - for k, v in items(): - if str(k).lower() == 
"authorization": - captured_auth.append(str(v)) - return _FakeHTTPResponse(b'{"size_bytes": 5, "sha256": "abc123"}', status=200) - - # Set env var to a different value to prove it's not used - os.environ["FILE_API_TOKEN"] = "env-token-should-not-be-used" - os.environ["FILE_API_BASE_URL"] = "https://should-not-be-used.example" - - ctx = RequestContext( - "run-123", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="per-run-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - asset = ctx.save_bytes("runs/run-123/outputs/test.bin", b"hello") - - self.assertEqual(len(captured_auth), 1) - self.assertEqual(captured_auth[0], "Bearer per-run-token") - self.assertIsNotNone(asset) - - def test_save_bytes_falls_back_to_env_when_no_per_run_token(self) -> None: - """save_bytes should fall back to env var if no per-run token provided.""" - captured_auth: list[str] = [] - - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - items = getattr(req, "header_items", None) - if callable(items): - for k, v in items(): - if str(k).lower() == "authorization": - captured_auth.append(str(v)) - return _FakeHTTPResponse(b'{"size_bytes": 5, "sha256": "abc123"}', status=200) - - os.environ["FILE_API_TOKEN"] = "env-fallback-token" - os.environ["FILE_API_BASE_URL"] = "https://tensorhub.example" - - ctx = RequestContext( - "run-456", - owner="tenant-1", - # No file_api_token provided - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - asset = ctx.save_bytes("runs/run-456/outputs/test.bin", b"hello") - - self.assertEqual(len(captured_auth), 1) - self.assertEqual(captured_auth[0], "Bearer env-fallback-token") - self.assertIsNotNone(asset) - - -class TestAuthErrorHandling(unittest.TestCase): - """Test that 401/403 errors raise AuthError (non-retryable).""" - - def test_save_bytes_raises_auth_error_on_401(self) -> None: - """save_bytes should raise AuthError on 401 response.""" - def 
fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - raise _make_http_error(401, "Unauthorized") - - ctx = RequestContext( - "run-789", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="expired-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - with self.assertRaises(AuthError) as cm: - ctx.save_bytes("runs/run-789/outputs/test.bin", b"hello") - - self.assertIn("401", str(cm.exception)) - self.assertIn("file_token", str(cm.exception)) - - def test_save_bytes_raises_auth_error_on_403(self) -> None: - """save_bytes should raise AuthError on 403 response.""" - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - raise _make_http_error(403, "Forbidden") - - ctx = RequestContext( - "run-abc", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="wrong-scope-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - with self.assertRaises(AuthError) as cm: - ctx.save_bytes("runs/run-abc/outputs/test.bin", b"hello") - - self.assertIn("403", str(cm.exception)) - - def test_save_bytes_create_raises_auth_error_on_401(self) -> None: - """save_bytes_create should raise AuthError on 401 response.""" - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - raise _make_http_error(401, "Unauthorized") - - ctx = RequestContext( - "run-def", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="bad-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - with self.assertRaises(AuthError): - ctx.save_bytes_create("runs/run-def/outputs/new.bin", b"data") - - def test_materialize_asset_raises_auth_error_on_401(self) -> None: - """_materialize_asset should raise AuthError on 401 for HEAD request.""" - w = Worker.__new__(Worker) - w.owner = "tenant-1" - - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - raise _make_http_error(401, "Unauthorized") - 
- a = Asset(ref="some-file.png") - - with tempfile.TemporaryDirectory() as td: - os.environ["WORKER_RUN_DIR"] = td - os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - - ctx = RequestContext( - "run-ghi", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="expired-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - with self.assertRaises(AuthError) as cm: - w._materialize_asset(ctx, a) - - self.assertIn("401", str(cm.exception)) - - def test_materialize_asset_raises_auth_error_on_403(self) -> None: - """_materialize_asset should raise AuthError on 403 for HEAD request.""" - w = Worker.__new__(Worker) - w.owner = "tenant-1" - - def fake_urlopen(req: Any, timeout: int = 0) -> _FakeHTTPResponse: - raise _make_http_error(403, "Forbidden - path not in allowed prefixes") - - a = Asset(ref="other-run/outputs/secret.png") - - with tempfile.TemporaryDirectory() as td: - os.environ["WORKER_RUN_DIR"] = td - os.environ["WORKER_CACHE_DIR"] = os.path.join(td, "cache") - - ctx = RequestContext( - "run-jkl", - owner="tenant-1", - file_api_base_url="https://tensorhub.example", - file_api_token="scoped-token", - ) - - with patch("urllib.request.urlopen", side_effect=fake_urlopen): - with self.assertRaises(AuthError) as cm: - w._materialize_asset(ctx, a) - - self.assertIn("403", str(cm.exception)) - - -class TestAuthErrorMapping(unittest.TestCase): - """Test that AuthError is mapped to non-retryable error type.""" - - def test_auth_error_is_non_retryable(self) -> None: - """AuthError should map to 'auth' error type with retryable=False.""" - w = Worker.__new__(Worker) - w.owner = "tenant-1" - - exc = AuthError("token expired") - error_type, retryable, safe_msg, internal_msg = w._map_exception(exc) - - self.assertEqual(error_type, "auth") - self.assertFalse(retryable) - # safe_msg contains the exception message - self.assertIn("token expired", safe_msg.lower()) - - def test_auth_error_default_message(self) -> 
None: - """AuthError with empty message should use 'authentication failed'.""" - w = Worker.__new__(Worker) - w.owner = "tenant-1" - - exc = AuthError("") - error_type, retryable, safe_msg, internal_msg = w._map_exception(exc) - - self.assertEqual(error_type, "auth") - self.assertFalse(retryable) - self.assertIn("authentication", safe_msg.lower()) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_hf_injection_normalization.py b/tests/test_hf_injection_normalization.py deleted file mode 100644 index 4f8adc4..0000000 --- a/tests/test_hf_injection_normalization.py +++ /dev/null @@ -1,60 +0,0 @@ -from __future__ import annotations - -from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource as Src -from gen_worker.worker import RequestContext, Worker - - -class _DummyModel: - last_source: str = "" - last_kwargs: dict = {} - - @classmethod - def from_pretrained(cls, source: str, **kwargs): - cls.last_source = str(source) - cls.last_kwargs = dict(kwargs) - return cls() - - -class _DownloaderStub: - def __init__(self, local_path: str) -> None: - self.local_path = local_path - self.calls: list[tuple[str, str]] = [] - - def download(self, model_ref: str, cache_dir: str) -> str: - self.calls.append((model_ref, cache_dir)) - return self.local_path - - -def _bare_worker() -> Worker: - w = Worker.__new__(Worker) - w._model_manager = None - w._model_cache = None - w._custom_runtime_locks = {} - w._custom_runtime_cache = {} - w._downloader = None - return w - - -def test_non_diffusers_hf_ref_normalizes_without_downloader() -> None: - w = _bare_worker() - ctx = RequestContext("run-hf-no-downloader") - inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) - - _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] - - assert _DummyModel.last_source == "owner/repo" - assert _DummyModel.last_kwargs.get("revision") == "main" - - -def 
test_non_diffusers_hf_ref_uses_downloader_path_when_available() -> None: - w = _bare_worker() - dl = _DownloaderStub("/tmp/cozy-model-cache/hf-owner-repo-main") - w._downloader = dl - ctx = RequestContext("run-hf-downloader") - inj = InjectionSpec(param_name="model", param_type=_DummyModel, model_ref=ModelRef(Src.FIXED, "joycaption")) - - _ = Worker._resolve_injected_value(w, ctx, _DummyModel, "hf:owner/repo@main", inj) # type: ignore[arg-type] - - assert dl.calls - assert dl.calls[0][0] == "hf:owner/repo@main" - assert _DummyModel.last_source == "/tmp/cozy-model-cache/hf-owner-repo-main" diff --git a/tests/test_injection_type_enforcement.py b/tests/test_injection_type_enforcement.py deleted file mode 100644 index 7b35d37..0000000 --- a/tests/test_injection_type_enforcement.py +++ /dev/null @@ -1,31 +0,0 @@ -import unittest - -from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource -from gen_worker.worker import RequestContext, Worker - - -class _StubModelManager: - async def get_active_pipeline(self, model_id: str): - return _ActualPipeline() - - -class _ExpectedPipeline: - pass - - -class _ActualPipeline: - pass - - -class TestInjectionTypeEnforcement(unittest.TestCase): - def test_rejects_model_manager_type_mismatch(self) -> None: - w = Worker(user_module_names=[], model_manager=_StubModelManager(), worker_jwt="dummy-worker-jwt") - ctx = RequestContext("run-1") - inj = InjectionSpec( - param_name="pipeline", - param_type=_ExpectedPipeline, - model_ref=ModelRef(ModelRefSource.FIXED, "foo"), - ) - - with self.assertRaises(ValueError): - w._resolve_injected_value(ctx, _ExpectedPipeline, "model-id", inj) diff --git a/tests/test_mock_orchestrator_dev.py b/tests/test_mock_orchestrator_dev.py deleted file mode 100644 index 47c1918..0000000 --- a/tests/test_mock_orchestrator_dev.py +++ /dev/null @@ -1,109 +0,0 @@ -from __future__ import annotations - -import os -import signal -import subprocess -import sys -import time -from pathlib import Path - 
-import pytest - -from gen_worker.testing.mock_orchestrator import _MockOrchestrator -from gen_worker.pb import worker_scheduler_pb2_grpc as pb_grpc - - -@pytest.mark.skipif(os.getenv("COZY_DEV_GRPC_E2E") != "1", reason="set COZY_DEV_GRPC_E2E=1 to run gRPC e2e") -def test_mock_orchestrator_can_run_one_task(tmp_path: Path) -> None: - # Start mock orchestrator server. - orch = _MockOrchestrator() - import grpc - from concurrent import futures - - server = grpc.server(futures.ThreadPoolExecutor(max_workers=8)) - pb_grpc.add_SchedulerWorkerServiceServicer_to_server(orch, server) - port = server.add_insecure_port("127.0.0.1:0") - server.start() - - # Write a tiny module with a single worker function into a temp dir. - mod_dir = tmp_path / "mod" - mod_dir.mkdir(parents=True, exist_ok=True) - (mod_dir / "hello_mod.py").write_text( - """ -from __future__ import annotations - -import msgspec -from gen_worker import RequestContext, ResourceRequirements, worker_function - - -class Input(msgspec.Struct): - name: str - - -class Output(msgspec.Struct): - message: str - - -@worker_function(ResourceRequirements()) -def hello(ctx: RequestContext, payload: Input) -> Output: - return Output(message=f"hello {payload.name}") -""".lstrip(), - encoding="utf-8", - ) - - # Start a worker process that connects to our mock orchestrator. 
- env = dict(os.environ) - env["SCHEDULER_PUBLIC_ADDR"] = f"127.0.0.1:{port}" - (tmp_path / "endpoint.toml").write_text( - """ -schema_version = 1 -name = "dev-test" -main = "hello_mod" -""".lstrip(), - encoding="utf-8", - ) - env["ENDPOINT_TOML_PATH"] = str(tmp_path / "endpoint.toml") - env["PYTHONPATH"] = f"{mod_dir}:{env.get('PYTHONPATH','')}" - env["WORKER_ID"] = "dev-test" - env["WORKER_JWT"] = "dev-test-jwt" - - proc = subprocess.Popen( - [sys.executable, "-m", "gen_worker.entrypoint"], - env=env, - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - text=True, - ) - try: - sess = orch.get_session(timeout_s=30.0) - assert sess is not None - assert "hello" in sess.available_functions - - request_id = sess.run_task(function_name="hello", payload_obj={"name": "world"}) - - # Wait for a result. - start = time.monotonic() - while time.monotonic() - start < 30: - msg = sess.recv(timeout_s=0.5) - if msg is None: - continue - if msg.HasField("run_result") and msg.run_result.request_id == request_id: - assert msg.run_result.success is True - assert msg.run_result.output_payload - import msgspec - - out = msgspec.msgpack.decode(msg.run_result.output_payload) - assert out["message"] == "hello world" - return - - raise AssertionError("timed out waiting for run_result") - finally: - try: - proc.send_signal(signal.SIGINT) - except Exception: - pass - try: - proc.wait(timeout=5) - except Exception: - proc.kill() - server.stop(grace=None) diff --git a/tests/test_model_cache.py b/tests/test_model_cache.py deleted file mode 100644 index 14c3080..0000000 --- a/tests/test_model_cache.py +++ /dev/null @@ -1,312 +0,0 @@ -"""Tests for the ModelCache class.""" - -import unittest -from unittest.mock import patch, MagicMock - -from gen_worker.model_cache import ( - ModelCache, - ModelCacheStats, - ModelLocation, - CachedModel, -) - - -class TestModelCache(unittest.TestCase): - """Tests for ModelCache LRU eviction and stats.""" - - def setUp(self) -> None: - """Create a fresh 
ModelCache for each test.""" - # Patch torch to avoid CUDA detection - with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): - self.cache = ModelCache( - max_vram_gb=20.0, - vram_safety_margin_gb=4.0, - ) - - def test_register_model_vram(self) -> None: - """Test registering a model in VRAM.""" - self.cache.register_model( - model_id="model-a", - location=ModelLocation.VRAM, - size_gb=5.0, - pipeline=MagicMock(), - ) - - self.assertTrue(self.cache.has_model("model-a")) - self.assertTrue(self.cache.is_in_vram("model-a")) - self.assertFalse(self.cache.is_on_disk("model-a")) - - def test_register_model_disk(self) -> None: - """Test registering a model on disk.""" - from pathlib import Path - - self.cache.register_model( - model_id="model-b", - location=ModelLocation.DISK, - size_gb=10.0, - disk_path=Path("/tmp/model-b"), - ) - - self.assertTrue(self.cache.has_model("model-b")) - self.assertFalse(self.cache.is_in_vram("model-b")) - self.assertTrue(self.cache.is_on_disk("model-b")) - - def test_lru_ordering(self) -> None: - """Test that LRU ordering works correctly.""" - # Register three models - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.register_model("model-c", ModelLocation.VRAM, 5.0, MagicMock()) - - # model-a is LRU (first registered) - lru = self.cache._get_lru_vram_models() - self.assertEqual(lru[0], "model-a") - - # Access model-a, now model-b should be LRU - self.cache._touch("model-a") - lru = self.cache._get_lru_vram_models() - self.assertEqual(lru[0], "model-b") - - def test_lru_eviction(self) -> None: - """Test that LRU eviction frees space.""" - # Fill cache: 5 + 5 + 5 = 15GB used out of 20GB max - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.register_model("model-c", 
ModelLocation.VRAM, 5.0, MagicMock()) - - self.assertEqual(self.cache._vram_used_gb, 15.0) - - # Try to add 10GB model - should evict LRU models - # Need to evict 5GB (15 + 10 - 20 = 5) - freed = self.cache._evict_lru_for_space(10.0) - - # model-a (5GB) should be evicted - self.assertEqual(freed, 5.0) - self.assertEqual(self.cache._vram_used_gb, 10.0) - self.assertFalse(self.cache.is_in_vram("model-a")) - - def test_get_stats(self) -> None: - """Test stats generation for heartbeat.""" - self.cache.register_model("model-vram-1", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.register_model("model-vram-2", ModelLocation.VRAM, 3.0, MagicMock()) - from pathlib import Path - self.cache.register_model("model-disk-1", ModelLocation.DISK, 10.0, disk_path=Path("/tmp/m")) - self.cache.mark_downloading("model-dl-1", 0.5) - - stats = self.cache.get_stats() - - self.assertIsInstance(stats, ModelCacheStats) - self.assertEqual(len(stats.vram_models), 2) - self.assertIn("model-vram-1", stats.vram_models) - self.assertIn("model-vram-2", stats.vram_models) - self.assertEqual(len(stats.disk_models), 1) - self.assertIn("model-disk-1", stats.disk_models) - self.assertEqual(len(stats.downloading_models), 1) - self.assertIn("model-dl-1", stats.downloading_models) - self.assertEqual(stats.vram_used_gb, 8.0) - self.assertEqual(stats.vram_model_count, 2) - self.assertEqual(stats.disk_model_count, 1) - self.assertEqual(stats.total_models, 4) - - def test_stats_to_dict(self) -> None: - """Test stats serialization to dict.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.5, MagicMock()) - stats = self.cache.get_stats() - d = stats.to_dict() - - self.assertIsInstance(d, dict) - self.assertIn("vram_models", d) - self.assertIn("vram_used_gb", d) - self.assertEqual(d["vram_used_gb"], 5.5) - - def test_unload_model(self) -> None: - """Test unloading a model from cache.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - 
self.assertTrue(self.cache.has_model("model-a")) - self.assertEqual(self.cache._vram_used_gb, 5.0) - - result = self.cache.unload_model("model-a") - self.assertTrue(result) - self.assertFalse(self.cache.has_model("model-a")) - self.assertEqual(self.cache._vram_used_gb, 0.0) - - def test_unload_nonexistent(self) -> None: - """Test unloading a model that doesn't exist.""" - result = self.cache.unload_model("nonexistent") - self.assertFalse(result) - - def test_mark_loaded_to_vram(self) -> None: - """Test marking a model as loaded to VRAM.""" - self.cache.mark_loaded_to_vram("model-a", MagicMock(), 8.0) - - self.assertTrue(self.cache.is_in_vram("model-a")) - self.assertEqual(self.cache._vram_used_gb, 8.0) - - def test_mark_cached_to_disk(self) -> None: - """Test marking a model as cached to disk.""" - from pathlib import Path - - # First load to VRAM - self.cache.mark_loaded_to_vram("model-a", MagicMock(), 8.0) - self.assertTrue(self.cache.is_in_vram("model-a")) - - # Then mark as disk-cached (offloaded) - self.cache.mark_cached_to_disk("model-a", Path("/tmp/model-a"), 8.0) - - self.assertFalse(self.cache.is_in_vram("model-a")) - self.assertTrue(self.cache.is_on_disk("model-a")) - self.assertEqual(self.cache._vram_used_gb, 0.0) - - def test_can_fit_in_vram(self) -> None: - """Test checking if model can fit in VRAM.""" - # Empty cache with 20GB max - self.assertTrue(self.cache.can_fit_in_vram(10.0)) - self.assertTrue(self.cache.can_fit_in_vram(20.0)) - self.assertFalse(self.cache.can_fit_in_vram(25.0)) - - # With some models loaded - self.cache.register_model("model-a", ModelLocation.VRAM, 15.0, MagicMock()) - self.assertTrue(self.cache.can_fit_in_vram(5.0)) # 15 + 5 = 20 - self.assertTrue(self.cache.can_fit_in_vram(20.0)) # Can evict model-a - self.assertFalse(self.cache.can_fit_in_vram(25.0)) # Too big even after eviction - - def test_get_pipeline(self) -> None: - """Test getting a pipeline from cache.""" - pipeline = MagicMock() - 
self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, pipeline) - - retrieved = self.cache.get_pipeline("model-a") - self.assertIs(retrieved, pipeline) - - # Getting pipeline should update LRU - self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.get_pipeline("model-a") - lru = self.cache._get_lru_vram_models() - self.assertEqual(lru[0], "model-b") # model-b is now LRU - - def test_get_pipeline_disk_model(self) -> None: - """Test getting pipeline for disk-cached model returns None.""" - from pathlib import Path - self.cache.register_model("model-a", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) - - retrieved = self.cache.get_pipeline("model-a") - self.assertIsNone(retrieved) - - def test_download_progress(self) -> None: - """Test download progress tracking.""" - self.cache.mark_downloading("model-a", 0.0) - model = self.cache._models.get("model-a") - self.assertIsNotNone(model) - self.assertEqual(model.location, ModelLocation.DOWNLOADING) - self.assertEqual(model.download_progress, 0.0) - - self.cache.update_download_progress("model-a", 0.5) - self.assertEqual(model.download_progress, 0.5) - - self.cache.update_download_progress("model-a", 1.0) - self.assertEqual(model.download_progress, 1.0) - - -class TestModelCacheEnvironment(unittest.TestCase): - """Test ModelCache configuration from environment.""" - - def test_env_config(self) -> None: - """Test that environment variables configure the cache.""" - import os - - with patch.dict(os.environ, { - "WORKER_MAX_VRAM_GB": "16", - "WORKER_VRAM_SAFETY_MARGIN_GB": "2.5", - }): - with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): - cache = ModelCache() - self.assertEqual(cache._max_vram_gb, 16.0) - self.assertEqual(cache._vram_safety_margin, 2.5) - - -class TestProgressiveAvailability(unittest.TestCase): - """Tests for progressive model availability.""" - - def setUp(self) -> None: - """Create a fresh ModelCache for each test.""" - with 
patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): - self.cache = ModelCache(max_vram_gb=20.0) - - def test_are_models_available_all_ready(self) -> None: - """Test that available check passes when all models are ready.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - from pathlib import Path - self.cache.register_model("model-b", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) - - # Both VRAM and disk models should be considered available - self.assertTrue(self.cache.are_models_available(["model-a"])) - self.assertTrue(self.cache.are_models_available(["model-b"])) - self.assertTrue(self.cache.are_models_available(["model-a", "model-b"])) - - def test_are_models_available_downloading(self) -> None: - """Test that available check fails when model is downloading.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.mark_downloading("model-b", 0.5) - - # model-a is available, model-b is not - self.assertTrue(self.cache.are_models_available(["model-a"])) - self.assertFalse(self.cache.are_models_available(["model-b"])) - self.assertFalse(self.cache.are_models_available(["model-a", "model-b"])) - - def test_are_models_available_missing(self) -> None: - """Test that available check fails for unknown models.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - - self.assertTrue(self.cache.are_models_available(["model-a"])) - self.assertFalse(self.cache.are_models_available(["model-a", "model-unknown"])) - self.assertFalse(self.cache.are_models_available(["model-unknown"])) - - def test_get_available_models(self) -> None: - """Test getting list of available models.""" - self.cache.register_model("model-vram", ModelLocation.VRAM, 5.0, MagicMock()) - from pathlib import Path - self.cache.register_model("model-disk", ModelLocation.DISK, 5.0, disk_path=Path("/tmp")) - self.cache.mark_downloading("model-dl", 0.5) - - available = 
self.cache.get_available_models() - self.assertEqual(len(available), 2) - self.assertIn("model-vram", available) - self.assertIn("model-disk", available) - self.assertNotIn("model-dl", available) - - def test_get_downloading_models(self) -> None: - """Test getting list of downloading models.""" - self.cache.register_model("model-a", ModelLocation.VRAM, 5.0, MagicMock()) - self.cache.mark_downloading("model-b", 0.3) - self.cache.mark_downloading("model-c", 0.7) - - downloading = self.cache.get_downloading_models() - self.assertEqual(len(downloading), 2) - self.assertIn("model-b", downloading) - self.assertIn("model-c", downloading) - - def test_get_download_progress(self) -> None: - """Test getting download progress for a model.""" - self.cache.mark_downloading("model-a", 0.5) - self.cache.register_model("model-b", ModelLocation.VRAM, 5.0, MagicMock()) - - self.assertEqual(self.cache.get_download_progress("model-a"), 0.5) - self.assertIsNone(self.cache.get_download_progress("model-b")) - self.assertIsNone(self.cache.get_download_progress("unknown")) - - def test_max_concurrent_downloads_config(self) -> None: - """Test max concurrent downloads configuration.""" - import os - - # Default value - self.assertEqual(self.cache.get_max_concurrent_downloads(), 2) - - # From environment - with patch.dict(os.environ, {"WORKER_MAX_CONCURRENT_DOWNLOADS": "4"}): - with patch("gen_worker.model_cache.ModelCache._detect_total_vram", return_value=24.0): - cache = ModelCache() - self.assertEqual(cache.get_max_concurrent_downloads(), 4) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_payload_model_selection.py b/tests/test_payload_model_selection.py deleted file mode 100644 index 7fdf444..0000000 --- a/tests/test_payload_model_selection.py +++ /dev/null @@ -1,78 +0,0 @@ -import unittest - -import msgspec - -from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource -from gen_worker.worker import Worker - - -class _Payload(msgspec.Struct): - 
model: str - - -class TestPayloadModelSelection(unittest.TestCase): - def test_fixed_key_must_exist_in_mapping(self) -> None: - w = Worker( - user_module_names=[], - worker_jwt="dummy-worker-jwt", - manifest={ - "models": { - "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, - } - }, - ) - inj = InjectionSpec( - param_name="pipeline", - param_type=object, - model_ref=ModelRef(ModelRefSource.FIXED, "missing"), - ) - payload = _Payload(model="sd15") - with self.assertRaises(ValueError) as ctx: - w._resolve_model_id_for_injection("generate", inj, payload) - self.assertIn("unknown fixed model key", str(ctx.exception).lower()) - - def test_payload_key_must_exist_in_function_keyspace(self) -> None: - w = Worker( - user_module_names=[], - worker_jwt="dummy-worker-jwt", - manifest={ - "models_by_function": { - "generate": { - "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, - "flux": {"ref": "demo/flux", "dtypes": ["bf16"]}, - } - } - }, - ) - inj = InjectionSpec( - param_name="pipeline", - param_type=object, - model_ref=ModelRef(ModelRefSource.PAYLOAD, "model"), - ) - payload = _Payload(model="does-not-exist") - with self.assertRaises(ValueError) as ctx: - w._resolve_model_id_for_injection("generate", inj, payload) - self.assertIn("unknown model key", str(ctx.exception).lower()) - self.assertIn("sd15", str(ctx.exception)) - - def test_payload_key_resolves_to_repo_ref(self) -> None: - w = Worker( - user_module_names=[], - worker_jwt="dummy-worker-jwt", - manifest={ - "models_by_function": { - "generate": { - "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, - } - } - }, - ) - inj = InjectionSpec( - param_name="pipeline", - param_type=object, - model_ref=ModelRef(ModelRefSource.PAYLOAD, "model"), - ) - payload = _Payload(model="sd15") - out, key = w._resolve_model_id_for_injection("generate", inj, payload) - self.assertEqual(out, "cozy:demo/sd15:latest") - self.assertEqual(key, "sd15") diff --git a/tests/test_pipeline_thread_safety.py 
b/tests/test_pipeline_thread_safety.py deleted file mode 100644 index 661fa6d..0000000 --- a/tests/test_pipeline_thread_safety.py +++ /dev/null @@ -1,355 +0,0 @@ -""" -Tests for thread-safe pipeline access in PipelineLoader. - -These tests verify that get_for_inference() properly creates thread-safe -pipeline copies with fresh schedulers to avoid concurrent access issues. -""" -import threading -import time -from typing import Any, Dict, List, Optional -from unittest.mock import MagicMock, patch, PropertyMock - -import pytest - -# Check if torch is available for skip markers -try: - import torch - TORCH_AVAILABLE = True -except ImportError: - TORCH_AVAILABLE = False - - -class MockSchedulerConfig: - """Mock scheduler config for testing.""" - def __init__(self) -> None: - self.num_train_timesteps = 1000 - self.beta_start = 0.0001 - self.beta_end = 0.02 - - -class MockScheduler: - """Mock scheduler that tracks instance creation.""" - _instance_count = 0 - - def __init__(self) -> None: - MockScheduler._instance_count += 1 - self.instance_id = MockScheduler._instance_count - self.config = MockSchedulerConfig() - # Simulate internal state that would cause issues if shared - self._timesteps: List[int] = [] - self._step_index: Optional[int] = None - - @classmethod - def from_config(cls, config: MockSchedulerConfig) -> "MockScheduler": - """Create new scheduler from config (like diffusers does).""" - scheduler = cls() - scheduler.config = config - return scheduler - - @classmethod - def reset_instance_count(cls) -> None: - cls._instance_count = 0 - - -class MockPipeline: - """Mock pipeline for testing thread-safe access.""" - _from_pipe_calls = 0 - - def __init__(self, scheduler: Optional[MockScheduler] = None) -> None: - self.scheduler = scheduler or MockScheduler() - self.unet = MagicMock() # Simulates heavy component - self.vae = MagicMock() # Simulates heavy component - - @classmethod - def from_pipe(cls, base_pipeline: "MockPipeline", scheduler: MockScheduler) -> 
"MockPipeline": - """Create new pipeline sharing components but with fresh scheduler.""" - MockPipeline._from_pipe_calls += 1 - new_pipeline = cls(scheduler=scheduler) - # Share heavy components - new_pipeline.unet = base_pipeline.unet - new_pipeline.vae = base_pipeline.vae - return new_pipeline - - @classmethod - def reset_call_count(cls) -> None: - cls._from_pipe_calls = 0 - - -class MockLoadedPipeline: - """Mock LoadedPipeline container.""" - def __init__(self, pipeline: MockPipeline, model_id: str) -> None: - self.pipeline = pipeline - self.model_id = model_id - self.pipeline_class = "MockPipeline" - self.dtype = "float16" - self.size_gb = 10.0 - self.load_format = "safetensors" - - -class TestGetForInferenceLogic: - """ - Tests for get_for_inference() logic without requiring torch. - Uses direct method testing with mocked dependencies. - """ - - def setup_method(self) -> None: - """Reset mocks before each test.""" - MockScheduler.reset_instance_count() - MockPipeline.reset_call_count() - - def test_creates_fresh_scheduler_logic(self) -> None: - """Test that get_for_inference creates a fresh scheduler.""" - # Test the logic directly without PipelineLoader instantiation - base_scheduler = MockScheduler() - base_pipeline = MockPipeline(scheduler=base_scheduler) - - initial_count = MockScheduler._instance_count - - # Simulate what get_for_inference does - fresh_scheduler = base_pipeline.scheduler.from_config( - base_pipeline.scheduler.config - ) - task_pipeline = MockPipeline.from_pipe(base_pipeline, scheduler=fresh_scheduler) - - # Verify new scheduler was created - assert MockScheduler._instance_count == initial_count + 1 - assert task_pipeline.scheduler.instance_id != base_scheduler.instance_id - - def test_shares_heavy_components_logic(self) -> None: - """Test that heavy components are shared.""" - base_pipeline = MockPipeline() - original_unet = base_pipeline.unet - original_vae = base_pipeline.vae - - # Simulate what get_for_inference does - fresh_scheduler 
= base_pipeline.scheduler.from_config( - base_pipeline.scheduler.config - ) - task_pipeline = MockPipeline.from_pipe(base_pipeline, scheduler=fresh_scheduler) - - # Heavy components should be shared - assert task_pipeline.unet is original_unet - assert task_pipeline.vae is original_vae - # Scheduler should be different - assert task_pipeline.scheduler is not base_pipeline.scheduler - - -class TestConcurrentAccess: - """Tests for concurrent pipeline access patterns.""" - - def setup_method(self) -> None: - """Reset mocks before each test.""" - MockScheduler.reset_instance_count() - MockPipeline.reset_call_count() - - def test_concurrent_scheduler_creation_is_safe(self) -> None: - """Multiple concurrent scheduler creations should be independent.""" - base_pipeline = MockPipeline() - results: List[MockPipeline] = [] - errors: List[Exception] = [] - lock = threading.Lock() - - def create_task_pipeline() -> None: - try: - # Simulate what get_for_inference does - fresh_scheduler = base_pipeline.scheduler.from_config( - base_pipeline.scheduler.config - ) - task_pipeline = MockPipeline.from_pipe( - base_pipeline, scheduler=fresh_scheduler - ) - with lock: - results.append(task_pipeline) - except Exception as e: - with lock: - errors.append(e) - - # Spawn multiple threads - threads = [threading.Thread(target=create_task_pipeline) for _ in range(10)] - for t in threads: - t.start() - for t in threads: - t.join() - - # All should succeed - assert len(errors) == 0, f"Got errors: {errors}" - assert len(results) == 10 - - # Each result should have a unique scheduler - scheduler_ids = [r.scheduler.instance_id for r in results] - assert len(set(scheduler_ids)) == 10 # All unique - - def test_concurrent_access_simulated_inference(self) -> None: - """ - Simulate concurrent inference to verify no state corruption. - - This test verifies that separate schedulers prevent the - 'IndexError: index N is out of bounds' that occurs when - multiple threads share a scheduler's internal state. 
- """ - base_pipeline = MockPipeline() - errors: List[Exception] = [] - completed = 0 - lock = threading.Lock() - - def simulate_inference() -> None: - nonlocal completed - try: - # Create thread-safe pipeline (what get_for_inference does) - fresh_scheduler = base_pipeline.scheduler.from_config( - base_pipeline.scheduler.config - ) - task_pipeline = MockPipeline.from_pipe( - base_pipeline, scheduler=fresh_scheduler - ) - - # Simulate scheduler state modification during inference - task_pipeline.scheduler._timesteps = list(range(1000)) - task_pipeline.scheduler._step_index = 0 - - # Simulate stepping through inference - for i in range(50): - idx = task_pipeline.scheduler._step_index - if idx is not None and idx < len(task_pipeline.scheduler._timesteps): - # This would cause IndexError if scheduler is shared - _ = task_pipeline.scheduler._timesteps[idx] - task_pipeline.scheduler._step_index = i + 1 - time.sleep(0.001) # Simulate work - - with lock: - completed += 1 - - except Exception as e: - with lock: - errors.append(e) - - # Spawn concurrent inference threads - threads = [threading.Thread(target=simulate_inference) for _ in range(5)] - for t in threads: - t.start() - for t in threads: - t.join() - - # All should complete without errors - assert len(errors) == 0, f"Got errors: {errors}" - assert completed == 5 - - -class TestModelManagementInterface: - """Tests for ModelManagementInterface.get_for_inference()""" - - def test_default_implementation_calls_get_active_pipeline(self) -> None: - """Default get_for_inference should fall back to get_active_pipeline.""" - from gen_worker.model_interface import ModelManagementInterface - - class TestManager(ModelManagementInterface): - def __init__(self) -> None: - self.get_active_pipeline_called = False - - async def process_supported_models_config( - self, supported_model_ids: List[str], downloader_instance: Any - ) -> None: - pass - - async def load_model_into_vram(self, model_id: str) -> bool: - return True - - async 
def get_active_pipeline(self, model_id: str) -> Any: - self.get_active_pipeline_called = True - return MockPipeline() - - async def get_active_model_bundle(self, model_id: str) -> Any: - return None - - def get_vram_loaded_models(self) -> List[str]: - return [] - - manager = TestManager() - result = manager.get_for_inference("test-model") - - assert manager.get_active_pipeline_called - assert result is not None - - -@pytest.mark.skipif(not TORCH_AVAILABLE, reason="torch not installed") -class TestPipelineLoaderIntegration: - """Integration tests that require torch.""" - - def setup_method(self) -> None: - """Reset mocks before each test.""" - MockScheduler.reset_instance_count() - MockPipeline.reset_call_count() - - def test_get_for_inference_with_real_loader(self) -> None: - """Test get_for_inference with actual PipelineLoader.""" - from gen_worker.pipeline_loader import PipelineLoader - - loader = PipelineLoader() - - # Create mock loaded pipeline - base_pipeline = MockPipeline() - loaded = MockLoadedPipeline(base_pipeline, "test-model") - loader._loaded_pipelines = {"test-model": loaded} - - result = loader.get_for_inference("test-model") - - assert result is not None - # Should have different scheduler - assert result.scheduler is not base_pipeline.scheduler - # Should share heavy components - assert result.unet is base_pipeline.unet - - def test_get_for_inference_returns_none_for_missing(self) -> None: - """Test get_for_inference returns None for unloaded models.""" - from gen_worker.pipeline_loader import PipelineLoader - - loader = PipelineLoader() - loader._loaded_pipelines = {} - - result = loader.get_for_inference("nonexistent") - - assert result is None - - def test_concurrent_get_for_inference_with_loader(self) -> None: - """Test concurrent get_for_inference calls with real PipelineLoader.""" - from gen_worker.pipeline_loader import PipelineLoader - - loader = PipelineLoader() - - # Create mock loaded pipeline - base_pipeline = MockPipeline() - loaded = 
MockLoadedPipeline(base_pipeline, "test-model") - loader._loaded_pipelines = {"test-model": loaded} - - results: List[Any] = [] - errors: List[Exception] = [] - lock = threading.Lock() - - def get_pipeline() -> None: - try: - result = loader.get_for_inference("test-model") - with lock: - results.append(result) - except Exception as e: - with lock: - errors.append(e) - - # Spawn concurrent threads - threads = [threading.Thread(target=get_pipeline) for _ in range(10)] - for t in threads: - t.start() - for t in threads: - t.join() - - assert len(errors) == 0, f"Got errors: {errors}" - assert len(results) == 10 - - # Each should have unique scheduler but share heavy components - scheduler_ids = set() - for r in results: - assert r is not None - scheduler_ids.add(r.scheduler.instance_id) - assert r.unet is base_pipeline.unet # Shared - - # All schedulers should be unique - assert len(scheduler_ids) == 10 diff --git a/tests/test_realtime_socket.py b/tests/test_realtime_socket.py deleted file mode 100644 index 06dcccf..0000000 --- a/tests/test_realtime_socket.py +++ /dev/null @@ -1,83 +0,0 @@ -import time -import unittest - -from gen_worker.worker import RequestContext, RealtimeSocket, Worker, pb -from gen_worker.decorators import worker_websocket, ResourceRequirements - - -@worker_websocket(ResourceRequirements()) -async def echo_ws(ctx: RequestContext, sock: RealtimeSocket) -> None: - await sock.send_json({"status": "ready"}) - async for data in sock.iter_bytes(): - await sock.send_bytes(data) - - -class TestRealtimeSocket(unittest.TestCase): - def _make_worker(self) -> Worker: - w = Worker.__new__(Worker) - import threading - - w.owner = "tenant-1" - w._emit_progress_event = lambda e: None # type: ignore[method-assign] - w._runtime_loaders = {} - w._custom_runtime_cache = {} - w._custom_runtime_locks = {} - w._fixed_model_id_by_key = {} - w._payload_model_id_by_key_by_function = {} - w._fixed_model_spec_by_key = {} - w._payload_model_spec_by_key_by_function = {} - 
w._release_allowed_model_ids = None - w._model_manager = None - w._realtime_sessions = {} - w._realtime_lock = threading.Lock() - w._sent = [] - w._send_message = lambda msg: w._sent.append(msg) # type: ignore[method-assign] - w._ws_specs = {} - w._discovered_resources = {} - w._inspect_websocket_spec = Worker._inspect_websocket_spec.__get__(w, Worker) # type: ignore[attr-defined] - w._resolve_injected_value = Worker._resolve_injected_value.__get__(w, Worker) # type: ignore[attr-defined] - w._handle_realtime_open_cmd = Worker._handle_realtime_open_cmd.__get__(w, Worker) # type: ignore[attr-defined] - w._handle_realtime_frame = Worker._handle_realtime_frame.__get__(w, Worker) # type: ignore[attr-defined] - w._handle_realtime_close_cmd = Worker._handle_realtime_close_cmd.__get__(w, Worker) # type: ignore[attr-defined] - return w - - def test_realtime_echo(self) -> None: - w = self._make_worker() - spec = w._inspect_websocket_spec(echo_ws) # type: ignore[arg-type] - w._ws_specs[spec.name] = spec - - w._handle_realtime_open_cmd( - pb.RealtimeOpenCommand(session_id="s1", function_name=spec.name, owner="tenant-1") - ) - - # Wait for ready JSON frame. - deadline = time.time() + 2.0 - ready = False - while time.time() < deadline: - for m in list(w._sent): - if getattr(m, "realtime_frame", None) and m.realtime_frame.is_text: - if b"ready" in m.realtime_frame.data: - ready = True - break - if ready: - break - time.sleep(0.01) - self.assertTrue(ready) - - # Send binary bytes frame and expect an echoed binary frame back. 
- w._handle_realtime_frame(pb.RealtimeFrame(session_id="s1", data=b"abc", is_text=False)) - - deadline = time.time() + 2.0 - echoed = False - while time.time() < deadline: - for m in list(w._sent): - if getattr(m, "realtime_frame", None) and not m.realtime_frame.is_text: - if m.realtime_frame.data == b"abc": - echoed = True - break - if echoed: - break - time.sleep(0.01) - self.assertTrue(echoed) - - w._handle_realtime_close_cmd(pb.RealtimeCloseCommand(session_id="s1", reason="end")) diff --git a/tests/test_runtime_batching_config_cmd.py b/tests/test_runtime_batching_config_cmd.py deleted file mode 100644 index 256a189..0000000 --- a/tests/test_runtime_batching_config_cmd.py +++ /dev/null @@ -1,113 +0,0 @@ -import json -from types import SimpleNamespace - -from gen_worker.decorators import ResourceRequirements -from gen_worker.pb import worker_scheduler_pb2 as pb -from gen_worker.worker import Worker - - -def test_runtime_batching_config_cmd_applies_and_acks(monkeypatch) -> None: - w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") - w._task_specs["caption"] = SimpleNamespace(output_mode="incremental") - - sent = [] - monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) - - cmd = pb.RuntimeBatchingConfigCommand( - config=pb.RuntimeBatchingConfig( - function_name="caption", - batch_size_target=6, - batch_size_min=2, - batch_size_max=8, - prefetch_depth=3, - max_wait_ms=120, - version=3, - ) - ) - w._handle_runtime_batching_config_cmd(cmd) - - cfg = w._runtime_batching_cfg_for_function("caption") - assert cfg["function_name"] == "caption" - assert cfg["batch_size_target"] == 6 - assert cfg["batch_size_min"] == 2 - assert cfg["batch_size_max"] == 8 - assert cfg["prefetch_depth"] == 3 - assert cfg["max_wait_ms"] == 120 - assert cfg["version"] == 3 - - results = [m.runtime_batching_config_result for m in sent if m.HasField("runtime_batching_config_result")] - assert len(results) == 1 - assert results[0].function_name == "caption" - assert 
results[0].version == 3 - assert results[0].success is True - assert results[0].error_message == "" - - -def test_runtime_batching_config_cmd_stale_version_is_ignored(monkeypatch) -> None: - w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") - w._task_specs["caption"] = SimpleNamespace(output_mode="single") - w._runtime_batching_config_by_function["caption"] = { - "function_name": "caption", - "batch_size_target": 5, - "batch_size_min": 1, - "batch_size_max": 6, - "prefetch_depth": 2, - "max_wait_ms": 100, - "version": 4, - } - - sent = [] - monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) - - msg = pb.WorkerSchedulerMessage( - runtime_batching_config_cmd=pb.RuntimeBatchingConfigCommand( - config=pb.RuntimeBatchingConfig( - function_name="caption", - batch_size_target=2, - batch_size_min=1, - batch_size_max=2, - prefetch_depth=1, - max_wait_ms=50, - version=3, # stale - ) - ) - ) - w._process_message(msg) - - cfg = w._runtime_batching_cfg_for_function("caption") - assert cfg["version"] == 4 - assert cfg["batch_size_target"] == 5 - - results = [m.runtime_batching_config_result for m in sent if m.HasField("runtime_batching_config_result")] - assert len(results) == 1 - assert results[0].success is True - assert results[0].version == 3 - - -def test_function_capabilities_event_emits_when_changed(monkeypatch) -> None: - w = Worker(user_module_names=[], worker_jwt="dummy-worker-jwt") - w._discovered_resources["caption"] = ResourceRequirements( - batch_size_min=1, - batch_size_target=4, - batch_size_max=8, - prefetch_depth=2, - max_wait_ms=150, - memory_hint_mb=12288, - stage_profile="io_gpu_disaggregated", - stage_traits=["decode_prefetch", "gpu_decode_overlap"], - ) - w._task_specs["caption"] = SimpleNamespace(output_mode="incremental") - - sent = [] - monkeypatch.setattr(w, "_send_message", lambda m: sent.append(m)) - - w._emit_function_capabilities_event() - w._emit_function_capabilities_event() # second send should be deduped - - events 
= [m.worker_event for m in sent if m.HasField("worker_event")] - assert len(events) == 1 - assert events[0].event_type == "worker.function_capabilities" - payload = json.loads(bytes(events[0].payload_json or b"{}").decode("utf-8")) - fns = list(payload.get("functions") or []) - assert len(fns) == 1 - assert "max_inflight_requests" not in fns[0] diff --git a/tests/test_scheduler_model_scope.py b/tests/test_scheduler_model_scope.py deleted file mode 100644 index 2e38982..0000000 --- a/tests/test_scheduler_model_scope.py +++ /dev/null @@ -1,36 +0,0 @@ -import msgspec - -from gen_worker.pb import worker_scheduler_pb2 as pb -from gen_worker.worker import Worker - - -class _Payload(msgspec.Struct): - model: str - - -def test_scheduler_cannot_widen_manifest_model_scope(monkeypatch) -> None: - # Tenant-declared scope via baked manifest mapping. - w = Worker( - user_module_names=[], - worker_jwt="dummy-worker-jwt", - manifest={ - "models": { - "sd15": {"ref": "demo/sd15", "dtypes": ["fp16", "bf16"]}, - } - }, - ) - - # Avoid background download threads in this unit test. - monkeypatch.setattr(w, "_start_startup_prefetch", lambda *_args, **_kwargs: None) - - # Scheduler tries to widen scope (should be ignored / intersected away). - msg = pb.WorkerSchedulerMessage( - endpoint_config=pb.EndpointConfig( - supported_repo_refs=["cozy:evil/evil:latest"], - required_variant_refs=[], - ) - ) - w._process_message(msg) - - # Worker must not widen scope outside the tenant manifest mapping. 
- assert w._release_allowed_model_ids == {"cozy:demo/sd15:latest"} diff --git a/tests/test_signature_contract_and_incremental.py b/tests/test_signature_contract_and_incremental.py deleted file mode 100644 index 98b81ca..0000000 --- a/tests/test_signature_contract_and_incremental.py +++ /dev/null @@ -1,178 +0,0 @@ -import json -import unittest -from typing import Annotated, Iterator - -import msgspec - -from gen_worker.injection import ModelRef, ModelRefSource as Src -from gen_worker.worker import RequestContext, Worker - - -class Input(msgspec.Struct): - text: str - - -class Delta(msgspec.Struct): - delta: str - - -class InputWithModel(msgspec.Struct): - text: str - model_key: str - - -class Output(msgspec.Struct): - model_id: str - - -class FakeModel: - def __init__(self, model_id: str) -> None: - self.model_id = model_id - - @classmethod - def from_pretrained(cls, model_id: str) -> "FakeModel": - return cls(model_id) - - -def _make_worker() -> Worker: - w = Worker.__new__(Worker) - import threading - - w._gpu_busy_lock = threading.Lock() - w._is_gpu_busy = False - w._has_gpu = False - w.max_output_bytes = 0 - w._model_manager = None - w._runtime_loaders = {} - w._custom_runtime_cache = {} - w._custom_runtime_locks = {} - w._fixed_model_id_by_key = {} - w._payload_model_id_by_key_by_function = {} - w._fixed_model_spec_by_key = {} - w._payload_model_spec_by_key_by_function = {} - w._release_allowed_model_ids = None - w._active_tasks_lock = threading.Lock() - w._active_tasks = {} - w._request_batch_context_lock = threading.Lock() - w._request_batch_context = {} - w._send_message = lambda msg: w._sent.append(msg) # type: ignore[method-assign] - w._sent = [] - w._stop_event = threading.Event() - w._running = True - w._materialize_assets = lambda ctx, obj: None # type: ignore[method-assign] - w._discovered_resources = {} - w._model_cache = None - return w - - -class TestContractAndIncremental(unittest.TestCase): - def test_rejects_missing_return_annotation(self) -> 
None: - def bad(ctx: RequestContext, payload: Input): # type: ignore[no-untyped-def] - return Delta(delta="x") - - w = _make_worker() - with self.assertRaises(ValueError): - w._inspect_task_spec(bad) # type: ignore[arg-type] - - def test_incremental_output_emits_deltas_and_completed(self) -> None: - def stream(ctx: RequestContext, payload: Input) -> Iterator[Delta]: - yield Delta(delta=payload.text) - yield Delta(delta="!") - - w = _make_worker() - spec = w._inspect_task_spec(stream) # type: ignore[arg-type] - self.assertEqual(spec.output_mode, "incremental") - - ctx = RequestContext("run-1", emitter=lambda _e: None) - payload = Input(text="hi") - b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) - w._execute_task(ctx, spec, b) - - # Capture incremental output messages (typed token events preferred, legacy worker_event fallback). - events = [] - for m in w._sent: - if not hasattr(m, "WhichOneof"): - continue - msg_type = m.WhichOneof("msg") - if msg_type == "incremental_token_delta": - events.append(("output.delta", m.incremental_token_delta.payload_json)) - continue - if msg_type == "incremental_token_stream_done": - events.append(("output.completed", b"{}")) - continue - if msg_type == "worker_event": - evt = m.worker_event - # Ignore non-output events (e.g. metrics.*). 
- if not str(evt.event_type or "").startswith("output."): - continue - events.append((evt.event_type, evt.payload_json)) - - self.assertGreaterEqual(len(events), 3) - self.assertEqual(events[0][0], "output.delta") - self.assertEqual(json.loads(events[0][1].decode("utf-8"))["delta"], "hi") - self.assertEqual(events[1][0], "output.delta") - self.assertEqual(json.loads(events[1][1].decode("utf-8"))["delta"], "!") - self.assertEqual(events[2][0], "output.completed") - - def test_incremental_output_emits_typed_token_messages(self) -> None: - def stream(ctx: RequestContext, payload: Input) -> Iterator[Delta]: - yield Delta(delta=payload.text) - - w = _make_worker() - spec = w._inspect_task_spec(stream) # type: ignore[arg-type] - - ctx = RequestContext("run-typed-1", emitter=lambda _e: None) - payload = Input(text="hello") - b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) - w._execute_task(ctx, spec, b) - - msg_types = [m.WhichOneof("msg") for m in w._sent if hasattr(m, "WhichOneof")] - self.assertIn("incremental_token_delta", msg_types) - self.assertIn("incremental_token_stream_done", msg_types) - - def test_payload_model_key_resolves_via_endpoint_map(self) -> None: - def fn( - ctx: RequestContext, - model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], - payload: InputWithModel, - ) -> Output: - return Output(model_id=model.model_id) - - w = _make_worker() - w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo"}} - w._release_allowed_model_ids = {"google/foo"} - spec = w._inspect_task_spec(fn) # type: ignore[arg-type] - ctx = RequestContext("run-2", emitter=lambda _e: None) - payload = InputWithModel(text="x", model_key="a") - b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) - w._execute_task(ctx, spec, b) - - run_results = [m.run_result for m in w._sent if hasattr(m, "WhichOneof") and m.WhichOneof("msg") == "run_result"] - self.assertEqual(len(run_results), 1) - rr = run_results[0] - self.assertTrue(rr.success) - out = 
msgspec.msgpack.decode(rr.output_payload, type=dict) - self.assertEqual(out["model_id"], "google/foo") - - def test_payload_model_key_rejects_not_allowlisted(self) -> None: - def fn( - ctx: RequestContext, - model: Annotated[FakeModel, ModelRef(Src.PAYLOAD, "model_key")], - payload: InputWithModel, - ) -> Output: - return Output(model_id=model.model_id) - - w = _make_worker() - w._payload_model_id_by_key_by_function = {"fn": {"a": "google/foo", "b": "google/bar"}} - w._release_allowed_model_ids = {"google/foo"} - spec = w._inspect_task_spec(fn) # type: ignore[arg-type] - ctx = RequestContext("run-3", emitter=lambda _e: None) - payload = InputWithModel(text="x", model_key="b") - b = msgspec.msgpack.encode(msgspec.to_builtins(payload)) - w._execute_task(ctx, spec, b) - - run_results = [m.run_result for m in w._sent if hasattr(m, "WhichOneof") and m.WhichOneof("msg") == "run_result"] - self.assertEqual(len(run_results), 1) - rr = run_results[0] - self.assertFalse(rr.success) - self.assertEqual(rr.error_type, "validation") diff --git a/tests/test_trainer_checkpointing.py b/tests/test_trainer_checkpointing.py deleted file mode 100644 index 8ed9782..0000000 --- a/tests/test_trainer_checkpointing.py +++ /dev/null @@ -1,54 +0,0 @@ -from __future__ import annotations - -from pathlib import Path - -import pytest - - -def test_save_and_load_trainable_module_checkpoint_roundtrip(tmp_path: Path) -> None: - torch = pytest.importorskip("torch") - import torch.nn as nn - - from gen_worker.trainer import load_trainable_module_checkpoint, save_trainable_module_checkpoint - - module = nn.Linear(4, 2, bias=True) - optimizer = torch.optim.AdamW(module.parameters(), lr=1e-3) - - # Warm optimizer state so optimizer checkpointing path is exercised. 
- x = torch.randn(3, 4) - y = torch.randn(3, 2) - loss = ((module(x) - y) ** 2).mean() - loss.backward() - optimizer.step() - optimizer.zero_grad(set_to_none=True) - - checkpoint_dir = tmp_path / "step-0001" - meta = save_trainable_module_checkpoint( - module=module, - optimizer=optimizer, - output_dir=str(checkpoint_dir), - step=1, - final=False, - model_name_or_path="runwayml/stable-diffusion-v1-5", - ) - - assert "primary_path" in meta - primary_path = Path(str(meta["primary_path"])) - assert primary_path.exists() - assert (checkpoint_dir / "checkpoint_meta.json").exists() - - saved_weight = module.weight.detach().clone() - saved_bias = module.bias.detach().clone() - - # Corrupt params then verify load restores trainable tensors. - with torch.no_grad(): - module.weight.add_(10.0) - module.bias.sub_(10.0) - - load_trainable_module_checkpoint( - module=module, - optimizer=optimizer, - checkpoint_dir=str(checkpoint_dir), - ) - assert torch.allclose(module.weight.detach(), saved_weight) - assert torch.allclose(module.bias.detach(), saved_bias) diff --git a/tests/test_trainer_runtime_orchestrated.py b/tests/test_trainer_runtime_orchestrated.py deleted file mode 100644 index b7da66b..0000000 --- a/tests/test_trainer_runtime_orchestrated.py +++ /dev/null @@ -1,448 +0,0 @@ -from __future__ import annotations - -import io -import json -from http.server import BaseHTTPRequestHandler, ThreadingHTTPServer -from pathlib import Path -import sys -import threading -import time -import types -from typing import Any - -import pytest - -from gen_worker.trainer.orchestrated import StartupContractError -from gen_worker.trainer.runtime import run_training_runtime_from_env - - -def _register_trainer_module(monkeypatch: pytest.MonkeyPatch, module_name: str, class_name: str) -> str: - mod = types.ModuleType(module_name) - - class _Trainer: - def setup(self, ctx) -> None: - _ = ctx - - def configure(self, ctx) -> dict[str, object]: - _ = ctx - return {"loaded": False} - - def 
prepare_batch(self, raw_batch: object, state: dict[str, object], ctx) -> object: - _ = state - _ = ctx - return raw_batch - - def train_step(self, prepared_batch: object, state: dict[str, object], ctx): - _ = prepared_batch - _ = ctx - loaded = 1.0 if bool(state.get("loaded")) else 0.0 - from gen_worker.trainer import StepResult - - return StepResult(metrics={"train/loss": loaded}) - - def state_dict(self, state: dict[str, object]) -> dict[str, object]: - return dict(state) - - def load_state_dict(self, state: dict[str, object], payload: dict[str, object], ctx) -> None: - _ = ctx - state.update(payload) - - _Trainer.__name__ = class_name - setattr(mod, class_name, _Trainer) - monkeypatch.setitem(sys.modules, module_name, mod) - return f"{module_name}:{class_name}" - - -def _parquet_bytes() -> bytes: - pa = pytest.importorskip("pyarrow") - pq = pytest.importorskip("pyarrow.parquet") - table = pa.table({"image_ref": ["a", "b"], "caption": ["x", "y"]}) - buf = io.BytesIO() - pq.write_table(table, buf) - return buf.getvalue() - - -class _TestHTTPHandler(BaseHTTPRequestHandler): - auth_token = "" - dataset_bytes = b"" - resume_payload = b"{}" - fail_paths: set[str] = set() - posts: list[dict[str, Any]] = [] - puts: list[dict[str, Any]] = [] - - def log_message(self, format: str, *args: object) -> None: # noqa: A003 - return - - def _authorized(self) -> bool: - token = str(self.auth_token or "").strip() - if not token: - return True - got = str(self.headers.get("Authorization") or "").strip() - return got == f"Bearer {token}" - - def do_GET(self) -> None: # noqa: N802 - if not self._authorized(): - self.send_response(401) - self.end_headers() - return - if self.path == "/inputs/train.parquet": - body = self.dataset_bytes - elif self.path == "/inputs/resume.json": - body = self.resume_payload - else: - self.send_response(404) - self.end_headers() - return - self.send_response(200) - self.send_header("Content-Type", "application/octet-stream") - 
self.send_header("Content-Length", str(len(body))) - self.end_headers() - self.wfile.write(body) - - def do_POST(self) -> None: # noqa: N802 - if not self._authorized(): - self.send_response(401) - self.end_headers() - return - length = int(self.headers.get("Content-Length") or "0") - raw = self.rfile.read(length) if length > 0 else b"{}" - payload = json.loads(raw.decode("utf-8") or "{}") - self.posts.append({"path": self.path, "payload": payload}) - if self.path in self.fail_paths: - self.send_response(500) - self.send_header("Content-Type", "application/json") - self.end_headers() - self.wfile.write(b'{"error":"forced failure"}') - return - self.send_response(200) - self.send_header("Content-Type", "application/json") - self.end_headers() - self.wfile.write(b'{"ok":true}') - - def do_PUT(self) -> None: # noqa: N802 - if not self._authorized(): - self.send_response(401) - self.end_headers() - return - length = int(self.headers.get("Content-Length") or "0") - body = self.rfile.read(length) if length > 0 else b"" - self.puts.append( - { - "path": self.path, - "size": len(body), - "content_type": str(self.headers.get("Content-Type") or ""), - } - ) - if self.path in self.fail_paths: - self.send_response(500) - self.send_header("Content-Type", "application/json") - self.end_headers() - self.wfile.write(b'{"error":"forced failure"}') - return - payload = { - "ref": self.path.removeprefix("/api/v1/file/"), - "sha256": "testsha256", - "size_bytes": len(body), - } - raw = json.dumps(payload).encode("utf-8") - self.send_response(200) - self.send_header("Content-Type", "application/json") - self.send_header("Content-Length", str(len(raw))) - self.end_headers() - self.wfile.write(raw) - - -def _start_test_server(token: str, dataset_bytes: bytes, resume_payload: bytes, fail_paths: set[str] | None = None) -> tuple[ThreadingHTTPServer, str]: - _TestHTTPHandler.auth_token = token - _TestHTTPHandler.dataset_bytes = dataset_bytes - _TestHTTPHandler.resume_payload = resume_payload 
- _TestHTTPHandler.fail_paths = set(fail_paths or set()) - _TestHTTPHandler.posts = [] - _TestHTTPHandler.puts = [] - server = ThreadingHTTPServer(("127.0.0.1", 0), _TestHTTPHandler) - thread = threading.Thread(target=server.serve_forever, daemon=True) - thread.start() - host, port = server.server_address - return server, f"http://{host}:{port}" - - -def test_trainer_runtime_startup_requires_capability_token(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_startup_mod", "StartupTrainer") - spec = {"request_id": "run-orch-startup", "trainer": trainer_import, "max_steps": 1, "mock_batches": [1]} - spec_path = tmp_path / "trainer_job.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") - monkeypatch.delenv("TRAINER_CAPABILITY_TOKEN", raising=False) - - with pytest.raises(StartupContractError, match="startup.missing_capability_token"): - run_training_runtime_from_env() - - -def test_trainer_runtime_orchestrated_happy_path_with_materialize_resume_and_uploads( - monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: - trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_happy_mod", "HappyTrainer") - token = "cap-123" - resume_payload = json.dumps({"state": {"loaded": True}}).encode("utf-8") - server, base = _start_test_server(token=token, dataset_bytes=_parquet_bytes(), resume_payload=resume_payload) - try: - events = tmp_path / "events.jsonl" - ckpt = tmp_path / "ckpt" - samples = tmp_path / "samples" - spec = { - "request_id": "run-orch-happy", - "trainer": trainer_import, - "trainer_api_version": "v1", - "max_steps": 2, - "metric_every": 1, - "checkpoint_every": 1, - "sample_every": 1, - "dataset": {"batch_size": 1, "readahead": 1, "columns": ["image_ref", "caption"]}, - "inputs": { - "dataset_parquet_refs": [f"{base}/inputs/train.parquet"], - 
"resume_checkpoint_ref": f"{base}/inputs/resume.json", - }, - } - spec_path = tmp_path / "trainer_job_happy.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - - monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") - monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) - monkeypatch.setenv("TRAINER_SAMPLES_DIR", str(samples)) - monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") - monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") - monkeypatch.setenv("TRAINER_UPLOAD_SAMPLE_URL", f"{base}/upload/sample") - monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") - - assert run_training_runtime_from_env() == 0 - lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] - assert any(x.get("event") == "started" for x in lines) - assert any(x.get("event") == "metric" and x.get("name") == "train/loss" and float(x.get("value", -1)) == 1.0 for x in lines) - assert any(x.get("event") == "completed" for x in lines) - assert all(x.get("schema_version") == "trainer_event.v1" for x in lines) - - posted_paths = [x["path"] for x in _TestHTTPHandler.posts] - assert "/upload/metrics" in posted_paths - assert "/upload/checkpoint" in posted_paths - assert "/upload/sample" in posted_paths - assert "/upload/terminal" in posted_paths - terminal = [x["payload"] for x in _TestHTTPHandler.posts if x["path"] == "/upload/terminal"][-1] - assert terminal["status"] == "completed" - finally: - server.shutdown() - server.server_close() - - -def test_trainer_runtime_orchestrated_uploads_checkpoint_bytes_to_tensorhub( - monkeypatch: pytest.MonkeyPatch, tmp_path: Path -) -> None: - trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_upload_bytes_mod", "UploadBytesTrainer") - token = 
"cap-bytes-1" - server, base = _start_test_server(token=token, dataset_bytes=_parquet_bytes(), resume_payload=b"{}") - try: - events = tmp_path / "events.jsonl" - ckpt = tmp_path / "ckpt" - samples = tmp_path / "samples" - spec = { - "request_id": "run-orch-upload-bytes", - "owner": "00000000-0000-0000-0000-000000000001", - "trainer": trainer_import, - "trainer_api_version": "v1", - "max_steps": 1, - "metric_every": 1, - "checkpoint_every": 1, - "sample_every": 0, - "mock_batches": [1], - } - spec_path = tmp_path / "trainer_job_upload_bytes.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - - monkeypatch.setenv("TRAINER_ORCHESTRATED", "1") - monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) - monkeypatch.setenv("TRAINER_SAMPLES_DIR", str(samples)) - monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") - monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") - monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") - monkeypatch.setenv("TENSORHUB_URL", base) - - assert run_training_runtime_from_env() == 0 - - assert _TestHTTPHandler.puts, "expected PUT uploads to tensorhub file API" - assert any("/api/v1/file/v1/00000000-0000-0000-0000-000000000001/runs/run-orch-upload-bytes/checkpoints/" in x["path"] for x in _TestHTTPHandler.puts) - - terminal = [x["payload"] for x in _TestHTTPHandler.posts if x["path"] == "/upload/terminal"][-1] - assert terminal["status"] == "completed" - assert terminal["final_checkpoint_ref"] != "" - assert terminal["final_checkpoint_sha256"] == "testsha256" - finally: - server.shutdown() - server.server_close() - - -def test_trainer_runtime_cancel_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - trainer_import = _register_trainer_module(monkeypatch, 
"tmp_orch_cancel_mod", "CancelTrainer") - spec = {"request_id": "run-orch-cancel", "trainer": trainer_import, "max_steps": 3, "metric_every": 1, "mock_batches": [1, 2, 3]} - spec_path = tmp_path / "trainer_job_cancel.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - events = tmp_path / "events.jsonl" - cancel_file = tmp_path / "cancel.flag" - cancel_file.write_text("1", encoding="utf-8") - - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - monkeypatch.setenv("TRAINER_CANCEL_FILE", str(cancel_file)) - - with pytest.raises(Exception, match="canceled"): - run_training_runtime_from_env() - - lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] - failed = [x for x in lines if x.get("event") == "failed"] - assert failed - assert "canceled" in str(failed[-1].get("error", "")).lower() - - -def test_trainer_runtime_upload_failure_reports_upload_category(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_upload_fail_mod", "UploadFailTrainer") - token = "cap-up-1" - server, base = _start_test_server( - token=token, - dataset_bytes=_parquet_bytes(), - resume_payload=json.dumps({"state": {"loaded": True}}).encode("utf-8"), - fail_paths={"/upload/sample"}, - ) - try: - spec = { - "request_id": "run-orch-upload-fail", - "trainer": trainer_import, - "max_steps": 1, - "metric_every": 1, - "checkpoint_every": 1, - "sample_every": 1, - "mock_batches": [1], - } - spec_path = tmp_path / "trainer_job_upload_fail.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - events = tmp_path / "events.jsonl" - - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) - monkeypatch.setenv("TRAINER_UPLOAD_METRICS_URL", f"{base}/upload/metrics") - 
monkeypatch.setenv("TRAINER_UPLOAD_CHECKPOINT_URL", f"{base}/upload/checkpoint") - monkeypatch.setenv("TRAINER_UPLOAD_SAMPLE_URL", f"{base}/upload/sample") - - with pytest.raises(Exception, match="upload"): - run_training_runtime_from_env() - - lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] - failed = [x for x in lines if x.get("event") == "failed"] - assert failed - assert str(failed[-1].get("error", "")).startswith("upload:") - finally: - server.shutdown() - server.server_close() - - -def test_trainer_runtime_timeout_path(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - mod = types.ModuleType("tmp_orch_timeout_mod") - - class SlowTrainer: - def setup(self, ctx) -> None: - _ = ctx - - def configure(self, ctx) -> dict[str, object]: - _ = ctx - return {} - - def prepare_batch(self, raw_batch: object, state: dict[str, object], ctx) -> object: - _ = state - _ = ctx - return raw_batch - - def train_step(self, prepared_batch: object, state: dict[str, object], ctx): - _ = prepared_batch - _ = state - _ = ctx - time.sleep(1.1) - from gen_worker.trainer import StepResult - - return StepResult(metrics={"train/loss": 0.1}) - - def state_dict(self, state: dict[str, object]) -> dict[str, object]: - return dict(state) - - def load_state_dict(self, state: dict[str, object], payload: dict[str, object], ctx) -> None: - _ = ctx - state.update(payload) - - mod.SlowTrainer = SlowTrainer - monkeypatch.setitem(sys.modules, "tmp_orch_timeout_mod", mod) - - spec = { - "request_id": "run-orch-timeout", - "trainer": "tmp_orch_timeout_mod:SlowTrainer", - "max_steps": 2, - "metric_every": 1, - "mock_batches": [1, 2, 3], - } - spec_path = tmp_path / "trainer_job_timeout.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - events = tmp_path / "events.jsonl" - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - 
monkeypatch.setenv("TRAINER_MAX_RUNTIME_SECONDS", "1") - - with pytest.raises(Exception, match="timeout"): - run_training_runtime_from_env() - - lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] - failed = [x for x in lines if x.get("event") == "failed"] - assert failed - assert "timeout" in str(failed[-1].get("error", "")).lower() - - -def test_trainer_runtime_resume_idempotent_skips_when_final_exists(monkeypatch: pytest.MonkeyPatch, tmp_path: Path) -> None: - trainer_import = _register_trainer_module(monkeypatch, "tmp_orch_resume_skip_mod", "ResumeSkipTrainer") - token = "cap-up-2" - server, base = _start_test_server( - token=token, - dataset_bytes=_parquet_bytes(), - resume_payload=json.dumps({"state": {"loaded": True}}).encode("utf-8"), - ) - try: - ckpt = tmp_path / "checkpoints" - ckpt.mkdir(parents=True, exist_ok=True) - (ckpt / "final.json").write_text(json.dumps({"state": {"loaded": True}}), encoding="utf-8") - events = tmp_path / "events.jsonl" - spec = { - "request_id": "run-orch-resume-skip", - "trainer": trainer_import, - "max_steps": 5, - "resume_from_latest": True, - "mock_batches": [1, 2, 3], - } - spec_path = tmp_path / "trainer_job_resume_skip.json" - spec_path.write_text(json.dumps(spec), encoding="utf-8") - - monkeypatch.setenv("TRAINER_JOB_SPEC_PATH", str(spec_path)) - monkeypatch.setenv("TRAINER_EVENTS_PATH", str(events)) - monkeypatch.setenv("TRAINER_CHECKPOINTS_DIR", str(ckpt)) - monkeypatch.setenv("TRAINER_CAPABILITY_TOKEN", token) - monkeypatch.setenv("TRAINER_UPLOAD_TERMINAL_URL", f"{base}/upload/terminal") - - assert run_training_runtime_from_env() == 0 - lines = [json.loads(x) for x in events.read_text(encoding="utf-8").splitlines() if x.strip()] - assert any(x.get("event") == "completed" for x in lines) - posted_paths = [x["path"] for x in _TestHTTPHandler.posts] - assert "/upload/terminal" not in posted_paths - finally: - server.shutdown() - server.server_close() diff --git 
a/tests/test_training_endpoints_contract_smoke.py b/tests/test_training_endpoints_contract_smoke.py deleted file mode 100644 index 8a60b89..0000000 --- a/tests/test_training_endpoints_contract_smoke.py +++ /dev/null @@ -1,185 +0,0 @@ -from __future__ import annotations - -import os -import sys -from pathlib import Path -from dataclasses import dataclass, field - -import pytest - -from gen_worker.trainer import StepContext, TrainingJobSpec, load_trainer_plugin, run_training_loop - - -class _Batch: - def __init__(self, values: dict[str, list[object]]) -> None: - self._values = values - - def to_pydict(self) -> dict[str, list[object]]: - return self._values - - -class _T2IModel: - def train_t2i_step( - self, *, image_refs: list[str], prompts: list[str], hyperparams: dict[str, object] - ) -> float: - _ = hyperparams - if len(image_refs) != len(prompts): - raise ValueError("length mismatch") - return 1.0 - - -class _EditModel: - def train_edit_step( - self, - *, - source_refs: list[str], - target_refs: list[str], - instructions: list[str], - mask_refs: list[str | None], - hyperparams: dict[str, object], - ) -> float: - _ = hyperparams - if not (len(source_refs) == len(target_refs) == len(instructions) == len(mask_refs)): - raise ValueError("length mismatch") - return 2.0 - - -@dataclass -class _Reporter: - metrics: list[tuple[str, float, int]] = field(default_factory=list) - checkpoints: list[tuple[str, int]] = field(default_factory=list) - samples: list[tuple[str, int]] = field(default_factory=list) - completed_calls: list[tuple[int, str | None]] = field(default_factory=list) - failed_calls: list[tuple[int, str]] = field(default_factory=list) - - def started(self, *, request_id: str) -> None: - _ = request_id - - def metric(self, *, name: str, value: float, step: int) -> None: - self.metrics.append((name, value, step)) - - def checkpoint(self, *, path: str, step: int) -> None: - self.checkpoints.append((path, step)) - - def sample(self, *, path: str, step: int) -> 
None: - self.samples.append((path, step)) - - def completed(self, *, step: int, final_checkpoint: str | None) -> None: - self.completed_calls.append((step, final_checkpoint)) - - def failed(self, *, step: int, error: str) -> None: - self.failed_calls.append((step, error)) - - def is_canceled(self) -> bool: - return False - - -@dataclass -class _Writer: - checkpoints: list[tuple[int, dict[str, object]]] = field(default_factory=list) - final_payloads: list[dict[str, object]] = field(default_factory=list) - - def write_checkpoint( - self, - *, - step: int, - state_payload: dict[str, object], - trainer: object, - state: object, - ctx: StepContext, - ) -> str: - _ = trainer - _ = state - _ = ctx - self.checkpoints.append((step, state_payload)) - return f"/tmp/ckpt-{step}.json" - - def write_samples(self, *, step: int, state: object, ctx: StepContext) -> list[str]: - _ = state - _ = ctx - return [f"/tmp/sample-{step}.json"] - - def finalize( - self, - *, - state_payload: dict[str, object], - trainer: object, - state: object, - ctx: StepContext, - ) -> str | None: - _ = trainer - _ = state - _ = ctx - self.final_payloads.append(state_payload) - return "/tmp/final.json" - - -def _training_endpoints_root() -> Path: - env_override = os.getenv("TRAINING_ENDPOINTS_ROOT") - if env_override: - return Path(env_override).resolve() - return Path(__file__).resolve().parents[2] / "training-endpoints" - - -@pytest.mark.skipif(not _training_endpoints_root().exists(), reason="training-endpoints repo not present") -def test_training_endpoints_examples_run_in_gen_worker_loop() -> None: - root = _training_endpoints_root() - t2i_src = root / "t2i_three_prompts" / "src" - edit_src = root / "img2img_edit_optional_prompt_mask" / "src" - sys.path.insert(0, str(t2i_src)) - sys.path.insert(0, str(edit_src)) - try: - t2i_trainer = load_trainer_plugin("t2i_three_prompts.main:ThreePromptT2ITrainer") - t2i_job = TrainingJobSpec(request_id="t2i-run", max_steps=1, metric_every=1, checkpoint_every=1, 
sample_every=1) - t2i_ctx = StepContext( - job=t2i_job, - model_handles={"model": _T2IModel()}, - ) - t2i_reporter = _Reporter() - t2i_writer = _Writer() - t2i_terminal = run_training_loop( - job=t2i_job, - ctx=t2i_ctx, - trainer=t2i_trainer, - batches=[_Batch({"image_ref": ["a"], "caption_short": ["short"]})], - reporter=t2i_reporter, - artifact_writer=t2i_writer, - ) - assert t2i_terminal == 1 - assert any(name == "train/loss" for (name, _value, _step) in t2i_reporter.metrics) - assert t2i_reporter.checkpoints and t2i_writer.final_payloads - assert t2i_reporter.samples - - edit_trainer = load_trainer_plugin("img2img_edit_optional_prompt_mask.main:Img2ImgEditTrainer") - edit_job = TrainingJobSpec(request_id="edit-run", max_steps=1, metric_every=1, checkpoint_every=1, sample_every=1) - edit_ctx = StepContext( - job=edit_job, - model_handles={"model": _EditModel()}, - ) - edit_reporter = _Reporter() - edit_writer = _Writer() - edit_terminal = run_training_loop( - job=edit_job, - ctx=edit_ctx, - trainer=edit_trainer, - batches=[ - _Batch( - { - "source_image_ref": ["source"], - "target_image_ref": ["target"], - "edit_type": ["mosaic"], - } - ) - ], - reporter=edit_reporter, - artifact_writer=edit_writer, - ) - assert edit_terminal == 1 - assert any(name == "train/loss" for (name, _value, _step) in edit_reporter.metrics) - assert edit_reporter.checkpoints and edit_writer.final_payloads - assert edit_reporter.samples - finally: - if str(t2i_src) in sys.path: - sys.path.remove(str(t2i_src)) - if str(edit_src) in sys.path: - sys.path.remove(str(edit_src)) diff --git a/tests/test_worker_leader_redirect.py b/tests/test_worker_leader_redirect.py deleted file mode 100644 index b0104d6..0000000 --- a/tests/test_worker_leader_redirect.py +++ /dev/null @@ -1,19 +0,0 @@ -import unittest - -from gen_worker.worker import Worker - - -class TestWorkerLeaderRedirect(unittest.TestCase): - def test_extract_leader_addr(self): - 
self.assertEqual(Worker._extract_leader_addr("not_leader:127.0.0.1:8080"), "127.0.0.1:8080") - self.assertIsNone(Worker._extract_leader_addr("not_leader:")) - self.assertIsNone(Worker._extract_leader_addr("other_error")) - self.assertIsNone(Worker._extract_leader_addr(None)) - - def test_normalize_scheduler_addrs(self): - addrs = Worker._normalize_scheduler_addrs("a:1", ["b:2", "a:1", " ", "c:3"]) - self.assertEqual(addrs, ["a:1", "b:2", "c:3"]) - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_worker_model_keyspace.py b/tests/test_worker_model_keyspace.py deleted file mode 100644 index f6775eb..0000000 --- a/tests/test_worker_model_keyspace.py +++ /dev/null @@ -1,53 +0,0 @@ -import unittest - -from gen_worker.injection import InjectionSpec, ModelRef, ModelRefSource -from gen_worker.worker import Worker - - -class _Payload: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - setattr(self, k, v) - - -class TestWorkerModelKeyspace(unittest.TestCase): - def test_payload_model_selection_uses_function_mapping(self) -> None: - w = Worker.__new__(Worker) - w._payload_model_id_by_key_by_function = { - "generate": {"sdxl": "stabilityai/stable-diffusion-xl-base-1.0"} - } - w._fixed_model_id_by_key = {} - w._release_allowed_model_ids = None - - inj = InjectionSpec( - param_name="pipe", - param_type=object, - model_ref=ModelRef(ModelRefSource.PAYLOAD, "model_key"), - ) - payload = _Payload(model_key="sdxl") - - got, key = Worker._resolve_model_id_for_injection(w, "generate", inj, payload) # type: ignore[arg-type] - self.assertEqual(got, "stabilityai/stable-diffusion-xl-base-1.0") - self.assertEqual(key, "sdxl") - - def test_payload_model_selection_rejects_unknown_key(self) -> None: - w = Worker.__new__(Worker) - w._payload_model_id_by_key_by_function = { - "generate": {"sdxl": "stabilityai/stable-diffusion-xl-base-1.0"} - } - w._fixed_model_id_by_key = {} - w._release_allowed_model_ids = None - - inj = InjectionSpec( - param_name="pipe", - 
param_type=object, - model_ref=ModelRef(ModelRefSource.PAYLOAD, "model_key"), - ) - payload = _Payload(model_key="nope") - - with self.assertRaises(ValueError): - Worker._resolve_model_id_for_injection(w, "generate", inj, payload) # type: ignore[arg-type] - - -if __name__ == "__main__": - unittest.main() diff --git a/tests/test_worker_startup_visibility.py b/tests/test_worker_startup_visibility.py deleted file mode 100644 index 003a1cf..0000000 --- a/tests/test_worker_startup_visibility.py +++ /dev/null @@ -1,131 +0,0 @@ -from __future__ import annotations - -import json -import sys -import threading -import time -import types -from pathlib import Path -from typing import Any - -import pytest - -from gen_worker.worker import Worker - - -def test_registration_watchdog_emits_timeout_and_sets_stop() -> None: - w = Worker.__new__(Worker) - w.worker_id = "w-1" - w.scheduler_addr = "127.0.0.1:8080" - w._process_started_monotonic = time.monotonic() - 0.5 - w._registered_event = threading.Event() - w._running = True - w._stop_event = threading.Event() - w._startup_timeout_triggered = False - - startup_events: list[tuple[str, dict[str, Any]]] = [] - worker_events: list[tuple[str, dict[str, Any]]] = [] - w._emit_startup_phase = lambda phase, **kw: startup_events.append((phase, kw)) # type: ignore[method-assign] - w._emit_worker_event_bytes = ( # type: ignore[method-assign] - lambda request_id, event_type, payload_json: worker_events.append( - (event_type, json.loads(payload_json.decode("utf-8"))) - ) - ) - w._close_connection = lambda: None # type: ignore[method-assign] - - w._registration_watchdog_loop(timeout_s=0.02) - - assert w._startup_timeout_triggered is True - assert w._stop_event.is_set() is True - assert any(name == "startup_timeout_unregistered" for name, _ in startup_events) - assert any(name == "worker.startup_timeout_unregistered" for name, _ in worker_events) - - -def test_task_phase_watchdog_emits_stuck_event() -> None: - w = Worker.__new__(Worker) - seen: 
list[tuple[str, dict[str, Any]]] = [] - w._emit_worker_event_bytes = ( # type: ignore[method-assign] - lambda request_id, event_type, payload_json: seen.append((event_type, json.loads(payload_json.decode("utf-8")))) - ) - - timer = w._start_task_phase_watchdog( - request_id="run-1", - phase="inference", - warn_after_s=0.02, - payload={"function_name": "generate"}, - ) - time.sleep(0.06) - if timer is not None: - timer.cancel() - - assert any(name == "task.inference.stuck" for name, _ in seen) - ev_payload = next(p for name, p in seen if name == "task.inference.stuck") - assert ev_payload["function_name"] == "generate" - assert ev_payload["elapsed_ms"] >= 20 - - -def test_emit_worker_fatal_includes_traceback_metadata() -> None: - w = Worker.__new__(Worker) - w.worker_id = "w-2" - w.scheduler_addr = "scheduler:8080" - w._process_started_monotonic = time.monotonic() - 1.0 - seen: list[tuple[str, dict[str, Any]]] = [] - w._emit_worker_event_bytes = ( # type: ignore[method-assign] - lambda request_id, event_type, payload_json: seen.append((event_type, json.loads(payload_json.decode("utf-8")))) - ) - - try: - raise RuntimeError("boom") - except Exception as exc: - w._emit_worker_fatal("startup", exc, exit_code=7) - - assert seen - event_type, payload = seen[-1] - assert event_type == "worker.fatal" - assert payload["phase"] == "startup" - assert payload["exception_class"] == "RuntimeError" - assert "boom" in payload["exception_message"] - assert "RuntimeError" in payload["traceback"] - assert payload["exit_code"] == 7 - - -def test_run_raises_when_registration_timeout_reached(tmp_path: Path, monkeypatch: pytest.MonkeyPatch) -> None: - mod_dir = tmp_path / "mod" - mod_dir.mkdir(parents=True, exist_ok=True) - (mod_dir / "tiny_mod.py").write_text( - """ -from __future__ import annotations -import msgspec -from gen_worker.decorators import worker_function -from gen_worker.worker import RequestContext - -class Input(msgspec.Struct): - name: str - -class 
Output(msgspec.Struct): - ok: bool - -@worker_function() -def tiny(ctx: RequestContext, payload: Input) -> Output: - return Output(ok=True) -""".lstrip(), - encoding="utf-8", - ) - sys.path.insert(0, str(mod_dir)) - - w = Worker( - scheduler_addr="127.0.0.1:1", - user_module_names=["tiny_mod"], - worker_jwt="jwt-test", - reconnect_delay=0, - max_reconnect_attempts=0, - ) - monkeypatch.setattr(w, "connect", types.MethodType(lambda self: False, w)) - w._register_timeout_s = 1 - w._reconnect_delay_base = 0 - w._reconnect_delay_max = 0 - w._reconnect_jitter = 0 - - with pytest.raises(RuntimeError, match="startup_timeout_unregistered"): - w.run() - diff --git a/tests/test_worker_telemetry_issue67.py b/tests/test_worker_telemetry_issue67.py deleted file mode 100644 index ea54041..0000000 --- a/tests/test_worker_telemetry_issue67.py +++ /dev/null @@ -1,187 +0,0 @@ -from __future__ import annotations - -import asyncio -import threading -import time -from pathlib import Path -from typing import Any - -import msgspec -import pytest - -from gen_worker.model_cache import ModelCache -from gen_worker.worker import RequestContext, Worker, pb, _TaskSpec -from gen_worker.decorators import ResourceRequirements - - -class _Input(msgspec.Struct): - sleep_s: float - - -class _Output(msgspec.Struct): - ok: bool - - -def _sleep_fn(ctx: RequestContext, payload: _Input) -> _Output: - time.sleep(payload.sleep_s) - return _Output(ok=True) - - -def test_gpu_is_busy_refcount_overlapping_inference() -> None: - w = Worker.__new__(Worker) - w._has_gpu = True - w._gpu_busy_lock = threading.Lock() - w._gpu_busy_refcount = 0 - w._is_gpu_busy = False - w._active_model_use_lock = threading.Lock() - w._active_model_use_counts = {} - w._active_tasks_lock = threading.Lock() - w._active_tasks = {} - w._active_function_counts = {} - w._send_message = lambda msg: None # type: ignore[method-assign] - w._send_task_result = lambda *args, **kwargs: None # type: ignore[method-assign] - w._materialize_assets = 
lambda ctx, obj: None # type: ignore[method-assign] - - spec = _TaskSpec( - name="sleep", - func=_sleep_fn, - resources=ResourceRequirements(), - ctx_param="ctx", - payload_param="payload", - payload_type=_Input, - output_mode="single", - output_type=_Output, - injections=(), - ) - - ctx1 = RequestContext("r1", emitter=lambda e: None) - ctx2 = RequestContext("r2", emitter=lambda e: None) - payload = msgspec.msgpack.encode({"sleep_s": 0.25}) - - t1 = threading.Thread(target=w._execute_task, args=(ctx1, spec, payload), daemon=True) - t2 = threading.Thread(target=w._execute_task, args=(ctx2, spec, payload), daemon=True) - - t1.start() - t2.start() - - # Wait until at least one entered busy. - deadline = time.time() + 2.0 - while time.time() < deadline and not w._get_gpu_busy_status(): - time.sleep(0.01) - assert w._get_gpu_busy_status() is True - - # While at least one task is running, busy must remain true. - while t1.is_alive() or t2.is_alive(): - assert w._get_gpu_busy_status() is True - time.sleep(0.01) - - assert w._get_gpu_busy_status() is False - - -class _StubModelManager: - def __init__(self) -> None: - self._loaded: set[str] = set() - self.model_sizes: dict[str, float] = {} - - async def load_model_into_vram(self, model_id: str) -> bool: - await asyncio.sleep(0.15) - self._loaded.add(model_id) - return True - - def unload(self, model_id: str) -> None: - self._loaded.discard(model_id) - - def get_vram_loaded_models(self) -> list[str]: - return sorted(self._loaded) - - -def test_load_model_emits_events_and_updates_vram_models_and_busy(tmp_path: Path) -> None: - w = Worker.__new__(Worker) - w._has_gpu = True - w._gpu_busy_lock = threading.Lock() - w._gpu_busy_refcount = 0 - w._is_gpu_busy = False - w._active_model_use_lock = threading.Lock() - w._active_model_use_counts = {} - w._model_init_done_event = threading.Event() - w._model_init_done_event.set() - w._model_manager = _StubModelManager() - w._model_cache = ModelCache(model_cache_dir=str(tmp_path / 
"cache")) - w._task_specs = {} - w._ws_specs = {} - w._discovered_resources = {} - w._function_schemas = {} - w.max_concurrency = 0 - w.runpod_pod_id = "" - - sent: list[Any] = [] - w._send_message = lambda msg: sent.append(msg) # type: ignore[method-assign] - - cmd = pb.LoadModelCommand(model_id="cozy:demo/repo@sha256:snap-1") - - th = threading.Thread(target=w._handle_load_model_cmd, args=(cmd,), daemon=True) - th.start() - - # Busy should become true during the async sleep. - deadline = time.time() + 2.0 - saw_busy = False - while time.time() < deadline and th.is_alive(): - if w._get_gpu_busy_status(): - saw_busy = True - break - time.sleep(0.01) - assert saw_busy is True - - th.join(timeout=5) - assert w._get_gpu_busy_status() is False - - # Must emit load.started + load.completed/failed. - event_types = [ - m.worker_event.event_type - for m in sent - if getattr(m, "worker_event", None) and m.HasField("worker_event") - ] - assert "model.load.started" in event_types - assert "model.load.completed" in event_types - - # LoadModelResult must succeed. - results = [m.load_model_result for m in sent if m.HasField("load_model_result")] - assert results and results[-1].success is True - - # The immediate registration update should reflect vram_models after the load. 
- regs = [m.worker_registration for m in sent if m.HasField("worker_registration")] - assert regs - assert "cozy:demo/repo@sha256:snap-1" in list(regs[-1].resources.vram_models) - - -def test_unload_model_rejected_when_in_use(tmp_path: Path) -> None: - w = Worker.__new__(Worker) - w._has_gpu = True - w._gpu_busy_lock = threading.Lock() - w._gpu_busy_refcount = 0 - w._is_gpu_busy = False - w._active_model_use_lock = threading.Lock() - w._active_model_use_counts = {} - w._model_manager = _StubModelManager() - w._model_cache = ModelCache(model_cache_dir=str(tmp_path / "cache")) - w._task_specs = {} - w._ws_specs = {} - w._discovered_resources = {} - w._function_schemas = {} - w.max_concurrency = 0 - w.runpod_pod_id = "" - - sent: list[Any] = [] - w._send_message = lambda msg: sent.append(msg) # type: ignore[method-assign] - - model_id = "cozy:demo/repo@sha256:snap-1" - w._model_use_enter(model_id) - try: - w._handle_unload_model_cmd(pb.UnloadModelCommand(model_id=model_id)) - finally: - w._model_use_exit(model_id) - - res = [m.unload_model_result for m in sent if m.HasField("unload_model_result")] - assert res and res[-1].success is False - assert "model_in_use" in (res[-1].error_message or "") - diff --git a/tests/test_worker_wire_protocol.py b/tests/test_worker_wire_protocol.py deleted file mode 100644 index f235287..0000000 --- a/tests/test_worker_wire_protocol.py +++ /dev/null @@ -1,28 +0,0 @@ -from __future__ import annotations - -from gen_worker.wire_protocol import WIRE_PROTOCOL_MAJOR, WIRE_PROTOCOL_MINOR -from gen_worker.worker import Worker - - -def test_registration_advertises_wire_protocol(monkeypatch) -> None: - sent = [] - - w = Worker( - scheduler_addr="127.0.0.1:65535", - user_module_names=[], - worker_jwt="test-jwt", - reconnect_delay=0, - ) - monkeypatch.setattr(w, "_send_message", lambda message: sent.append(message)) - - w._register_worker(is_heartbeat=False) - - assert sent, "expected at least one outgoing registration message" - reg = 
sent[0].worker_registration - assert reg.protocol_major == WIRE_PROTOCOL_MAJOR - assert reg.protocol_minor == WIRE_PROTOCOL_MINOR - - -def test_detects_protocol_incompatibility_marker() -> None: - assert Worker._is_protocol_incompatibility("unsupported_worker_protocol:1.0 supported=1:1-9999") - assert not Worker._is_protocol_incompatibility("not_leader:127.0.0.1:50051") From 0cb08a33f1200d5f5cf3d0a7114ec877c7bca37b Mon Sep 17 00:00:00 2001 From: arpbansal Date: Wed, 18 Mar 2026 08:42:54 +0000 Subject: [PATCH 4/4] revert back any change done in example/ --- examples/firered-image-edit/Dockerfile | 14 +- examples/firered-image-edit/endpoint.toml | 2 +- examples/firered-image-edit/pyproject.toml | 2 +- examples/firered-image-edit/uv.lock | 2 +- examples/flux2-klein-4b/Dockerfile | 16 +- examples/flux2-klein-4b/README.md | 45 +- examples/flux2-klein-4b/endpoint.toml | 16 +- examples/flux2-klein-4b/pyproject.toml | 6 +- .../flux2-klein-4b/src/flux2_klein_4b/main.py | 56 +- examples/flux2-klein-4b/uv.lock | 2 +- examples/image-gen/Dockerfile | 14 +- examples/image-gen/endpoint.toml | 2 +- examples/image-gen/pyproject.toml | 2 +- examples/image-gen/uv.lock | 2 +- examples/marco-polo/pyproject.toml | 10 +- examples/marco-polo/uv.lock | 947 +++++++++--------- examples/medasr-transcribe/Dockerfile | 14 +- examples/medasr-transcribe/endpoint.toml | 2 +- examples/medasr-transcribe/pyproject.toml | 2 +- examples/medasr-transcribe/uv.lock | 2 +- examples/multi-sdxl-checkpoints/Dockerfile | 14 +- examples/multi-sdxl-checkpoints/endpoint.toml | 2 +- .../multi-sdxl-checkpoints/pyproject.toml | 2 +- examples/multi-sdxl-checkpoints/uv.lock | 2 +- examples/openai-codex/Dockerfile | 10 +- examples/openai-codex/pyproject.toml | 2 +- examples/openai-codex/uv.lock | 2 +- examples/qwen-image-2512/Dockerfile | 12 +- examples/qwen-image-2512/endpoint.toml | 2 +- examples/qwen-image-2512/pyproject.toml | 2 +- examples/qwen-image-2512/uv.lock | 2 +- examples/sd15/Dockerfile | 16 +- 
examples/sd15/Dockerfile.local | 72 -- examples/sd15/endpoint.toml | 2 +- examples/sd15/pyproject.toml | 2 +- examples/sd15/uv.lock | 2 +- examples/smoke-test/Dockerfile | 53 - examples/smoke-test/src/smoke_test/example.py | 148 --- examples/z-image-lora/Dockerfile | 14 +- examples/z-image-lora/endpoint.toml | 2 +- examples/z-image-lora/pyproject.toml | 2 +- examples/z-image-lora/uv.lock | 2 +- 42 files changed, 615 insertions(+), 910 deletions(-) delete mode 100644 examples/sd15/Dockerfile.local delete mode 100644 examples/smoke-test/Dockerfile delete mode 100644 examples/smoke-test/src/smoke_test/example.py diff --git a/examples/firered-image-edit/Dockerfile b/examples/firered-image-edit/Dockerfile index 5c9919f..1351d5f 100644 --- a/examples/firered-image-edit/Dockerfile +++ b/examples/firered-image-edit/Dockerfile @@ -1,13 +1,14 @@ # Tenant-supplied Dockerfile example (GPU). # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -22,10 +23,9 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -33,7 +33,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/firered-image-edit/endpoint.toml b/examples/firered-image-edit/endpoint.toml index f39a8ec..086e2a8 100644 --- a/examples/firered-image-edit/endpoint.toml +++ b/examples/firered-image-edit/endpoint.toml @@ -7,7 +7,7 @@ main = "firered_image_edit.main" firered_image_edit = { ref = "fireredteam/firered-image-edit-1.0", dtypes = ["bf16"] } [host.requirements] -cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 24 diff --git a/examples/firered-image-edit/pyproject.toml b/examples/firered-image-edit/pyproject.toml index 9374d51..6ff91ec 100644 --- a/examples/firered-image-edit/pyproject.toml +++ 
b/examples/firered-image-edit/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "FireRed Image Edit example (inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers @ git+https://github.com/huggingface/diffusers", "transformers>=4.51.3", "accelerate", diff --git a/examples/firered-image-edit/uv.lock b/examples/firered-image-edit/uv.lock index 077624f..320d631 100644 --- a/examples/firered-image-edit/uv.lock +++ b/examples/firered-image-edit/uv.lock @@ -503,7 +503,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "protobuf" }, { name = "sentencepiece" }, diff --git a/examples/flux2-klein-4b/Dockerfile b/examples/flux2-klein-4b/Dockerfile index 8326ef1..cff5e4c 100644 --- a/examples/flux2-klein-4b/Dockerfile +++ b/examples/flux2-klein-4b/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. # # Local build (GPU): @@ -10,7 +10,8 @@ ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -26,20 +27,19 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ # Stable runtime layers (avoid re-downloading torch/cu libs on every tenant build). 
RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final # Copy lock metadata first so dependency layers are cacheable across source edits. COPY pyproject.toml uv.lock /app/ -# Install tenant dependencies into the global environment without replacing torch/gen-worker. +# Install tenant dependencies into the global environment without replacing torch. RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/flux2-klein-4b/README.md b/examples/flux2-klein-4b/README.md index 962b512..e222d4f 100644 --- a/examples/flux2-klein-4b/README.md +++ b/examples/flux2-klein-4b/README.md @@ -1,40 +1,25 @@ # flux2-klein-4b -FLUX.2-klein turbo example using Cozy’s injection pattern (4B + 9B variants). +FLUX.2-klein 4B endpoint with separate base/turbo functions and dtype-specific variants. -- The worker function only defines input/output + runs inference. -- Fixed model selection is declared in code via `ModelRef(Src.FIXED, "")`. -- Model refs/dtypes are declared in `endpoint.toml [models]`. -- This model is treated as a turbo model: the worker forces `num_inference_steps=8`. 
- -Steps: - -- `num_inference_steps` is accepted in the payload, but it is clamped to `[4, 8]` (rounded) for predictable cost/latency. +Naming convention in this repo: -Code uses: +- Base model ref: `black-forest-labs/flux.2-klein-4b-base` +- Turbo model ref: `black-forest-labs/flux.2-klein-4b-turbo` -```py -pipeline: Annotated[ - Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b"), -] -``` +This avoids ambiguity with upstream naming where `flux.2-klein-4b` is commonly used for turbo variants. Functions: -- `generate`: 4B bf16 (regular turbo baseline) -- `generate_fp8`: 4B fp8 -- `generate_9b`: 9B bf16 (regular turbo baseline) -- `generate_9b_fp8`: 9B fp8 -- `generate_int8`: int8-only -- `generate_int4`: int4-only +- `generate`: base bf16 +- `generate_turbo`: turbo bf16 +- `generate_fp8`: base fp8 +- `generate_turbo_fp8`: turbo fp8 +- `generate_nvfp4`: base nvfp4 +- `generate_turbo_nvfp4`: turbo nvfp4 -Notes on FP8: +Notes: -- FP8 support here is **weight-only** quantization via `torchao` (Diffusers TorchAoConfig). -- GPUs vary: FP8 acceleration typically requires newer NVIDIA GPUs (e.g. Ada/Hopper class). - -Notes on INT8/INT4: - -- INT8/INT4 support here is **weight-only** quantization via `torchao` (Diffusers TorchAoConfig). -- INT4 is experimental for diffusion; expect quality regressions or incompatibilities. +- Fixed model selection is declared in code via `ModelRef(Src.FIXED, "")`. +- Model refs/dtypes are declared in `endpoint.toml [models]`. +- `num_inference_steps` is accepted in the payload, but clamped to `[4, 8]`. diff --git a/examples/flux2-klein-4b/endpoint.toml b/examples/flux2-klein-4b/endpoint.toml index fe645d6..ada324a 100644 --- a/examples/flux2-klein-4b/endpoint.toml +++ b/examples/flux2-klein-4b/endpoint.toml @@ -1,20 +1,20 @@ schema_version = 1 -name = "flux2-klein-4b" +name = "flux.2-klein-4b" # Python import path used for function discovery (not a Docker ENTRYPOINT). 
main = "flux2_klein_4b.main" [models] -flux2-klein-4b = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["bf16"] } -flux2-klein-4b_fp8 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["fp8"] } -flux2-klein-9b = { ref = "black-forest-labs/flux.2-klein-9b", dtypes = ["bf16"] } -flux2-klein-9b_fp8 = { ref = "black-forest-labs/flux.2-klein-9b", dtypes = ["fp8"] } -flux2-klein-4b_int8 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["int8"] } -flux2-klein-4b_int4 = { ref = "black-forest-labs/flux.2-klein-4b", dtypes = ["int4"] } +flux2-klein-4b-base = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["bf16"] } +flux2-klein-4b-turbo = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["bf16"] } +flux2-klein-4b-base_fp8 = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["fp8"] } +flux2-klein-4b-turbo_fp8 = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["fp8"] } +flux2-klein-4b-base_nvfp4 = { ref = "black-forest-labs/flux.2-klein-4b-base", dtypes = ["nvfp4"] } +flux2-klein-4b-turbo_nvfp4 = { ref = "black-forest-labs/flux.2-klein-4b-turbo", dtypes = ["nvfp4"] } [host.requirements] # If cuda is set, the platform treats this worker as requiring an NVIDIA GPU. 
-cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 24 diff --git a/examples/flux2-klein-4b/pyproject.toml b/examples/flux2-klein-4b/pyproject.toml index 9d6cf0c..c1abd27 100644 --- a/examples/flux2-klein-4b/pyproject.toml +++ b/examples/flux2-klein-4b/pyproject.toml @@ -1,15 +1,15 @@ [project] name = "flux2-klein-4b" version = "0.1.0" -description = "Flux2-klein-4B example (inference-only; Cozy manifest via endpoint.toml)" +description = "Flux2-klein-4B endpoint (base+turbo; inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers @ git+https://github.com/huggingface/diffusers.git@99e2cfff27dec514a43e260e885c5e6eca038b36", "transformers>=4.56,<5", "accelerate", "pillow", - # Needed for fp8 weight-only quantization variants (diffusers TorchAoConfig). + # Needed for fp8/nvfp4 variants (diffusers TorchAoConfig and/or variant-aware loading). "torchao", ] diff --git a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py index 3db189b..6ad278c 100644 --- a/examples/flux2-klein-4b/src/flux2_klein_4b/main.py +++ b/examples/flux2-klein-4b/src/flux2_klein_4b/main.py @@ -18,6 +18,7 @@ logger = logging.getLogger(__name__) logging.basicConfig(level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s") _flux_resources = ResourceRequirements() +_nvfp4_resources = ResourceRequirements(compute_capability_min=10.0) _pipeline_locks_guard = threading.Lock() _pipeline_locks: dict[int, threading.Lock] = {} @@ -103,85 +104,68 @@ def generate( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b"), + ModelRef(Src.FIXED, "flux2-klein-4b-base"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-4b") + return _generate(ctx, pipeline, payload, "flux2-klein-4b-base") @worker_function(_flux_resources) -def generate_fp8( 
+def generate_turbo( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b_fp8"), + ModelRef(Src.FIXED, "flux2-klein-4b-turbo"), ], payload: GenerateInput, ) -> GenerateOutput: - """ - FP8 function. - - This endpoint is intended to run against an fp8-weight-only artifact (or an artifact - that the worker can load with torchao-backed fp8 quantization enabled). - """ - return _generate(ctx, pipeline, payload, "flux2-klein-4b_fp8") + return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo") @worker_function(_flux_resources) -def generate_9b( +def generate_fp8( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-9b"), + ModelRef(Src.FIXED, "flux2-klein-4b-base_fp8"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-9b") + return _generate(ctx, pipeline, payload, "flux2-klein-4b-base_fp8") @worker_function(_flux_resources) -def generate_9b_fp8( +def generate_turbo_fp8( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-9b_fp8"), + ModelRef(Src.FIXED, "flux2-klein-4b-turbo_fp8"), ], payload: GenerateInput, ) -> GenerateOutput: - return _generate(ctx, pipeline, payload, "flux2-klein-9b_fp8") + return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo_fp8") -@worker_function(_flux_resources) -def generate_int8( +@worker_function(_nvfp4_resources) +def generate_nvfp4( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b_int8"), + ModelRef(Src.FIXED, "flux2-klein-4b-base_nvfp4"), ], payload: GenerateInput, ) -> GenerateOutput: - """ - INT8 function (weight-only). + return _generate(ctx, pipeline, payload, "flux2-klein-4b-base_nvfp4") - This endpoint is intended to run against an int8-weight-only artifact (or an artifact - that the worker can load with torchao-backed int8 quantization enabled). 
- """ - return _generate(ctx, pipeline, payload, "flux2-klein-4b_int8") - -@worker_function(_flux_resources) -def generate_int4( +@worker_function(_nvfp4_resources) +def generate_turbo_nvfp4( ctx: RequestContext, pipeline: Annotated[ Flux2KleinPipeline, - ModelRef(Src.FIXED, "flux2-klein-4b_int4"), + ModelRef(Src.FIXED, "flux2-klein-4b-turbo_nvfp4"), ], payload: GenerateInput, ) -> GenerateOutput: - """ - INT4 function (weight-only). - - This endpoint is experimental; expect quality regressions or incompatibilities. - """ - return _generate(ctx, pipeline, payload, "flux2-klein-4b_int4") + return _generate(ctx, pipeline, payload, "flux2-klein-4b-turbo_nvfp4") diff --git a/examples/flux2-klein-4b/uv.lock b/examples/flux2-klein-4b/uv.lock index 8353718..448d669 100644 --- a/examples/flux2-klein-4b/uv.lock +++ b/examples/flux2-klein-4b/uv.lock @@ -512,7 +512,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers.git?rev=99e2cfff27dec514a43e260e885c5e6eca038b36" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "torchao" }, { name = "transformers", specifier = ">=4.56,<5" }, diff --git a/examples/image-gen/Dockerfile b/examples/image-gen/Dockerfile index cd4c800..1203d3c 100644 --- a/examples/image-gen/Dockerfile +++ b/examples/image-gen/Dockerfile @@ -1,13 +1,14 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -23,10 +24,9 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/image-gen/endpoint.toml b/examples/image-gen/endpoint.toml index 2daf35f..de4a700 100644 --- a/examples/image-gen/endpoint.toml +++ b/examples/image-gen/endpoint.toml @@ -7,7 +7,7 @@ main = "image_gen.main" sdxl = { ref = "stabilityai/stable-diffusion-xl-base-1.0", dtypes = ["fp16", "bf16"] } [host.requirements] -cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 12 diff --git a/examples/image-gen/pyproject.toml b/examples/image-gen/pyproject.toml index f6074d2..7902855 100644 --- a/examples/image-gen/pyproject.toml +++ b/examples/image-gen/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = 
"Image generation function" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers", "transformers", "accelerate", diff --git a/examples/image-gen/uv.lock b/examples/image-gen/uv.lock index 481720f..74df27e 100644 --- a/examples/image-gen/uv.lock +++ b/examples/image-gen/uv.lock @@ -786,7 +786,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "transformers" }, ] diff --git a/examples/marco-polo/pyproject.toml b/examples/marco-polo/pyproject.toml index 858525b..4674920 100644 --- a/examples/marco-polo/pyproject.toml +++ b/examples/marco-polo/pyproject.toml @@ -1,10 +1,10 @@ [project] -name = "smoke-test" -version = "0.1.0" -description = "Simple worker functions for smoke testing" +name = "marco_polo" +version = "1.0.0" +description = "Simple function for testing" requires-python = ">=3.12" dependencies = [ - "gen-worker==0.3.0", + "gen-worker>=0.3.10", ] [dependency-groups] @@ -15,7 +15,7 @@ requires = ["hatchling"] build-backend = "hatchling.build" [tool.hatch.build.targets.wheel] -packages = ["src/smoke_test"] +packages = ["src/marco_polo"] [tool.mypy] python_version = "3.12" diff --git a/examples/marco-polo/uv.lock b/examples/marco-polo/uv.lock index 7ebd088..6c27a41 100644 --- a/examples/marco-polo/uv.lock +++ b/examples/marco-polo/uv.lock @@ -219,11 +219,11 @@ wheels = [ [[package]] name = "certifi" -version = "2026.1.4" +version = "2026.2.25" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e0/2d/a891ca51311197f6ad14a7ef42e2399f36cf2f9bd44752b3dc4eab60fdc5/certifi-2026.1.4.tar.gz", hash = "sha256:ac726dd470482006e014ad384921ed6438c457018f4b3d204aea4281258b2120", size = 154268, upload-time = "2026-01-04T02:42:41.825Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/af/2d/7bf41579a8986e348fa033a31cdd0e4121114f6bce2457e8876010b092dd/certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7", size = 155029, upload-time = "2026-02-25T02:54:17.342Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/e6/ad/3cc14f097111b4de0040c83a525973216457bbeeb63739ef1ed275c1c021/certifi-2026.1.4-py3-none-any.whl", hash = "sha256:9943707519e4add1115f44c2bc244f782c0249876bf51b6599fee1ffbedd685c", size = 152900, upload-time = "2026-01-04T02:42:40.15Z" }, + { url = "https://files.pythonhosted.org/packages/9a/3c/c17fb3ca2d9c3acff52e30b309f538586f9f5b9c9cf454f3845fc9af4881/certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa", size = 153684, upload-time = "2026-02-25T02:54:15.766Z" }, ] [[package]] @@ -285,59 +285,59 @@ wheels = [ [[package]] name = "charset-normalizer" -version = "3.4.4" +version = "3.4.5" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/13/69/33ddede1939fdd074bce5434295f38fae7136463422fe4fd3e0e89b98062/charset_normalizer-3.4.4.tar.gz", hash = "sha256:94537985111c35f28720e43603b8e7b43a6ecfb2ce1d3058bbe955b73404e21a", size = 129418, upload-time = "2025-10-14T04:42:32.879Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1d/35/02daf95b9cd686320bb622eb148792655c9412dbb9b67abb5694e5910a24/charset_normalizer-3.4.5.tar.gz", hash = "sha256:95adae7b6c42a6c5b5b559b1a99149f090a57128155daeea91732c8d970d8644", size = 134804, upload-time = "2026-03-06T06:03:19.46Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f3/85/1637cd4af66fa687396e757dec650f28025f2a2f5a5531a3208dc0ec43f2/charset_normalizer-3.4.4-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0a98e6759f854bd25a58a73fa88833fba3b7c491169f86ce1180c948ab3fd394", size = 208425, upload-time = "2025-10-14T04:40:53.353Z" }, - { url = 
"https://files.pythonhosted.org/packages/9d/6a/04130023fef2a0d9c62d0bae2649b69f7b7d8d24ea5536feef50551029df/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b5b290ccc2a263e8d185130284f8501e3e36c5e02750fc6b6bdeb2e9e96f1e25", size = 148162, upload-time = "2025-10-14T04:40:54.558Z" }, - { url = "https://files.pythonhosted.org/packages/78/29/62328d79aa60da22c9e0b9a66539feae06ca0f5a4171ac4f7dc285b83688/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74bb723680f9f7a6234dcf67aea57e708ec1fbdf5699fb91dfd6f511b0a320ef", size = 144558, upload-time = "2025-10-14T04:40:55.677Z" }, - { url = "https://files.pythonhosted.org/packages/86/bb/b32194a4bf15b88403537c2e120b817c61cd4ecffa9b6876e941c3ee38fe/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f1e34719c6ed0b92f418c7c780480b26b5d9c50349e9a9af7d76bf757530350d", size = 161497, upload-time = "2025-10-14T04:40:57.217Z" }, - { url = "https://files.pythonhosted.org/packages/19/89/a54c82b253d5b9b111dc74aca196ba5ccfcca8242d0fb64146d4d3183ff1/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2437418e20515acec67d86e12bf70056a33abdacb5cb1655042f6538d6b085a8", size = 159240, upload-time = "2025-10-14T04:40:58.358Z" }, - { url = "https://files.pythonhosted.org/packages/c0/10/d20b513afe03acc89ec33948320a5544d31f21b05368436d580dec4e234d/charset_normalizer-3.4.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:11d694519d7f29d6cd09f6ac70028dba10f92f6cdd059096db198c283794ac86", size = 153471, upload-time = "2025-10-14T04:40:59.468Z" }, - { url = 
"https://files.pythonhosted.org/packages/61/fa/fbf177b55bdd727010f9c0a3c49eefa1d10f960e5f09d1d887bf93c2e698/charset_normalizer-3.4.4-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:ac1c4a689edcc530fc9d9aa11f5774b9e2f33f9a0c6a57864e90908f5208d30a", size = 150864, upload-time = "2025-10-14T04:41:00.623Z" }, - { url = "https://files.pythonhosted.org/packages/05/12/9fbc6a4d39c0198adeebbde20b619790e9236557ca59fc40e0e3cebe6f40/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:21d142cc6c0ec30d2efee5068ca36c128a30b0f2c53c1c07bd78cb6bc1d3be5f", size = 150647, upload-time = "2025-10-14T04:41:01.754Z" }, - { url = "https://files.pythonhosted.org/packages/ad/1f/6a9a593d52e3e8c5d2b167daf8c6b968808efb57ef4c210acb907c365bc4/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:5dbe56a36425d26d6cfb40ce79c314a2e4dd6211d51d6d2191c00bed34f354cc", size = 145110, upload-time = "2025-10-14T04:41:03.231Z" }, - { url = "https://files.pythonhosted.org/packages/30/42/9a52c609e72471b0fc54386dc63c3781a387bb4fe61c20231a4ebcd58bdd/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:5bfbb1b9acf3334612667b61bd3002196fe2a1eb4dd74d247e0f2a4d50ec9bbf", size = 162839, upload-time = "2025-10-14T04:41:04.715Z" }, - { url = "https://files.pythonhosted.org/packages/c4/5b/c0682bbf9f11597073052628ddd38344a3d673fda35a36773f7d19344b23/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:d055ec1e26e441f6187acf818b73564e6e6282709e9bcb5b63f5b23068356a15", size = 150667, upload-time = "2025-10-14T04:41:05.827Z" }, - { url = "https://files.pythonhosted.org/packages/e4/24/a41afeab6f990cf2daf6cb8c67419b63b48cf518e4f56022230840c9bfb2/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:af2d8c67d8e573d6de5bc30cdb27e9b95e49115cd9baad5ddbd1a6207aaa82a9", size = 160535, upload-time = "2025-10-14T04:41:06.938Z" }, - { url = 
"https://files.pythonhosted.org/packages/2a/e5/6a4ce77ed243c4a50a1fecca6aaaab419628c818a49434be428fe24c9957/charset_normalizer-3.4.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:780236ac706e66881f3b7f2f32dfe90507a09e67d1d454c762cf642e6e1586e0", size = 154816, upload-time = "2025-10-14T04:41:08.101Z" }, - { url = "https://files.pythonhosted.org/packages/a8/ef/89297262b8092b312d29cdb2517cb1237e51db8ecef2e9af5edbe7b683b1/charset_normalizer-3.4.4-cp312-cp312-win32.whl", hash = "sha256:5833d2c39d8896e4e19b689ffc198f08ea58116bee26dea51e362ecc7cd3ed26", size = 99694, upload-time = "2025-10-14T04:41:09.23Z" }, - { url = "https://files.pythonhosted.org/packages/3d/2d/1e5ed9dd3b3803994c155cd9aacb60c82c331bad84daf75bcb9c91b3295e/charset_normalizer-3.4.4-cp312-cp312-win_amd64.whl", hash = "sha256:a79cfe37875f822425b89a82333404539ae63dbdddf97f84dcbc3d339aae9525", size = 107131, upload-time = "2025-10-14T04:41:10.467Z" }, - { url = "https://files.pythonhosted.org/packages/d0/d9/0ed4c7098a861482a7b6a95603edce4c0d9db2311af23da1fb2b75ec26fc/charset_normalizer-3.4.4-cp312-cp312-win_arm64.whl", hash = "sha256:376bec83a63b8021bb5c8ea75e21c4ccb86e7e45ca4eb81146091b56599b80c3", size = 100390, upload-time = "2025-10-14T04:41:11.915Z" }, - { url = "https://files.pythonhosted.org/packages/97/45/4b3a1239bbacd321068ea6e7ac28875b03ab8bc0aa0966452db17cd36714/charset_normalizer-3.4.4-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:e1f185f86a6f3403aa2420e815904c67b2f9ebc443f045edd0de921108345794", size = 208091, upload-time = "2025-10-14T04:41:13.346Z" }, - { url = "https://files.pythonhosted.org/packages/7d/62/73a6d7450829655a35bb88a88fca7d736f9882a27eacdca2c6d505b57e2e/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b39f987ae8ccdf0d2642338faf2abb1862340facc796048b604ef14919e55ed", size = 147936, upload-time = "2025-10-14T04:41:14.461Z" }, - { url = 
"https://files.pythonhosted.org/packages/89/c5/adb8c8b3d6625bef6d88b251bbb0d95f8205831b987631ab0c8bb5d937c2/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:3162d5d8ce1bb98dd51af660f2121c55d0fa541b46dff7bb9b9f86ea1d87de72", size = 144180, upload-time = "2025-10-14T04:41:15.588Z" }, - { url = "https://files.pythonhosted.org/packages/91/ed/9706e4070682d1cc219050b6048bfd293ccf67b3d4f5a4f39207453d4b99/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:81d5eb2a312700f4ecaa977a8235b634ce853200e828fbadf3a9c50bab278328", size = 161346, upload-time = "2025-10-14T04:41:16.738Z" }, - { url = "https://files.pythonhosted.org/packages/d5/0d/031f0d95e4972901a2f6f09ef055751805ff541511dc1252ba3ca1f80cf5/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:5bd2293095d766545ec1a8f612559f6b40abc0eb18bb2f5d1171872d34036ede", size = 158874, upload-time = "2025-10-14T04:41:17.923Z" }, - { url = "https://files.pythonhosted.org/packages/f5/83/6ab5883f57c9c801ce5e5677242328aa45592be8a00644310a008d04f922/charset_normalizer-3.4.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a8a8b89589086a25749f471e6a900d3f662d1d3b6e2e59dcecf787b1cc3a1894", size = 153076, upload-time = "2025-10-14T04:41:19.106Z" }, - { url = "https://files.pythonhosted.org/packages/75/1e/5ff781ddf5260e387d6419959ee89ef13878229732732ee73cdae01800f2/charset_normalizer-3.4.4-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc7637e2f80d8530ee4a78e878bce464f70087ce73cf7c1caf142416923b98f1", size = 150601, upload-time = "2025-10-14T04:41:20.245Z" }, - { url = "https://files.pythonhosted.org/packages/d7/57/71be810965493d3510a6ca79b90c19e48696fb1ff964da319334b12677f0/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:f8bf04158c6b607d747e93949aa60618b61312fe647a6369f88ce2ff16043490", size = 150376, upload-time = "2025-10-14T04:41:21.398Z" }, - { url = "https://files.pythonhosted.org/packages/e5/d5/c3d057a78c181d007014feb7e9f2e65905a6c4ef182c0ddf0de2924edd65/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:554af85e960429cf30784dd47447d5125aaa3b99a6f0683589dbd27e2f45da44", size = 144825, upload-time = "2025-10-14T04:41:22.583Z" }, - { url = "https://files.pythonhosted.org/packages/e6/8c/d0406294828d4976f275ffbe66f00266c4b3136b7506941d87c00cab5272/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:74018750915ee7ad843a774364e13a3db91682f26142baddf775342c3f5b1133", size = 162583, upload-time = "2025-10-14T04:41:23.754Z" }, - { url = "https://files.pythonhosted.org/packages/d7/24/e2aa1f18c8f15c4c0e932d9287b8609dd30ad56dbe41d926bd846e22fb8d/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:c0463276121fdee9c49b98908b3a89c39be45d86d1dbaa22957e38f6321d4ce3", size = 150366, upload-time = "2025-10-14T04:41:25.27Z" }, - { url = "https://files.pythonhosted.org/packages/e4/5b/1e6160c7739aad1e2df054300cc618b06bf784a7a164b0f238360721ab86/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:362d61fd13843997c1c446760ef36f240cf81d3ebf74ac62652aebaf7838561e", size = 160300, upload-time = "2025-10-14T04:41:26.725Z" }, - { url = "https://files.pythonhosted.org/packages/7a/10/f882167cd207fbdd743e55534d5d9620e095089d176d55cb22d5322f2afd/charset_normalizer-3.4.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:9a26f18905b8dd5d685d6d07b0cdf98a79f3c7a918906af7cc143ea2e164c8bc", size = 154465, upload-time = "2025-10-14T04:41:28.322Z" }, - { url = "https://files.pythonhosted.org/packages/89/66/c7a9e1b7429be72123441bfdbaf2bc13faab3f90b933f664db506dea5915/charset_normalizer-3.4.4-cp313-cp313-win32.whl", hash = "sha256:9b35f4c90079ff2e2edc5b26c0c77925e5d2d255c42c74fdb70fb49b172726ac", 
size = 99404, upload-time = "2025-10-14T04:41:29.95Z" }, - { url = "https://files.pythonhosted.org/packages/c4/26/b9924fa27db384bdcd97ab83b4f0a8058d96ad9626ead570674d5e737d90/charset_normalizer-3.4.4-cp313-cp313-win_amd64.whl", hash = "sha256:b435cba5f4f750aa6c0a0d92c541fb79f69a387c91e61f1795227e4ed9cece14", size = 107092, upload-time = "2025-10-14T04:41:31.188Z" }, - { url = "https://files.pythonhosted.org/packages/af/8f/3ed4bfa0c0c72a7ca17f0380cd9e4dd842b09f664e780c13cff1dcf2ef1b/charset_normalizer-3.4.4-cp313-cp313-win_arm64.whl", hash = "sha256:542d2cee80be6f80247095cc36c418f7bddd14f4a6de45af91dfad36d817bba2", size = 100408, upload-time = "2025-10-14T04:41:32.624Z" }, - { url = "https://files.pythonhosted.org/packages/2a/35/7051599bd493e62411d6ede36fd5af83a38f37c4767b92884df7301db25d/charset_normalizer-3.4.4-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:da3326d9e65ef63a817ecbcc0df6e94463713b754fe293eaa03da99befb9a5bd", size = 207746, upload-time = "2025-10-14T04:41:33.773Z" }, - { url = "https://files.pythonhosted.org/packages/10/9a/97c8d48ef10d6cd4fcead2415523221624bf58bcf68a802721a6bc807c8f/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:8af65f14dc14a79b924524b1e7fffe304517b2bff5a58bf64f30b98bbc5079eb", size = 147889, upload-time = "2025-10-14T04:41:34.897Z" }, - { url = "https://files.pythonhosted.org/packages/10/bf/979224a919a1b606c82bd2c5fa49b5c6d5727aa47b4312bb27b1734f53cd/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:74664978bb272435107de04e36db5a9735e78232b85b77d45cfb38f758efd33e", size = 143641, upload-time = "2025-10-14T04:41:36.116Z" }, - { url = "https://files.pythonhosted.org/packages/ba/33/0ad65587441fc730dc7bd90e9716b30b4702dc7b617e6ba4997dc8651495/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = 
"sha256:752944c7ffbfdd10c074dc58ec2d5a8a4cd9493b314d367c14d24c17684ddd14", size = 160779, upload-time = "2025-10-14T04:41:37.229Z" }, - { url = "https://files.pythonhosted.org/packages/67/ed/331d6b249259ee71ddea93f6f2f0a56cfebd46938bde6fcc6f7b9a3d0e09/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d1f13550535ad8cff21b8d757a3257963e951d96e20ec82ab44bc64aeb62a191", size = 159035, upload-time = "2025-10-14T04:41:38.368Z" }, - { url = "https://files.pythonhosted.org/packages/67/ff/f6b948ca32e4f2a4576aa129d8bed61f2e0543bf9f5f2b7fc3758ed005c9/charset_normalizer-3.4.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ecaae4149d99b1c9e7b88bb03e3221956f68fd6d50be2ef061b2381b61d20838", size = 152542, upload-time = "2025-10-14T04:41:39.862Z" }, - { url = "https://files.pythonhosted.org/packages/16/85/276033dcbcc369eb176594de22728541a925b2632f9716428c851b149e83/charset_normalizer-3.4.4-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:cb6254dc36b47a990e59e1068afacdcd02958bdcce30bb50cc1700a8b9d624a6", size = 149524, upload-time = "2025-10-14T04:41:41.319Z" }, - { url = "https://files.pythonhosted.org/packages/9e/f2/6a2a1f722b6aba37050e626530a46a68f74e63683947a8acff92569f979a/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:c8ae8a0f02f57a6e61203a31428fa1d677cbe50c93622b4149d5c0f319c1d19e", size = 150395, upload-time = "2025-10-14T04:41:42.539Z" }, - { url = "https://files.pythonhosted.org/packages/60/bb/2186cb2f2bbaea6338cad15ce23a67f9b0672929744381e28b0592676824/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:47cc91b2f4dd2833fddaedd2893006b0106129d4b94fdb6af1f4ce5a9965577c", size = 143680, upload-time = "2025-10-14T04:41:43.661Z" }, - { url = 
"https://files.pythonhosted.org/packages/7d/a5/bf6f13b772fbb2a90360eb620d52ed8f796f3c5caee8398c3b2eb7b1c60d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:82004af6c302b5d3ab2cfc4cc5f29db16123b1a8417f2e25f9066f91d4411090", size = 162045, upload-time = "2025-10-14T04:41:44.821Z" }, - { url = "https://files.pythonhosted.org/packages/df/c5/d1be898bf0dc3ef9030c3825e5d3b83f2c528d207d246cbabe245966808d/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:2b7d8f6c26245217bd2ad053761201e9f9680f8ce52f0fcd8d0755aeae5b2152", size = 149687, upload-time = "2025-10-14T04:41:46.442Z" }, - { url = "https://files.pythonhosted.org/packages/a5/42/90c1f7b9341eef50c8a1cb3f098ac43b0508413f33affd762855f67a410e/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:799a7a5e4fb2d5898c60b640fd4981d6a25f1c11790935a44ce38c54e985f828", size = 160014, upload-time = "2025-10-14T04:41:47.631Z" }, - { url = "https://files.pythonhosted.org/packages/76/be/4d3ee471e8145d12795ab655ece37baed0929462a86e72372fd25859047c/charset_normalizer-3.4.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:99ae2cffebb06e6c22bdc25801d7b30f503cc87dbd283479e7b606f70aff57ec", size = 154044, upload-time = "2025-10-14T04:41:48.81Z" }, - { url = "https://files.pythonhosted.org/packages/b0/6f/8f7af07237c34a1defe7defc565a9bc1807762f672c0fde711a4b22bf9c0/charset_normalizer-3.4.4-cp314-cp314-win32.whl", hash = "sha256:f9d332f8c2a2fcbffe1378594431458ddbef721c1769d78e2cbc06280d8155f9", size = 99940, upload-time = "2025-10-14T04:41:49.946Z" }, - { url = "https://files.pythonhosted.org/packages/4b/51/8ade005e5ca5b0d80fb4aff72a3775b325bdc3d27408c8113811a7cbe640/charset_normalizer-3.4.4-cp314-cp314-win_amd64.whl", hash = "sha256:8a6562c3700cce886c5be75ade4a5db4214fda19fede41d9792d100288d8f94c", size = 107104, upload-time = "2025-10-14T04:41:51.051Z" }, - { url = 
"https://files.pythonhosted.org/packages/da/5f/6b8f83a55bb8278772c5ae54a577f3099025f9ade59d0136ac24a0df4bde/charset_normalizer-3.4.4-cp314-cp314-win_arm64.whl", hash = "sha256:de00632ca48df9daf77a2c65a484531649261ec9f25489917f09e455cb09ddb2", size = 100743, upload-time = "2025-10-14T04:41:52.122Z" }, - { url = "https://files.pythonhosted.org/packages/0a/4c/925909008ed5a988ccbb72dcc897407e5d6d3bd72410d69e051fc0c14647/charset_normalizer-3.4.4-py3-none-any.whl", hash = "sha256:7a32c560861a02ff789ad905a2fe94e3f840803362c84fecf1851cb4cf3dc37f", size = 53402, upload-time = "2025-10-14T04:42:31.76Z" }, + { url = "https://files.pythonhosted.org/packages/9c/b6/9ee9c1a608916ca5feae81a344dffbaa53b26b90be58cc2159e3332d44ec/charset_normalizer-3.4.5-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:ed97c282ee4f994ef814042423a529df9497e3c666dca19be1d4cd1129dc7ade", size = 280976, upload-time = "2026-03-06T06:01:15.276Z" }, + { url = "https://files.pythonhosted.org/packages/f8/d8/a54f7c0b96f1df3563e9190f04daf981e365a9b397eedfdfb5dbef7e5c6c/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0294916d6ccf2d069727d65973c3a1ca477d68708db25fd758dd28b0827cff54", size = 189356, upload-time = "2026-03-06T06:01:16.511Z" }, + { url = "https://files.pythonhosted.org/packages/42/69/2bf7f76ce1446759a5787cb87d38f6a61eb47dbbdf035cfebf6347292a65/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:dc57a0baa3eeedd99fafaef7511b5a6ef4581494e8168ee086031744e2679467", size = 206369, upload-time = "2026-03-06T06:01:17.853Z" }, + { url = "https://files.pythonhosted.org/packages/10/9c/949d1a46dab56b959d9a87272482195f1840b515a3380e39986989a893ae/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:ed1a9a204f317ef879b32f9af507d47e49cd5e7f8e8d5d96358c98373314fc60", size = 203285, 
upload-time = "2026-03-06T06:01:19.473Z" }, + { url = "https://files.pythonhosted.org/packages/67/5c/ae30362a88b4da237d71ea214a8c7eb915db3eec941adda511729ac25fa2/charset_normalizer-3.4.5-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7ad83b8f9379176c841f8865884f3514d905bcd2a9a3b210eaa446e7d2223e4d", size = 196274, upload-time = "2026-03-06T06:01:20.728Z" }, + { url = "https://files.pythonhosted.org/packages/b2/07/c9f2cb0e46cb6d64fdcc4f95953747b843bb2181bda678dc4e699b8f0f9a/charset_normalizer-3.4.5-cp312-cp312-manylinux_2_31_armv7l.whl", hash = "sha256:a118e2e0b5ae6b0120d5efa5f866e58f2bb826067a646431da4d6a2bdae7950e", size = 184715, upload-time = "2026-03-06T06:01:22.194Z" }, + { url = "https://files.pythonhosted.org/packages/36/64/6b0ca95c44fddf692cd06d642b28f63009d0ce325fad6e9b2b4d0ef86a52/charset_normalizer-3.4.5-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:754f96058e61a5e22e91483f823e07df16416ce76afa4ebf306f8e1d1296d43f", size = 193426, upload-time = "2026-03-06T06:01:23.795Z" }, + { url = "https://files.pythonhosted.org/packages/50/bc/a730690d726403743795ca3f5bb2baf67838c5fea78236098f324b965e40/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0c300cefd9b0970381a46394902cd18eaf2aa00163f999590ace991989dcd0fc", size = 191780, upload-time = "2026-03-06T06:01:25.053Z" }, + { url = "https://files.pythonhosted.org/packages/97/4f/6c0bc9af68222b22951552d73df4532b5be6447cee32d58e7e8c74ecbb7b/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c108f8619e504140569ee7de3f97d234f0fbae338a7f9f360455071ef9855a95", size = 185805, upload-time = "2026-03-06T06:01:26.294Z" }, + { url = "https://files.pythonhosted.org/packages/dd/b9/a523fb9b0ee90814b503452b2600e4cbc118cd68714d57041564886e7325/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:d1028de43596a315e2720a9849ee79007ab742c06ad8b45a50db8cdb7ed4a82a", size 
= 208342, upload-time = "2026-03-06T06:01:27.55Z" }, + { url = "https://files.pythonhosted.org/packages/4d/61/c59e761dee4464050713e50e27b58266cc8e209e518c0b378c1580c959ba/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:19092dde50335accf365cce21998a1c6dd8eafd42c7b226eb54b2747cdce2fac", size = 193661, upload-time = "2026-03-06T06:01:29.051Z" }, + { url = "https://files.pythonhosted.org/packages/1c/43/729fa30aad69783f755c5ad8649da17ee095311ca42024742701e202dc59/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:4354e401eb6dab9aed3c7b4030514328a6c748d05e1c3e19175008ca7de84fb1", size = 204819, upload-time = "2026-03-06T06:01:30.298Z" }, + { url = "https://files.pythonhosted.org/packages/87/33/d9b442ce5a91b96fc0840455a9e49a611bbadae6122778d0a6a79683dd31/charset_normalizer-3.4.5-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a68766a3c58fde7f9aaa22b3786276f62ab2f594efb02d0a1421b6282e852e98", size = 198080, upload-time = "2026-03-06T06:01:31.478Z" }, + { url = "https://files.pythonhosted.org/packages/56/5a/b8b5a23134978ee9885cee2d6995f4c27cc41f9baded0a9685eabc5338f0/charset_normalizer-3.4.5-cp312-cp312-win32.whl", hash = "sha256:1827734a5b308b65ac54e86a618de66f935a4f63a8a462ff1e19a6788d6c2262", size = 132630, upload-time = "2026-03-06T06:01:33.056Z" }, + { url = "https://files.pythonhosted.org/packages/70/53/e44a4c07e8904500aec95865dc3f6464dc3586a039ef0df606eb3ac38e35/charset_normalizer-3.4.5-cp312-cp312-win_amd64.whl", hash = "sha256:728c6a963dfab66ef865f49286e45239384249672cd598576765acc2a640a636", size = 142856, upload-time = "2026-03-06T06:01:34.489Z" }, + { url = "https://files.pythonhosted.org/packages/ea/aa/c5628f7cad591b1cf45790b7a61483c3e36cf41349c98af7813c483fd6e8/charset_normalizer-3.4.5-cp312-cp312-win_arm64.whl", hash = "sha256:75dfd1afe0b1647449e852f4fb428195a7ed0588947218f7ba929f6538487f02", size = 132982, upload-time = "2026-03-06T06:01:35.641Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/48/9f34ec4bb24aa3fdba1890c1bddb97c8a4be1bd84ef5c42ac2352563ad05/charset_normalizer-3.4.5-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:ac59c15e3f1465f722607800c68713f9fbc2f672b9eb649fe831da4019ae9b23", size = 280788, upload-time = "2026-03-06T06:01:37.126Z" }, + { url = "https://files.pythonhosted.org/packages/0e/09/6003e7ffeb90cc0560da893e3208396a44c210c5ee42efff539639def59b/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:165c7b21d19365464e8f70e5ce5e12524c58b48c78c1f5a57524603c1ab003f8", size = 188890, upload-time = "2026-03-06T06:01:38.73Z" }, + { url = "https://files.pythonhosted.org/packages/42/1e/02706edf19e390680daa694d17e2b8eab4b5f7ac285e2a51168b4b22ee6b/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:28269983f25a4da0425743d0d257a2d6921ea7d9b83599d4039486ec5b9f911d", size = 206136, upload-time = "2026-03-06T06:01:40.016Z" }, + { url = "https://files.pythonhosted.org/packages/c7/87/942c3def1b37baf3cf786bad01249190f3ca3d5e63a84f831e704977de1f/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d27ce22ec453564770d29d03a9506d449efbb9fa13c00842262b2f6801c48cce", size = 202551, upload-time = "2026-03-06T06:01:41.522Z" }, + { url = "https://files.pythonhosted.org/packages/94/0a/af49691938dfe175d71b8a929bd7e4ace2809c0c5134e28bc535660d5262/charset_normalizer-3.4.5-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0625665e4ebdddb553ab185de5db7054393af8879fb0c87bd5690d14379d6819", size = 195572, upload-time = "2026-03-06T06:01:43.208Z" }, + { url = "https://files.pythonhosted.org/packages/20/ea/dfb1792a8050a8e694cfbde1570ff97ff74e48afd874152d38163d1df9ae/charset_normalizer-3.4.5-cp313-cp313-manylinux_2_31_armv7l.whl", hash = 
"sha256:c23eb3263356d94858655b3e63f85ac5d50970c6e8febcdde7830209139cc37d", size = 184438, upload-time = "2026-03-06T06:01:44.755Z" }, + { url = "https://files.pythonhosted.org/packages/72/12/c281e2067466e3ddd0595bfaea58a6946765ace5c72dfa3edc2f5f118026/charset_normalizer-3.4.5-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e6302ca4ae283deb0af68d2fbf467474b8b6aedcd3dab4db187e07f94c109763", size = 193035, upload-time = "2026-03-06T06:01:46.051Z" }, + { url = "https://files.pythonhosted.org/packages/ba/4f/3792c056e7708e10464bad0438a44708886fb8f92e3c3d29ec5e2d964d42/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e51ae7d81c825761d941962450f50d041db028b7278e7b08930b4541b3e45cb9", size = 191340, upload-time = "2026-03-06T06:01:47.547Z" }, + { url = "https://files.pythonhosted.org/packages/e7/86/80ddba897127b5c7a9bccc481b0cd36c8fefa485d113262f0fe4332f0bf4/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:597d10dec876923e5c59e48dbd366e852eacb2b806029491d307daea6b917d7c", size = 185464, upload-time = "2026-03-06T06:01:48.764Z" }, + { url = "https://files.pythonhosted.org/packages/4d/00/b5eff85ba198faacab83e0e4b6f0648155f072278e3b392a82478f8b988b/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5cffde4032a197bd3b42fd0b9509ec60fb70918d6970e4cc773f20fc9180ca67", size = 208014, upload-time = "2026-03-06T06:01:50.371Z" }, + { url = "https://files.pythonhosted.org/packages/c8/11/d36f70be01597fd30850dde8a1269ebc8efadd23ba5785808454f2389bde/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:2da4eedcb6338e2321e831a0165759c0c620e37f8cd044a263ff67493be8ffb3", size = 193297, upload-time = "2026-03-06T06:01:51.933Z" }, + { url = "https://files.pythonhosted.org/packages/1a/1d/259eb0a53d4910536c7c2abb9cb25f4153548efb42800c6a9456764649c0/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_s390x.whl", hash = 
"sha256:65a126fb4b070d05340a84fc709dd9e7c75d9b063b610ece8a60197a291d0adf", size = 204321, upload-time = "2026-03-06T06:01:53.887Z" }, + { url = "https://files.pythonhosted.org/packages/84/31/faa6c5b9d3688715e1ed1bb9d124c384fe2fc1633a409e503ffe1c6398c1/charset_normalizer-3.4.5-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:c7a80a9242963416bd81f99349d5f3fce1843c303bd404f204918b6d75a75fd6", size = 197509, upload-time = "2026-03-06T06:01:56.439Z" }, + { url = "https://files.pythonhosted.org/packages/fd/a5/c7d9dd1503ffc08950b3260f5d39ec2366dd08254f0900ecbcf3a6197c7c/charset_normalizer-3.4.5-cp313-cp313-win32.whl", hash = "sha256:f1d725b754e967e648046f00c4facc42d414840f5ccc670c5670f59f83693e4f", size = 132284, upload-time = "2026-03-06T06:01:57.812Z" }, + { url = "https://files.pythonhosted.org/packages/b9/0f/57072b253af40c8aa6636e6de7d75985624c1eb392815b2f934199340a89/charset_normalizer-3.4.5-cp313-cp313-win_amd64.whl", hash = "sha256:e37bd100d2c5d3ba35db9c7c5ba5a9228cbcffe5c4778dc824b164e5257813d7", size = 142630, upload-time = "2026-03-06T06:01:59.062Z" }, + { url = "https://files.pythonhosted.org/packages/31/41/1c4b7cc9f13bd9d369ce3bc993e13d374ce25fa38a2663644283ecf422c1/charset_normalizer-3.4.5-cp313-cp313-win_arm64.whl", hash = "sha256:93b3b2cc5cf1b8743660ce77a4f45f3f6d1172068207c1defc779a36eea6bb36", size = 133254, upload-time = "2026-03-06T06:02:00.281Z" }, + { url = "https://files.pythonhosted.org/packages/43/be/0f0fd9bb4a7fa4fb5067fb7d9ac693d4e928d306f80a0d02bde43a7c4aee/charset_normalizer-3.4.5-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8197abe5ca1ffb7d91e78360f915eef5addff270f8a71c1fc5be24a56f3e4873", size = 280232, upload-time = "2026-03-06T06:02:01.508Z" }, + { url = "https://files.pythonhosted.org/packages/28/02/983b5445e4bef49cd8c9da73a8e029f0825f39b74a06d201bfaa2e55142a/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = 
"sha256:a2aecdb364b8a1802afdc7f9327d55dad5366bc97d8502d0f5854e50712dbc5f", size = 189688, upload-time = "2026-03-06T06:02:02.857Z" }, + { url = "https://files.pythonhosted.org/packages/d0/88/152745c5166437687028027dc080e2daed6fe11cfa95a22f4602591c42db/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a66aa5022bf81ab4b1bebfb009db4fd68e0c6d4307a1ce5ef6a26e5878dfc9e4", size = 206833, upload-time = "2026-03-06T06:02:05.127Z" }, + { url = "https://files.pythonhosted.org/packages/cb/0f/ebc15c8b02af2f19be9678d6eed115feeeccc45ce1f4b098d986c13e8769/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:d77f97e515688bd615c1d1f795d540f32542d514242067adcb8ef532504cb9ee", size = 202879, upload-time = "2026-03-06T06:02:06.446Z" }, + { url = "https://files.pythonhosted.org/packages/38/9c/71336bff6934418dc8d1e8a1644176ac9088068bc571da612767619c97b3/charset_normalizer-3.4.5-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:01a1ed54b953303ca7e310fafe0fe347aab348bd81834a0bcd602eb538f89d66", size = 195764, upload-time = "2026-03-06T06:02:08.763Z" }, + { url = "https://files.pythonhosted.org/packages/b7/95/ce92fde4f98615661871bc282a856cf9b8a15f686ba0af012984660d480b/charset_normalizer-3.4.5-cp314-cp314-manylinux_2_31_armv7l.whl", hash = "sha256:b2d37d78297b39a9eb9eb92c0f6df98c706467282055419df141389b23f93362", size = 183728, upload-time = "2026-03-06T06:02:10.137Z" }, + { url = "https://files.pythonhosted.org/packages/1c/e7/f5b4588d94e747ce45ae680f0f242bc2d98dbd4eccfab73e6160b6893893/charset_normalizer-3.4.5-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e71bbb595973622b817c042bd943c3f3667e9c9983ce3d205f973f486fec98a7", size = 192937, upload-time = "2026-03-06T06:02:11.663Z" }, + { url = 
"https://files.pythonhosted.org/packages/f9/29/9d94ed6b929bf9f48bf6ede6e7474576499f07c4c5e878fb186083622716/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4cd966c2559f501c6fd69294d082c2934c8dd4719deb32c22961a5ac6db0df1d", size = 192040, upload-time = "2026-03-06T06:02:13.489Z" }, + { url = "https://files.pythonhosted.org/packages/15/d2/1a093a1cf827957f9445f2fe7298bcc16f8fc5e05c1ed2ad1af0b239035e/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d5e52d127045d6ae01a1e821acfad2f3a1866c54d0e837828538fabe8d9d1bd6", size = 184107, upload-time = "2026-03-06T06:02:14.83Z" }, + { url = "https://files.pythonhosted.org/packages/0f/7d/82068ce16bd36135df7b97f6333c5d808b94e01d4599a682e2337ed5fd14/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:30a2b1a48478c3428d047ed9690d57c23038dac838a87ad624c85c0a78ebeb39", size = 208310, upload-time = "2026-03-06T06:02:16.165Z" }, + { url = "https://files.pythonhosted.org/packages/84/4e/4dfb52307bb6af4a5c9e73e482d171b81d36f522b21ccd28a49656baa680/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:d8ed79b8f6372ca4254955005830fd61c1ccdd8c0fac6603e2c145c61dd95db6", size = 192918, upload-time = "2026-03-06T06:02:18.144Z" }, + { url = "https://files.pythonhosted.org/packages/08/a4/159ff7da662cf7201502ca89980b8f06acf3e887b278956646a8aeb178ab/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:c5af897b45fa606b12464ccbe0014bbf8c09191e0a66aab6aa9d5cf6e77e0c94", size = 204615, upload-time = "2026-03-06T06:02:19.821Z" }, + { url = "https://files.pythonhosted.org/packages/d6/62/0dd6172203cb6b429ffffc9935001fde42e5250d57f07b0c28c6046deb6b/charset_normalizer-3.4.5-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1088345bcc93c58d8d8f3d783eca4a6e7a7752bbff26c3eee7e73c597c191c2e", size = 197784, upload-time = "2026-03-06T06:02:21.86Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/5e/1aab5cb737039b9c59e63627dc8bbc0d02562a14f831cc450e5f91d84ce1/charset_normalizer-3.4.5-cp314-cp314-win32.whl", hash = "sha256:ee57b926940ba00bca7ba7041e665cc956e55ef482f851b9b65acb20d867e7a2", size = 133009, upload-time = "2026-03-06T06:02:23.289Z" }, + { url = "https://files.pythonhosted.org/packages/40/65/e7c6c77d7aaa4c0d7974f2e403e17f0ed2cb0fc135f77d686b916bf1eead/charset_normalizer-3.4.5-cp314-cp314-win_amd64.whl", hash = "sha256:4481e6da1830c8a1cc0b746b47f603b653dadb690bcd851d039ffaefe70533aa", size = 143511, upload-time = "2026-03-06T06:02:26.195Z" }, + { url = "https://files.pythonhosted.org/packages/ba/91/52b0841c71f152f563b8e072896c14e3d83b195c188b338d3cc2e582d1d4/charset_normalizer-3.4.5-cp314-cp314-win_arm64.whl", hash = "sha256:97ab7787092eb9b50fb47fa04f24c75b768a606af1bcba1957f07f128a7219e4", size = 133775, upload-time = "2026-03-06T06:02:27.473Z" }, + { url = "https://files.pythonhosted.org/packages/c5/60/3a621758945513adfd4db86827a5bafcc615f913dbd0b4c2ed64a65731be/charset_normalizer-3.4.5-py3-none-any.whl", hash = "sha256:9db5e3fcdcee89a78c04dffb3fe33c79f77bd741a624946db2591c81b2fc85b0", size = 55455, upload-time = "2026-03-06T06:03:17.827Z" }, ] [[package]] @@ -363,67 +363,64 @@ wheels = [ [[package]] name = "cryptography" -version = "46.0.3" +version = "46.0.5" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "cffi", marker = "platform_python_implementation != 'PyPy'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/9f/33/c00162f49c0e2fe8064a62cb92b93e50c74a72bc370ab92f86112b33ff62/cryptography-46.0.3.tar.gz", hash = "sha256:a8b17438104fed022ce745b362294d9ce35b4c2e45c1d958ad4a4b019285f4a1", size = 749258, upload-time = "2025-10-15T23:18:31.74Z" } +sdist = { url = "https://files.pythonhosted.org/packages/60/04/ee2a9e8542e4fa2773b81771ff8349ff19cdd56b7258a0cc442639052edb/cryptography-46.0.5.tar.gz", hash = 
"sha256:abace499247268e3757271b2f1e244b36b06f8515cf27c4d49468fc9eb16e93d", size = 750064, upload-time = "2026-02-10T19:18:38.255Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/1d/42/9c391dd801d6cf0d561b5890549d4b27bafcc53b39c31a817e69d87c625b/cryptography-46.0.3-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:109d4ddfadf17e8e7779c39f9b18111a09efb969a301a31e987416a0191ed93a", size = 7225004, upload-time = "2025-10-15T23:16:52.239Z" }, - { url = "https://files.pythonhosted.org/packages/1c/67/38769ca6b65f07461eb200e85fc1639b438bdc667be02cf7f2cd6a64601c/cryptography-46.0.3-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:09859af8466b69bc3c27bdf4f5d84a665e0f7ab5088412e9e2ec49758eca5cbc", size = 4296667, upload-time = "2025-10-15T23:16:54.369Z" }, - { url = "https://files.pythonhosted.org/packages/5c/49/498c86566a1d80e978b42f0d702795f69887005548c041636df6ae1ca64c/cryptography-46.0.3-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:01ca9ff2885f3acc98c29f1860552e37f6d7c7d013d7334ff2a9de43a449315d", size = 4450807, upload-time = "2025-10-15T23:16:56.414Z" }, - { url = "https://files.pythonhosted.org/packages/4b/0a/863a3604112174c8624a2ac3c038662d9e59970c7f926acdcfaed8d61142/cryptography-46.0.3-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:6eae65d4c3d33da080cff9c4ab1f711b15c1d9760809dad6ea763f3812d254cb", size = 4299615, upload-time = "2025-10-15T23:16:58.442Z" }, - { url = "https://files.pythonhosted.org/packages/64/02/b73a533f6b64a69f3cd3872acb6ebc12aef924d8d103133bb3ea750dc703/cryptography-46.0.3-cp311-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5bf0ed4490068a2e72ac03d786693adeb909981cc596425d09032d372bcc849", size = 4016800, upload-time = "2025-10-15T23:17:00.378Z" }, - { url = "https://files.pythonhosted.org/packages/25/d5/16e41afbfa450cde85a3b7ec599bebefaef16b5c6ba4ec49a3532336ed72/cryptography-46.0.3-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = 
"sha256:5ecfccd2329e37e9b7112a888e76d9feca2347f12f37918facbb893d7bb88ee8", size = 4984707, upload-time = "2025-10-15T23:17:01.98Z" }, - { url = "https://files.pythonhosted.org/packages/c9/56/e7e69b427c3878352c2fb9b450bd0e19ed552753491d39d7d0a2f5226d41/cryptography-46.0.3-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:a2c0cd47381a3229c403062f764160d57d4d175e022c1df84e168c6251a22eec", size = 4482541, upload-time = "2025-10-15T23:17:04.078Z" }, - { url = "https://files.pythonhosted.org/packages/78/f6/50736d40d97e8483172f1bb6e698895b92a223dba513b0ca6f06b2365339/cryptography-46.0.3-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:549e234ff32571b1f4076ac269fcce7a808d3bf98b76c8dd560e42dbc66d7d91", size = 4299464, upload-time = "2025-10-15T23:17:05.483Z" }, - { url = "https://files.pythonhosted.org/packages/00/de/d8e26b1a855f19d9994a19c702fa2e93b0456beccbcfe437eda00e0701f2/cryptography-46.0.3-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:c0a7bb1a68a5d3471880e264621346c48665b3bf1c3759d682fc0864c540bd9e", size = 4950838, upload-time = "2025-10-15T23:17:07.425Z" }, - { url = "https://files.pythonhosted.org/packages/8f/29/798fc4ec461a1c9e9f735f2fc58741b0daae30688f41b2497dcbc9ed1355/cryptography-46.0.3-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:10b01676fc208c3e6feeb25a8b83d81767e8059e1fe86e1dc62d10a3018fa926", size = 4481596, upload-time = "2025-10-15T23:17:09.343Z" }, - { url = "https://files.pythonhosted.org/packages/15/8d/03cd48b20a573adfff7652b76271078e3045b9f49387920e7f1f631d125e/cryptography-46.0.3-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:0abf1ffd6e57c67e92af68330d05760b7b7efb243aab8377e583284dbab72c71", size = 4426782, upload-time = "2025-10-15T23:17:11.22Z" }, - { url = "https://files.pythonhosted.org/packages/fa/b1/ebacbfe53317d55cf33165bda24c86523497a6881f339f9aae5c2e13e57b/cryptography-46.0.3-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a04bee9ab6a4da801eb9b51f1b708a1b5b5c9eb48c03f74198464c66f0d344ac", size = 
4698381, upload-time = "2025-10-15T23:17:12.829Z" }, - { url = "https://files.pythonhosted.org/packages/96/92/8a6a9525893325fc057a01f654d7efc2c64b9de90413adcf605a85744ff4/cryptography-46.0.3-cp311-abi3-win32.whl", hash = "sha256:f260d0d41e9b4da1ed1e0f1ce571f97fe370b152ab18778e9e8f67d6af432018", size = 3055988, upload-time = "2025-10-15T23:17:14.65Z" }, - { url = "https://files.pythonhosted.org/packages/7e/bf/80fbf45253ea585a1e492a6a17efcb93467701fa79e71550a430c5e60df0/cryptography-46.0.3-cp311-abi3-win_amd64.whl", hash = "sha256:a9a3008438615669153eb86b26b61e09993921ebdd75385ddd748702c5adfddb", size = 3514451, upload-time = "2025-10-15T23:17:16.142Z" }, - { url = "https://files.pythonhosted.org/packages/2e/af/9b302da4c87b0beb9db4e756386a7c6c5b8003cd0e742277888d352ae91d/cryptography-46.0.3-cp311-abi3-win_arm64.whl", hash = "sha256:5d7f93296ee28f68447397bf5198428c9aeeab45705a55d53a6343455dcb2c3c", size = 2928007, upload-time = "2025-10-15T23:17:18.04Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e2/a510aa736755bffa9d2f75029c229111a1d02f8ecd5de03078f4c18d91a3/cryptography-46.0.3-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:00a5e7e87938e5ff9ff5447ab086a5706a957137e6e433841e9d24f38a065217", size = 7158012, upload-time = "2025-10-15T23:17:19.982Z" }, - { url = "https://files.pythonhosted.org/packages/73/dc/9aa866fbdbb95b02e7f9d086f1fccfeebf8953509b87e3f28fff927ff8a0/cryptography-46.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c8daeb2d2174beb4575b77482320303f3d39b8e81153da4f0fb08eb5fe86a6c5", size = 4288728, upload-time = "2025-10-15T23:17:21.527Z" }, - { url = "https://files.pythonhosted.org/packages/c5/fd/bc1daf8230eaa075184cbbf5f8cd00ba9db4fd32d63fb83da4671b72ed8a/cryptography-46.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:39b6755623145ad5eff1dab323f4eae2a32a77a7abef2c5089a04a3d04366715", size = 4435078, upload-time = "2025-10-15T23:17:23.042Z" }, - { url = 
"https://files.pythonhosted.org/packages/82/98/d3bd5407ce4c60017f8ff9e63ffee4200ab3e23fe05b765cab805a7db008/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:db391fa7c66df6762ee3f00c95a89e6d428f4d60e7abc8328f4fe155b5ac6e54", size = 4293460, upload-time = "2025-10-15T23:17:24.885Z" }, - { url = "https://files.pythonhosted.org/packages/26/e9/e23e7900983c2b8af7a08098db406cf989d7f09caea7897e347598d4cd5b/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:78a97cf6a8839a48c49271cdcbd5cf37ca2c1d6b7fdd86cc864f302b5e9bf459", size = 3995237, upload-time = "2025-10-15T23:17:26.449Z" }, - { url = "https://files.pythonhosted.org/packages/91/15/af68c509d4a138cfe299d0d7ddb14afba15233223ebd933b4bbdbc7155d3/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:dfb781ff7eaa91a6f7fd41776ec37c5853c795d3b358d4896fdbb5df168af422", size = 4967344, upload-time = "2025-10-15T23:17:28.06Z" }, - { url = "https://files.pythonhosted.org/packages/ca/e3/8643d077c53868b681af077edf6b3cb58288b5423610f21c62aadcbe99f4/cryptography-46.0.3-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:6f61efb26e76c45c4a227835ddeae96d83624fb0d29eb5df5b96e14ed1a0afb7", size = 4466564, upload-time = "2025-10-15T23:17:29.665Z" }, - { url = "https://files.pythonhosted.org/packages/0e/43/c1e8726fa59c236ff477ff2b5dc071e54b21e5a1e51aa2cee1676f1c986f/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:23b1a8f26e43f47ceb6d6a43115f33a5a37d57df4ea0ca295b780ae8546e8044", size = 4292415, upload-time = "2025-10-15T23:17:31.686Z" }, - { url = "https://files.pythonhosted.org/packages/42/f9/2f8fefdb1aee8a8e3256a0568cffc4e6d517b256a2fe97a029b3f1b9fe7e/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:b419ae593c86b87014b9be7396b385491ad7f320bde96826d0dd174459e54665", size = 4931457, upload-time = "2025-10-15T23:17:33.478Z" }, - { url = 
"https://files.pythonhosted.org/packages/79/30/9b54127a9a778ccd6d27c3da7563e9f2d341826075ceab89ae3b41bf5be2/cryptography-46.0.3-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:50fc3343ac490c6b08c0cf0d704e881d0d660be923fd3076db3e932007e726e3", size = 4466074, upload-time = "2025-10-15T23:17:35.158Z" }, - { url = "https://files.pythonhosted.org/packages/ac/68/b4f4a10928e26c941b1b6a179143af9f4d27d88fe84a6a3c53592d2e76bf/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:22d7e97932f511d6b0b04f2bfd818d73dcd5928db509460aaf48384778eb6d20", size = 4420569, upload-time = "2025-10-15T23:17:37.188Z" }, - { url = "https://files.pythonhosted.org/packages/a3/49/3746dab4c0d1979888f125226357d3262a6dd40e114ac29e3d2abdf1ec55/cryptography-46.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:d55f3dffadd674514ad19451161118fd010988540cee43d8bc20675e775925de", size = 4681941, upload-time = "2025-10-15T23:17:39.236Z" }, - { url = "https://files.pythonhosted.org/packages/fd/30/27654c1dbaf7e4a3531fa1fc77986d04aefa4d6d78259a62c9dc13d7ad36/cryptography-46.0.3-cp314-cp314t-win32.whl", hash = "sha256:8a6e050cb6164d3f830453754094c086ff2d0b2f3a897a1d9820f6139a1f0914", size = 3022339, upload-time = "2025-10-15T23:17:40.888Z" }, - { url = "https://files.pythonhosted.org/packages/f6/30/640f34ccd4d2a1bc88367b54b926b781b5a018d65f404d409aba76a84b1c/cryptography-46.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:760f83faa07f8b64e9c33fc963d790a2edb24efb479e3520c14a45741cd9b2db", size = 3494315, upload-time = "2025-10-15T23:17:42.769Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8b/88cc7e3bd0a8e7b861f26981f7b820e1f46aa9d26cc482d0feba0ecb4919/cryptography-46.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:516ea134e703e9fe26bcd1277a4b59ad30586ea90c365a87781d7887a646fe21", size = 2919331, upload-time = "2025-10-15T23:17:44.468Z" }, - { url = 
"https://files.pythonhosted.org/packages/fd/23/45fe7f376a7df8daf6da3556603b36f53475a99ce4faacb6ba2cf3d82021/cryptography-46.0.3-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:cb3d760a6117f621261d662bccc8ef5bc32ca673e037c83fbe565324f5c46936", size = 7218248, upload-time = "2025-10-15T23:17:46.294Z" }, - { url = "https://files.pythonhosted.org/packages/27/32/b68d27471372737054cbd34c84981f9edbc24fe67ca225d389799614e27f/cryptography-46.0.3-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:4b7387121ac7d15e550f5cb4a43aef2559ed759c35df7336c402bb8275ac9683", size = 4294089, upload-time = "2025-10-15T23:17:48.269Z" }, - { url = "https://files.pythonhosted.org/packages/26/42/fa8389d4478368743e24e61eea78846a0006caffaf72ea24a15159215a14/cryptography-46.0.3-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:15ab9b093e8f09daab0f2159bb7e47532596075139dd74365da52ecc9cb46c5d", size = 4440029, upload-time = "2025-10-15T23:17:49.837Z" }, - { url = "https://files.pythonhosted.org/packages/5f/eb/f483db0ec5ac040824f269e93dd2bd8a21ecd1027e77ad7bdf6914f2fd80/cryptography-46.0.3-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:46acf53b40ea38f9c6c229599a4a13f0d46a6c3fa9ef19fc1a124d62e338dfa0", size = 4297222, upload-time = "2025-10-15T23:17:51.357Z" }, - { url = "https://files.pythonhosted.org/packages/fd/cf/da9502c4e1912cb1da3807ea3618a6829bee8207456fbbeebc361ec38ba3/cryptography-46.0.3-cp38-abi3-manylinux_2_28_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:10ca84c4668d066a9878890047f03546f3ae0a6b8b39b697457b7757aaf18dbc", size = 4012280, upload-time = "2025-10-15T23:17:52.964Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8f/9adb86b93330e0df8b3dcf03eae67c33ba89958fc2e03862ef1ac2b42465/cryptography-46.0.3-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:36e627112085bb3b81b19fed209c05ce2a52ee8b15d161b7c643a7d5a88491f3", size = 4978958, upload-time = "2025-10-15T23:17:54.965Z" }, - { url = 
"https://files.pythonhosted.org/packages/d1/a0/5fa77988289c34bdb9f913f5606ecc9ada1adb5ae870bd0d1054a7021cc4/cryptography-46.0.3-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:1000713389b75c449a6e979ffc7dcc8ac90b437048766cef052d4d30b8220971", size = 4473714, upload-time = "2025-10-15T23:17:56.754Z" }, - { url = "https://files.pythonhosted.org/packages/14/e5/fc82d72a58d41c393697aa18c9abe5ae1214ff6f2a5c18ac470f92777895/cryptography-46.0.3-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:b02cf04496f6576afffef5ddd04a0cb7d49cf6be16a9059d793a30b035f6b6ac", size = 4296970, upload-time = "2025-10-15T23:17:58.588Z" }, - { url = "https://files.pythonhosted.org/packages/78/06/5663ed35438d0b09056973994f1aec467492b33bd31da36e468b01ec1097/cryptography-46.0.3-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:71e842ec9bc7abf543b47cf86b9a743baa95f4677d22baa4c7d5c69e49e9bc04", size = 4940236, upload-time = "2025-10-15T23:18:00.897Z" }, - { url = "https://files.pythonhosted.org/packages/fc/59/873633f3f2dcd8a053b8dd1d38f783043b5fce589c0f6988bf55ef57e43e/cryptography-46.0.3-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:402b58fc32614f00980b66d6e56a5b4118e6cb362ae8f3fda141ba4689bd4506", size = 4472642, upload-time = "2025-10-15T23:18:02.749Z" }, - { url = "https://files.pythonhosted.org/packages/3d/39/8e71f3930e40f6877737d6f69248cf74d4e34b886a3967d32f919cc50d3b/cryptography-46.0.3-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:ef639cb3372f69ec44915fafcd6698b6cc78fbe0c2ea41be867f6ed612811963", size = 4423126, upload-time = "2025-10-15T23:18:04.85Z" }, - { url = "https://files.pythonhosted.org/packages/cd/c7/f65027c2810e14c3e7268353b1681932b87e5a48e65505d8cc17c99e36ae/cryptography-46.0.3-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:3b51b8ca4f1c6453d8829e1eb7299499ca7f313900dd4d89a24b8b87c0a780d4", size = 4686573, upload-time = "2025-10-15T23:18:06.908Z" }, - { url = 
"https://files.pythonhosted.org/packages/0a/6e/1c8331ddf91ca4730ab3086a0f1be19c65510a33b5a441cb334e7a2d2560/cryptography-46.0.3-cp38-abi3-win32.whl", hash = "sha256:6276eb85ef938dc035d59b87c8a7dc559a232f954962520137529d77b18ff1df", size = 3036695, upload-time = "2025-10-15T23:18:08.672Z" }, - { url = "https://files.pythonhosted.org/packages/90/45/b0d691df20633eff80955a0fc7695ff9051ffce8b69741444bd9ed7bd0db/cryptography-46.0.3-cp38-abi3-win_amd64.whl", hash = "sha256:416260257577718c05135c55958b674000baef9a1c7d9e8f306ec60d71db850f", size = 3501720, upload-time = "2025-10-15T23:18:10.632Z" }, - { url = "https://files.pythonhosted.org/packages/e8/cb/2da4cc83f5edb9c3257d09e1e7ab7b23f049c7962cae8d842bbef0a9cec9/cryptography-46.0.3-cp38-abi3-win_arm64.whl", hash = "sha256:d89c3468de4cdc4f08a57e214384d0471911a3830fcdaf7a8cc587e42a866372", size = 2918740, upload-time = "2025-10-15T23:18:12.277Z" }, + { url = "https://files.pythonhosted.org/packages/f7/81/b0bb27f2ba931a65409c6b8a8b358a7f03c0e46eceacddff55f7c84b1f3b/cryptography-46.0.5-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:351695ada9ea9618b3500b490ad54c739860883df6c1f555e088eaf25b1bbaad", size = 7176289, upload-time = "2026-02-10T19:17:08.274Z" }, + { url = "https://files.pythonhosted.org/packages/ff/9e/6b4397a3e3d15123de3b1806ef342522393d50736c13b20ec4c9ea6693a6/cryptography-46.0.5-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c18ff11e86df2e28854939acde2d003f7984f721eba450b56a200ad90eeb0e6b", size = 4275637, upload-time = "2026-02-10T19:17:10.53Z" }, + { url = "https://files.pythonhosted.org/packages/63/e7/471ab61099a3920b0c77852ea3f0ea611c9702f651600397ac567848b897/cryptography-46.0.5-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4d7e3d356b8cd4ea5aff04f129d5f66ebdc7b6f8eae802b93739ed520c47c79b", size = 4424742, upload-time = "2026-02-10T19:17:12.388Z" }, + { url = 
"https://files.pythonhosted.org/packages/37/53/a18500f270342d66bf7e4d9f091114e31e5ee9e7375a5aba2e85a91e0044/cryptography-46.0.5-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:50bfb6925eff619c9c023b967d5b77a54e04256c4281b0e21336a130cd7fc263", size = 4277528, upload-time = "2026-02-10T19:17:13.853Z" }, + { url = "https://files.pythonhosted.org/packages/22/29/c2e812ebc38c57b40e7c583895e73c8c5adb4d1e4a0cc4c5a4fdab2b1acc/cryptography-46.0.5-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:803812e111e75d1aa73690d2facc295eaefd4439be1023fefc4995eaea2af90d", size = 4947993, upload-time = "2026-02-10T19:17:15.618Z" }, + { url = "https://files.pythonhosted.org/packages/6b/e7/237155ae19a9023de7e30ec64e5d99a9431a567407ac21170a046d22a5a3/cryptography-46.0.5-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ee190460e2fbe447175cda91b88b84ae8322a104fc27766ad09428754a618ed", size = 4456855, upload-time = "2026-02-10T19:17:17.221Z" }, + { url = "https://files.pythonhosted.org/packages/2d/87/fc628a7ad85b81206738abbd213b07702bcbdada1dd43f72236ef3cffbb5/cryptography-46.0.5-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:f145bba11b878005c496e93e257c1e88f154d278d2638e6450d17e0f31e558d2", size = 3984635, upload-time = "2026-02-10T19:17:18.792Z" }, + { url = "https://files.pythonhosted.org/packages/84/29/65b55622bde135aedf4565dc509d99b560ee4095e56989e815f8fd2aa910/cryptography-46.0.5-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:e9251e3be159d1020c4030bd2e5f84d6a43fe54b6c19c12f51cde9542a2817b2", size = 4277038, upload-time = "2026-02-10T19:17:20.256Z" }, + { url = "https://files.pythonhosted.org/packages/bc/36/45e76c68d7311432741faf1fbf7fac8a196a0a735ca21f504c75d37e2558/cryptography-46.0.5-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:47fb8a66058b80e509c47118ef8a75d14c455e81ac369050f20ba0d23e77fee0", size = 4912181, upload-time = "2026-02-10T19:17:21.825Z" }, + { url = 
"https://files.pythonhosted.org/packages/6d/1a/c1ba8fead184d6e3d5afcf03d569acac5ad063f3ac9fb7258af158f7e378/cryptography-46.0.5-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:4c3341037c136030cb46e4b1e17b7418ea4cbd9dd207e4a6f3b2b24e0d4ac731", size = 4456482, upload-time = "2026-02-10T19:17:25.133Z" }, + { url = "https://files.pythonhosted.org/packages/f9/e5/3fb22e37f66827ced3b902cf895e6a6bc1d095b5b26be26bd13c441fdf19/cryptography-46.0.5-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:890bcb4abd5a2d3f852196437129eb3667d62630333aacc13dfd470fad3aaa82", size = 4405497, upload-time = "2026-02-10T19:17:26.66Z" }, + { url = "https://files.pythonhosted.org/packages/1a/df/9d58bb32b1121a8a2f27383fabae4d63080c7ca60b9b5c88be742be04ee7/cryptography-46.0.5-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:80a8d7bfdf38f87ca30a5391c0c9ce4ed2926918e017c29ddf643d0ed2778ea1", size = 4667819, upload-time = "2026-02-10T19:17:28.569Z" }, + { url = "https://files.pythonhosted.org/packages/ea/ed/325d2a490c5e94038cdb0117da9397ece1f11201f425c4e9c57fe5b9f08b/cryptography-46.0.5-cp311-abi3-win32.whl", hash = "sha256:60ee7e19e95104d4c03871d7d7dfb3d22ef8a9b9c6778c94e1c8fcc8365afd48", size = 3028230, upload-time = "2026-02-10T19:17:30.518Z" }, + { url = "https://files.pythonhosted.org/packages/e9/5a/ac0f49e48063ab4255d9e3b79f5def51697fce1a95ea1370f03dc9db76f6/cryptography-46.0.5-cp311-abi3-win_amd64.whl", hash = "sha256:38946c54b16c885c72c4f59846be9743d699eee2b69b6988e0a00a01f46a61a4", size = 3480909, upload-time = "2026-02-10T19:17:32.083Z" }, + { url = "https://files.pythonhosted.org/packages/00/13/3d278bfa7a15a96b9dc22db5a12ad1e48a9eb3d40e1827ef66a5df75d0d0/cryptography-46.0.5-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:94a76daa32eb78d61339aff7952ea819b1734b46f73646a07decb40e5b3448e2", size = 7119287, upload-time = "2026-02-10T19:17:33.801Z" }, + { url = 
"https://files.pythonhosted.org/packages/67/c8/581a6702e14f0898a0848105cbefd20c058099e2c2d22ef4e476dfec75d7/cryptography-46.0.5-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:5be7bf2fb40769e05739dd0046e7b26f9d4670badc7b032d6ce4db64dddc0678", size = 4265728, upload-time = "2026-02-10T19:17:35.569Z" }, + { url = "https://files.pythonhosted.org/packages/dd/4a/ba1a65ce8fc65435e5a849558379896c957870dd64fecea97b1ad5f46a37/cryptography-46.0.5-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fe346b143ff9685e40192a4960938545c699054ba11d4f9029f94751e3f71d87", size = 4408287, upload-time = "2026-02-10T19:17:36.938Z" }, + { url = "https://files.pythonhosted.org/packages/f8/67/8ffdbf7b65ed1ac224d1c2df3943553766914a8ca718747ee3871da6107e/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:c69fd885df7d089548a42d5ec05be26050ebcd2283d89b3d30676eb32ff87dee", size = 4270291, upload-time = "2026-02-10T19:17:38.748Z" }, + { url = "https://files.pythonhosted.org/packages/f8/e5/f52377ee93bc2f2bba55a41a886fd208c15276ffbd2569f2ddc89d50e2c5/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:8293f3dea7fc929ef7240796ba231413afa7b68ce38fd21da2995549f5961981", size = 4927539, upload-time = "2026-02-10T19:17:40.241Z" }, + { url = "https://files.pythonhosted.org/packages/3b/02/cfe39181b02419bbbbcf3abdd16c1c5c8541f03ca8bda240debc467d5a12/cryptography-46.0.5-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:1abfdb89b41c3be0365328a410baa9df3ff8a9110fb75e7b52e66803ddabc9a9", size = 4442199, upload-time = "2026-02-10T19:17:41.789Z" }, + { url = "https://files.pythonhosted.org/packages/c0/96/2fcaeb4873e536cf71421a388a6c11b5bc846e986b2b069c79363dc1648e/cryptography-46.0.5-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:d66e421495fdb797610a08f43b05269e0a5ea7f5e652a89bfd5a7d3c1dee3648", size = 3960131, upload-time = "2026-02-10T19:17:43.379Z" }, + { url = 
"https://files.pythonhosted.org/packages/d8/d2/b27631f401ddd644e94c5cf33c9a4069f72011821cf3dc7309546b0642a0/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:4e817a8920bfbcff8940ecfd60f23d01836408242b30f1a708d93198393a80b4", size = 4270072, upload-time = "2026-02-10T19:17:45.481Z" }, + { url = "https://files.pythonhosted.org/packages/f4/a7/60d32b0370dae0b4ebe55ffa10e8599a2a59935b5ece1b9f06edb73abdeb/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:68f68d13f2e1cb95163fa3b4db4bf9a159a418f5f6e7242564fc75fcae667fd0", size = 4892170, upload-time = "2026-02-10T19:17:46.997Z" }, + { url = "https://files.pythonhosted.org/packages/d2/b9/cf73ddf8ef1164330eb0b199a589103c363afa0cf794218c24d524a58eab/cryptography-46.0.5-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:a3d1fae9863299076f05cb8a778c467578262fae09f9dc0ee9b12eb4268ce663", size = 4441741, upload-time = "2026-02-10T19:17:48.661Z" }, + { url = "https://files.pythonhosted.org/packages/5f/eb/eee00b28c84c726fe8fa0158c65afe312d9c3b78d9d01daf700f1f6e37ff/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:c4143987a42a2397f2fc3b4d7e3a7d313fbe684f67ff443999e803dd75a76826", size = 4396728, upload-time = "2026-02-10T19:17:50.058Z" }, + { url = "https://files.pythonhosted.org/packages/65/f4/6bc1a9ed5aef7145045114b75b77c2a8261b4d38717bd8dea111a63c3442/cryptography-46.0.5-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:7d731d4b107030987fd61a7f8ab512b25b53cef8f233a97379ede116f30eb67d", size = 4652001, upload-time = "2026-02-10T19:17:51.54Z" }, + { url = "https://files.pythonhosted.org/packages/86/ef/5d00ef966ddd71ac2e6951d278884a84a40ffbd88948ef0e294b214ae9e4/cryptography-46.0.5-cp314-cp314t-win32.whl", hash = "sha256:c3bcce8521d785d510b2aad26ae2c966092b7daa8f45dd8f44734a104dc0bc1a", size = 3003637, upload-time = "2026-02-10T19:17:52.997Z" }, + { url = 
"https://files.pythonhosted.org/packages/b7/57/f3f4160123da6d098db78350fdfd9705057aad21de7388eacb2401dceab9/cryptography-46.0.5-cp314-cp314t-win_amd64.whl", hash = "sha256:4d8ae8659ab18c65ced284993c2265910f6c9e650189d4e3f68445ef82a810e4", size = 3469487, upload-time = "2026-02-10T19:17:54.549Z" }, + { url = "https://files.pythonhosted.org/packages/e2/fa/a66aa722105ad6a458bebd64086ca2b72cdd361fed31763d20390f6f1389/cryptography-46.0.5-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:4108d4c09fbbf2789d0c926eb4152ae1760d5a2d97612b92d508d96c861e4d31", size = 7170514, upload-time = "2026-02-10T19:17:56.267Z" }, + { url = "https://files.pythonhosted.org/packages/0f/04/c85bdeab78c8bc77b701bf0d9bdcf514c044e18a46dcff330df5448631b0/cryptography-46.0.5-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7d1f30a86d2757199cb2d56e48cce14deddf1f9c95f1ef1b64ee91ea43fe2e18", size = 4275349, upload-time = "2026-02-10T19:17:58.419Z" }, + { url = "https://files.pythonhosted.org/packages/5c/32/9b87132a2f91ee7f5223b091dc963055503e9b442c98fc0b8a5ca765fab0/cryptography-46.0.5-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:039917b0dc418bb9f6edce8a906572d69e74bd330b0b3fea4f79dab7f8ddd235", size = 4420667, upload-time = "2026-02-10T19:18:00.619Z" }, + { url = "https://files.pythonhosted.org/packages/a1/a6/a7cb7010bec4b7c5692ca6f024150371b295ee1c108bdc1c400e4c44562b/cryptography-46.0.5-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:ba2a27ff02f48193fc4daeadf8ad2590516fa3d0adeeb34336b96f7fa64c1e3a", size = 4276980, upload-time = "2026-02-10T19:18:02.379Z" }, + { url = "https://files.pythonhosted.org/packages/8e/7c/c4f45e0eeff9b91e3f12dbd0e165fcf2a38847288fcfd889deea99fb7b6d/cryptography-46.0.5-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:61aa400dce22cb001a98014f647dc21cda08f7915ceb95df0c9eaf84b4b6af76", size = 4939143, upload-time = "2026-02-10T19:18:03.964Z" }, + { url = 
"https://files.pythonhosted.org/packages/37/19/e1b8f964a834eddb44fa1b9a9976f4e414cbb7aa62809b6760c8803d22d1/cryptography-46.0.5-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:3ce58ba46e1bc2aac4f7d9290223cead56743fa6ab94a5d53292ffaac6a91614", size = 4453674, upload-time = "2026-02-10T19:18:05.588Z" }, + { url = "https://files.pythonhosted.org/packages/db/ed/db15d3956f65264ca204625597c410d420e26530c4e2943e05a0d2f24d51/cryptography-46.0.5-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:420d0e909050490d04359e7fdb5ed7e667ca5c3c402b809ae2563d7e66a92229", size = 3978801, upload-time = "2026-02-10T19:18:07.167Z" }, + { url = "https://files.pythonhosted.org/packages/41/e2/df40a31d82df0a70a0daf69791f91dbb70e47644c58581d654879b382d11/cryptography-46.0.5-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:582f5fcd2afa31622f317f80426a027f30dc792e9c80ffee87b993200ea115f1", size = 4276755, upload-time = "2026-02-10T19:18:09.813Z" }, + { url = "https://files.pythonhosted.org/packages/33/45/726809d1176959f4a896b86907b98ff4391a8aa29c0aaaf9450a8a10630e/cryptography-46.0.5-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:bfd56bb4b37ed4f330b82402f6f435845a5f5648edf1ad497da51a8452d5d62d", size = 4901539, upload-time = "2026-02-10T19:18:11.263Z" }, + { url = "https://files.pythonhosted.org/packages/99/0f/a3076874e9c88ecb2ecc31382f6e7c21b428ede6f55aafa1aa272613e3cd/cryptography-46.0.5-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:a3d507bb6a513ca96ba84443226af944b0f7f47dcc9a399d110cd6146481d24c", size = 4452794, upload-time = "2026-02-10T19:18:12.914Z" }, + { url = "https://files.pythonhosted.org/packages/02/ef/ffeb542d3683d24194a38f66ca17c0a4b8bf10631feef44a7ef64e631b1a/cryptography-46.0.5-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9f16fbdf4da055efb21c22d81b89f155f02ba420558db21288b3d0035bafd5f4", size = 4404160, upload-time = "2026-02-10T19:18:14.375Z" }, + { url = 
"https://files.pythonhosted.org/packages/96/93/682d2b43c1d5f1406ed048f377c0fc9fc8f7b0447a478d5c65ab3d3a66eb/cryptography-46.0.5-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:ced80795227d70549a411a4ab66e8ce307899fad2220ce5ab2f296e687eacde9", size = 4667123, upload-time = "2026-02-10T19:18:15.886Z" }, + { url = "https://files.pythonhosted.org/packages/45/2d/9c5f2926cb5300a8eefc3f4f0b3f3df39db7f7ce40c8365444c49363cbda/cryptography-46.0.5-cp38-abi3-win32.whl", hash = "sha256:02f547fce831f5096c9a567fd41bc12ca8f11df260959ecc7c3202555cc47a72", size = 3010220, upload-time = "2026-02-10T19:18:17.361Z" }, + { url = "https://files.pythonhosted.org/packages/48/ef/0c2f4a8e31018a986949d34a01115dd057bf536905dca38897bacd21fac3/cryptography-46.0.5-cp38-abi3-win_amd64.whl", hash = "sha256:556e106ee01aa13484ce9b0239bca667be5004efb0aabbed28d353df86445595", size = 3467050, upload-time = "2026-02-10T19:18:18.899Z" }, ] [[package]] name = "filelock" -version = "3.24.0" +version = "3.25.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/00/cd/fa3ab025a8f9772e8a9146d8fd8eef6d62649274d231ca84249f54a0de4a/filelock-3.24.0.tar.gz", hash = "sha256:aeeab479339ddf463a1cdd1f15a6e6894db976071e5883efc94d22ed5139044b", size = 37166, upload-time = "2026-02-14T16:05:28.723Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/8b/4c32ecde6bea6486a2a5d05340e695174351ff6b06cf651a74c005f9df00/filelock-3.25.1.tar.gz", hash = "sha256:b9a2e977f794ef94d77cdf7d27129ac648a61f585bff3ca24630c1629f701aa9", size = 40319, upload-time = "2026-03-09T19:38:47.309Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d9/dd/d7e7f4f49180e8591c9e1281d15ecf8e7f25eb2c829771d9682f1f9fe0c8/filelock-3.24.0-py3-none-any.whl", hash = "sha256:eebebb403d78363ef7be8e236b63cc6760b0004c7464dceaba3fd0afbd637ced", size = 23977, upload-time = "2026-02-14T16:05:27.578Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/b8/2f664b56a3b4b32d28d3d106c71783073f712ba43ff6d34b9ea0ce36dc7b/filelock-3.25.1-py3-none-any.whl", hash = "sha256:18972df45473c4aa2c7921b609ee9ca4925910cc3a0fb226c96b92fc224ef7bf", size = 26720, upload-time = "2026-03-09T19:38:45.718Z" }, ] [[package]] @@ -526,7 +523,7 @@ wheels = [ [[package]] name = "gen-worker" -version = "0.3.0" +version = "0.3.10" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "aiohttp" }, @@ -540,52 +537,53 @@ dependencies = [ { name = "pyjwt", extra = ["crypto"] }, { name = "pyyaml" }, { name = "requests" }, + { name = "tomli-w" }, { name = "tqdm" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/37/ac/309ca10137c52be07071a058affcf991e91ef1cae6c7bc2c3fc6fc726e55/gen_worker-0.3.0.tar.gz", hash = "sha256:689be664a9f8b7c3a052103675bea097594934b04ed08d02bf4b9d06c2e1468f", size = 130692, upload-time = "2026-02-14T22:19:00.419Z" } +sdist = { url = "https://files.pythonhosted.org/packages/72/f3/5509b6d962895800d26a44f255aeeb6df864312e3d4e2fc59fb63f1c9f3a/gen_worker-0.3.10.tar.gz", hash = "sha256:f750110a08b5a9ef868c12878698076ef8e88f19a8816fcdcc73343df88cc366", size = 163826, upload-time = "2026-03-10T07:42:10.647Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/25/b5/658d13ad74ea95661b6e8eaca426bc7500f72887b47404e8d00bc9a2d369/gen_worker-0.3.0-py3-none-any.whl", hash = "sha256:c4e5011e89bedbcd52e04637282942cbc4270c55d2056176863e16a855d803e3", size = 145753, upload-time = "2026-02-14T22:18:59.182Z" }, + { url = "https://files.pythonhosted.org/packages/ef/e3/a99492c2138cc586ca9ebaf46690039327c0ba63f03cebf6830139b68c80/gen_worker-0.3.10-py3-none-any.whl", hash = "sha256:956ebef7cd1b4cc55e7b8612c27063c8925ecf2f104d9a2553db2fa7cf665a25", size = 187377, upload-time = "2026-03-10T07:42:12.175Z" }, ] [[package]] name = "grpcio" -version = "1.76.0" +version = "1.78.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = 
"typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/b6/e0/318c1ce3ae5a17894d5791e87aea147587c9e702f24122cc7a5c8bbaeeb1/grpcio-1.76.0.tar.gz", hash = "sha256:7be78388d6da1a25c0d5ec506523db58b18be22d9c37d8d3a32c08be4987bd73", size = 12785182, upload-time = "2025-10-21T16:23:12.106Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/8a/3d098f35c143a89520e568e6539cc098fcd294495910e359889ce8741c84/grpcio-1.78.0.tar.gz", hash = "sha256:7382b95189546f375c174f53a5fa873cef91c4b8005faa05cc5b3beea9c4f1c5", size = 12852416, upload-time = "2026-02-06T09:57:18.093Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/bf/05/8e29121994b8d959ffa0afd28996d452f291b48cfc0875619de0bde2c50c/grpcio-1.76.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:81fd9652b37b36f16138611c7e884eb82e0cec137c40d3ef7c3f9b3ed00f6ed8", size = 5799718, upload-time = "2025-10-21T16:21:17.939Z" }, - { url = "https://files.pythonhosted.org/packages/d9/75/11d0e66b3cdf998c996489581bdad8900db79ebd83513e45c19548f1cba4/grpcio-1.76.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:04bbe1bfe3a68bbfd4e52402ab7d4eb59d72d02647ae2042204326cf4bbad280", size = 11825627, upload-time = "2025-10-21T16:21:20.466Z" }, - { url = "https://files.pythonhosted.org/packages/28/50/2f0aa0498bc188048f5d9504dcc5c2c24f2eb1a9337cd0fa09a61a2e75f0/grpcio-1.76.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d388087771c837cdb6515539f43b9d4bf0b0f23593a24054ac16f7a960be16f4", size = 6359167, upload-time = "2025-10-21T16:21:23.122Z" }, - { url = "https://files.pythonhosted.org/packages/66/e5/bbf0bb97d29ede1d59d6588af40018cfc345b17ce979b7b45424628dc8bb/grpcio-1.76.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:9f8f757bebaaea112c00dba718fc0d3260052ce714e25804a03f93f5d1c6cc11", size = 7044267, upload-time = "2025-10-21T16:21:25.995Z" }, - { url = 
"https://files.pythonhosted.org/packages/f5/86/f6ec2164f743d9609691115ae8ece098c76b894ebe4f7c94a655c6b03e98/grpcio-1.76.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:980a846182ce88c4f2f7e2c22c56aefd515daeb36149d1c897f83cf57999e0b6", size = 6573963, upload-time = "2025-10-21T16:21:28.631Z" }, - { url = "https://files.pythonhosted.org/packages/60/bc/8d9d0d8505feccfdf38a766d262c71e73639c165b311c9457208b56d92ae/grpcio-1.76.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:f92f88e6c033db65a5ae3d97905c8fea9c725b63e28d5a75cb73b49bda5024d8", size = 7164484, upload-time = "2025-10-21T16:21:30.837Z" }, - { url = "https://files.pythonhosted.org/packages/67/e6/5d6c2fc10b95edf6df9b8f19cf10a34263b7fd48493936fffd5085521292/grpcio-1.76.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:4baf3cbe2f0be3289eb68ac8ae771156971848bb8aaff60bad42005539431980", size = 8127777, upload-time = "2025-10-21T16:21:33.577Z" }, - { url = "https://files.pythonhosted.org/packages/3f/c8/dce8ff21c86abe025efe304d9e31fdb0deaaa3b502b6a78141080f206da0/grpcio-1.76.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:615ba64c208aaceb5ec83bfdce7728b80bfeb8be97562944836a7a0a9647d882", size = 7594014, upload-time = "2025-10-21T16:21:41.882Z" }, - { url = "https://files.pythonhosted.org/packages/e0/42/ad28191ebf983a5d0ecef90bab66baa5a6b18f2bfdef9d0a63b1973d9f75/grpcio-1.76.0-cp312-cp312-win32.whl", hash = "sha256:45d59a649a82df5718fd9527ce775fd66d1af35e6d31abdcdc906a49c6822958", size = 3984750, upload-time = "2025-10-21T16:21:44.006Z" }, - { url = "https://files.pythonhosted.org/packages/9e/00/7bd478cbb851c04a48baccaa49b75abaa8e4122f7d86da797500cccdd771/grpcio-1.76.0-cp312-cp312-win_amd64.whl", hash = "sha256:c088e7a90b6017307f423efbb9d1ba97a22aa2170876223f9709e9d1de0b5347", size = 4704003, upload-time = "2025-10-21T16:21:46.244Z" }, - { url = 
"https://files.pythonhosted.org/packages/fc/ed/71467ab770effc9e8cef5f2e7388beb2be26ed642d567697bb103a790c72/grpcio-1.76.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:26ef06c73eb53267c2b319f43e6634c7556ea37672029241a056629af27c10e2", size = 5807716, upload-time = "2025-10-21T16:21:48.475Z" }, - { url = "https://files.pythonhosted.org/packages/2c/85/c6ed56f9817fab03fa8a111ca91469941fb514e3e3ce6d793cb8f1e1347b/grpcio-1.76.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:45e0111e73f43f735d70786557dc38141185072d7ff8dc1829d6a77ac1471468", size = 11821522, upload-time = "2025-10-21T16:21:51.142Z" }, - { url = "https://files.pythonhosted.org/packages/ac/31/2b8a235ab40c39cbc141ef647f8a6eb7b0028f023015a4842933bc0d6831/grpcio-1.76.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:83d57312a58dcfe2a3a0f9d1389b299438909a02db60e2f2ea2ae2d8034909d3", size = 6362558, upload-time = "2025-10-21T16:21:54.213Z" }, - { url = "https://files.pythonhosted.org/packages/bd/64/9784eab483358e08847498ee56faf8ff6ea8e0a4592568d9f68edc97e9e9/grpcio-1.76.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:3e2a27c89eb9ac3d81ec8835e12414d73536c6e620355d65102503064a4ed6eb", size = 7049990, upload-time = "2025-10-21T16:21:56.476Z" }, - { url = "https://files.pythonhosted.org/packages/2b/94/8c12319a6369434e7a184b987e8e9f3b49a114c489b8315f029e24de4837/grpcio-1.76.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61f69297cba3950a524f61c7c8ee12e55c486cb5f7db47ff9dcee33da6f0d3ae", size = 6575387, upload-time = "2025-10-21T16:21:59.051Z" }, - { url = "https://files.pythonhosted.org/packages/15/0f/f12c32b03f731f4a6242f771f63039df182c8b8e2cf8075b245b409259d4/grpcio-1.76.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6a15c17af8839b6801d554263c546c69c4d7718ad4321e3166175b37eaacca77", size = 7166668, upload-time = "2025-10-21T16:22:02.049Z" }, - { url = 
"https://files.pythonhosted.org/packages/ff/2d/3ec9ce0c2b1d92dd59d1c3264aaec9f0f7c817d6e8ac683b97198a36ed5a/grpcio-1.76.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:25a18e9810fbc7e7f03ec2516addc116a957f8cbb8cbc95ccc80faa072743d03", size = 8124928, upload-time = "2025-10-21T16:22:04.984Z" }, - { url = "https://files.pythonhosted.org/packages/1a/74/fd3317be5672f4856bcdd1a9e7b5e17554692d3db9a3b273879dc02d657d/grpcio-1.76.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:931091142fd8cc14edccc0845a79248bc155425eee9a98b2db2ea4f00a235a42", size = 7589983, upload-time = "2025-10-21T16:22:07.881Z" }, - { url = "https://files.pythonhosted.org/packages/45/bb/ca038cf420f405971f19821c8c15bcbc875505f6ffadafe9ffd77871dc4c/grpcio-1.76.0-cp313-cp313-win32.whl", hash = "sha256:5e8571632780e08526f118f74170ad8d50fb0a48c23a746bef2a6ebade3abd6f", size = 3984727, upload-time = "2025-10-21T16:22:10.032Z" }, - { url = "https://files.pythonhosted.org/packages/41/80/84087dc56437ced7cdd4b13d7875e7439a52a261e3ab4e06488ba6173b0a/grpcio-1.76.0-cp313-cp313-win_amd64.whl", hash = "sha256:f9f7bd5faab55f47231ad8dba7787866b69f5e93bc306e3915606779bbfb4ba8", size = 4702799, upload-time = "2025-10-21T16:22:12.709Z" }, - { url = "https://files.pythonhosted.org/packages/b4/46/39adac80de49d678e6e073b70204091e76631e03e94928b9ea4ecf0f6e0e/grpcio-1.76.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:ff8a59ea85a1f2191a0ffcc61298c571bc566332f82e5f5be1b83c9d8e668a62", size = 5808417, upload-time = "2025-10-21T16:22:15.02Z" }, - { url = "https://files.pythonhosted.org/packages/9c/f5/a4531f7fb8b4e2a60b94e39d5d924469b7a6988176b3422487be61fe2998/grpcio-1.76.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:06c3d6b076e7b593905d04fdba6a0525711b3466f43b3400266f04ff735de0cd", size = 11828219, upload-time = "2025-10-21T16:22:17.954Z" }, - { url = 
"https://files.pythonhosted.org/packages/4b/1c/de55d868ed7a8bd6acc6b1d6ddc4aa36d07a9f31d33c912c804adb1b971b/grpcio-1.76.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:fd5ef5932f6475c436c4a55e4336ebbe47bd3272be04964a03d316bbf4afbcbc", size = 6367826, upload-time = "2025-10-21T16:22:20.721Z" }, - { url = "https://files.pythonhosted.org/packages/59/64/99e44c02b5adb0ad13ab3adc89cb33cb54bfa90c74770f2607eea629b86f/grpcio-1.76.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b331680e46239e090f5b3cead313cc772f6caa7d0fc8de349337563125361a4a", size = 7049550, upload-time = "2025-10-21T16:22:23.637Z" }, - { url = "https://files.pythonhosted.org/packages/43/28/40a5be3f9a86949b83e7d6a2ad6011d993cbe9b6bd27bea881f61c7788b6/grpcio-1.76.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2229ae655ec4e8999599469559e97630185fdd53ae1e8997d147b7c9b2b72cba", size = 6575564, upload-time = "2025-10-21T16:22:26.016Z" }, - { url = "https://files.pythonhosted.org/packages/4b/a9/1be18e6055b64467440208a8559afac243c66a8b904213af6f392dc2212f/grpcio-1.76.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:490fa6d203992c47c7b9e4a9d39003a0c2bcc1c9aa3c058730884bbbb0ee9f09", size = 7176236, upload-time = "2025-10-21T16:22:28.362Z" }, - { url = "https://files.pythonhosted.org/packages/0f/55/dba05d3fcc151ce6e81327541d2cc8394f442f6b350fead67401661bf041/grpcio-1.76.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:479496325ce554792dba6548fae3df31a72cef7bad71ca2e12b0e58f9b336bfc", size = 8125795, upload-time = "2025-10-21T16:22:31.075Z" }, - { url = "https://files.pythonhosted.org/packages/4a/45/122df922d05655f63930cf42c9e3f72ba20aadb26c100ee105cad4ce4257/grpcio-1.76.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:1c9b93f79f48b03ada57ea24725d83a30284a012ec27eab2cf7e50a550cbbbcc", size = 7592214, upload-time = "2025-10-21T16:22:33.831Z" }, - { url = 
"https://files.pythonhosted.org/packages/4a/6e/0b899b7f6b66e5af39e377055fb4a6675c9ee28431df5708139df2e93233/grpcio-1.76.0-cp314-cp314-win32.whl", hash = "sha256:747fa73efa9b8b1488a95d0ba1039c8e2dca0f741612d80415b1e1c560febf4e", size = 4062961, upload-time = "2025-10-21T16:22:36.468Z" }, - { url = "https://files.pythonhosted.org/packages/19/41/0b430b01a2eb38ee887f88c1f07644a1df8e289353b78e82b37ef988fb64/grpcio-1.76.0-cp314-cp314-win_amd64.whl", hash = "sha256:922fa70ba549fce362d2e2871ab542082d66e2aaf0c19480ea453905b01f384e", size = 4834462, upload-time = "2025-10-21T16:22:39.772Z" }, + { url = "https://files.pythonhosted.org/packages/4e/f4/7384ed0178203d6074446b3c4f46c90a22ddf7ae0b3aee521627f54cfc2a/grpcio-1.78.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:f9ab915a267fc47c7e88c387a3a28325b58c898e23d4995f765728f4e3dedb97", size = 5913985, upload-time = "2026-02-06T09:55:26.832Z" }, + { url = "https://files.pythonhosted.org/packages/81/ed/be1caa25f06594463f685b3790b320f18aea49b33166f4141bfdc2bfb236/grpcio-1.78.0-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3f8904a8165ab21e07e58bf3e30a73f4dffc7a1e0dbc32d51c61b5360d26f43e", size = 11811853, upload-time = "2026-02-06T09:55:29.224Z" }, + { url = "https://files.pythonhosted.org/packages/24/a7/f06d151afc4e64b7e3cc3e872d331d011c279aaab02831e40a81c691fb65/grpcio-1.78.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:859b13906ce098c0b493af92142ad051bf64c7870fa58a123911c88606714996", size = 6475766, upload-time = "2026-02-06T09:55:31.825Z" }, + { url = "https://files.pythonhosted.org/packages/8a/a8/4482922da832ec0082d0f2cc3a10976d84a7424707f25780b82814aafc0a/grpcio-1.78.0-cp312-cp312-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b2342d87af32790f934a79c3112641e7b27d63c261b8b4395350dad43eff1dc7", size = 7170027, upload-time = "2026-02-06T09:55:34.7Z" }, + { url = 
"https://files.pythonhosted.org/packages/54/bf/f4a3b9693e35d25b24b0b39fa46d7d8a3c439e0a3036c3451764678fec20/grpcio-1.78.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:12a771591ae40bc65ba67048fa52ef4f0e6db8279e595fd349f9dfddeef571f9", size = 6690766, upload-time = "2026-02-06T09:55:36.902Z" }, + { url = "https://files.pythonhosted.org/packages/c7/b9/521875265cc99fe5ad4c5a17010018085cae2810a928bf15ebe7d8bcd9cc/grpcio-1.78.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:185dea0d5260cbb2d224c507bf2a5444d5abbb1fa3594c1ed7e4c709d5eb8383", size = 7266161, upload-time = "2026-02-06T09:55:39.824Z" }, + { url = "https://files.pythonhosted.org/packages/05/86/296a82844fd40a4ad4a95f100b55044b4f817dece732bf686aea1a284147/grpcio-1.78.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:51b13f9aed9d59ee389ad666b8c2214cc87b5de258fa712f9ab05f922e3896c6", size = 8253303, upload-time = "2026-02-06T09:55:42.353Z" }, + { url = "https://files.pythonhosted.org/packages/f3/e4/ea3c0caf5468537f27ad5aab92b681ed7cc0ef5f8c9196d3fd42c8c2286b/grpcio-1.78.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fd5f135b1bd58ab088930b3c613455796dfa0393626a6972663ccdda5b4ac6ce", size = 7698222, upload-time = "2026-02-06T09:55:44.629Z" }, + { url = "https://files.pythonhosted.org/packages/d7/47/7f05f81e4bb6b831e93271fb12fd52ba7b319b5402cbc101d588f435df00/grpcio-1.78.0-cp312-cp312-win32.whl", hash = "sha256:94309f498bcc07e5a7d16089ab984d42ad96af1d94b5a4eb966a266d9fcabf68", size = 4066123, upload-time = "2026-02-06T09:55:47.644Z" }, + { url = "https://files.pythonhosted.org/packages/ad/e7/d6914822c88aa2974dbbd10903d801a28a19ce9cd8bad7e694cbbcf61528/grpcio-1.78.0-cp312-cp312-win_amd64.whl", hash = "sha256:9566fe4ababbb2610c39190791e5b829869351d14369603702e890ef3ad2d06e", size = 4797657, upload-time = "2026-02-06T09:55:49.86Z" }, + { url = 
"https://files.pythonhosted.org/packages/05/a9/8f75894993895f361ed8636cd9237f4ab39ef87fd30db17467235ed1c045/grpcio-1.78.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:ce3a90455492bf8bfa38e56fbbe1dbd4f872a3d8eeaf7337dc3b1c8aa28c271b", size = 5920143, upload-time = "2026-02-06T09:55:52.035Z" }, + { url = "https://files.pythonhosted.org/packages/55/06/0b78408e938ac424100100fd081189451b472236e8a3a1f6500390dc4954/grpcio-1.78.0-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:2bf5e2e163b356978b23652c4818ce4759d40f4712ee9ec5a83c4be6f8c23a3a", size = 11803926, upload-time = "2026-02-06T09:55:55.494Z" }, + { url = "https://files.pythonhosted.org/packages/88/93/b59fe7832ff6ae3c78b813ea43dac60e295fa03606d14d89d2e0ec29f4f3/grpcio-1.78.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8f2ac84905d12918e4e55a16da17939eb63e433dc11b677267c35568aa63fc84", size = 6478628, upload-time = "2026-02-06T09:55:58.533Z" }, + { url = "https://files.pythonhosted.org/packages/ed/df/e67e3734527f9926b7d9c0dde6cd998d1d26850c3ed8eeec81297967ac67/grpcio-1.78.0-cp313-cp313-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:b58f37edab4a3881bc6c9bca52670610e0c9ca14e2ea3cf9debf185b870457fb", size = 7173574, upload-time = "2026-02-06T09:56:01.786Z" }, + { url = "https://files.pythonhosted.org/packages/a6/62/cc03fffb07bfba982a9ec097b164e8835546980aec25ecfa5f9c1a47e022/grpcio-1.78.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:735e38e176a88ce41840c21bb49098ab66177c64c82426e24e0082500cc68af5", size = 6692639, upload-time = "2026-02-06T09:56:04.529Z" }, + { url = "https://files.pythonhosted.org/packages/bf/9a/289c32e301b85bdb67d7ec68b752155e674ee3ba2173a1858f118e399ef3/grpcio-1.78.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:2045397e63a7a0ee7957c25f7dbb36ddc110e0cfb418403d110c0a7a68a844e9", size = 7268838, upload-time = "2026-02-06T09:56:08.397Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/79/1be93f32add280461fa4773880196572563e9c8510861ac2da0ea0f892b6/grpcio-1.78.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:a9f136fbafe7ccf4ac7e8e0c28b31066e810be52d6e344ef954a3a70234e1702", size = 8251878, upload-time = "2026-02-06T09:56:10.914Z" }, + { url = "https://files.pythonhosted.org/packages/65/65/793f8e95296ab92e4164593674ae6291b204bb5f67f9d4a711489cd30ffa/grpcio-1.78.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:748b6138585379c737adc08aeffd21222abbda1a86a0dca2a39682feb9196c20", size = 7695412, upload-time = "2026-02-06T09:56:13.593Z" }, + { url = "https://files.pythonhosted.org/packages/1c/9f/1e233fe697ecc82845942c2822ed06bb522e70d6771c28d5528e4c50f6a4/grpcio-1.78.0-cp313-cp313-win32.whl", hash = "sha256:271c73e6e5676afe4fc52907686670c7cea22ab2310b76a59b678403ed40d670", size = 4064899, upload-time = "2026-02-06T09:56:15.601Z" }, + { url = "https://files.pythonhosted.org/packages/4d/27/d86b89e36de8a951501fb06a0f38df19853210f341d0b28f83f4aa0ffa08/grpcio-1.78.0-cp313-cp313-win_amd64.whl", hash = "sha256:f2d4e43ee362adfc05994ed479334d5a451ab7bc3f3fee1b796b8ca66895acb4", size = 4797393, upload-time = "2026-02-06T09:56:17.882Z" }, + { url = "https://files.pythonhosted.org/packages/29/f2/b56e43e3c968bfe822fa6ce5bca10d5c723aa40875b48791ce1029bb78c7/grpcio-1.78.0-cp314-cp314-linux_armv7l.whl", hash = "sha256:e87cbc002b6f440482b3519e36e1313eb5443e9e9e73d6a52d43bd2004fcfd8e", size = 5920591, upload-time = "2026-02-06T09:56:20.758Z" }, + { url = "https://files.pythonhosted.org/packages/5d/81/1f3b65bd30c334167bfa8b0d23300a44e2725ce39bba5b76a2460d85f745/grpcio-1.78.0-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:c41bc64626db62e72afec66b0c8a0da76491510015417c127bfc53b2fe6d7f7f", size = 11813685, upload-time = "2026-02-06T09:56:24.315Z" }, + { url = 
"https://files.pythonhosted.org/packages/0e/1c/bbe2f8216a5bd3036119c544d63c2e592bdf4a8ec6e4a1867592f4586b26/grpcio-1.78.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8dfffba826efcf366b1e3ccc37e67afe676f290e13a3b48d31a46739f80a8724", size = 6487803, upload-time = "2026-02-06T09:56:27.367Z" }, + { url = "https://files.pythonhosted.org/packages/16/5c/a6b2419723ea7ddce6308259a55e8e7593d88464ce8db9f4aa857aba96fa/grpcio-1.78.0-cp314-cp314-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:74be1268d1439eaaf552c698cdb11cd594f0c49295ae6bb72c34ee31abbe611b", size = 7173206, upload-time = "2026-02-06T09:56:29.876Z" }, + { url = "https://files.pythonhosted.org/packages/df/1e/b8801345629a415ea7e26c83d75eb5dbe91b07ffe5210cc517348a8d4218/grpcio-1.78.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:be63c88b32e6c0f1429f1398ca5c09bc64b0d80950c8bb7807d7d7fb36fb84c7", size = 6693826, upload-time = "2026-02-06T09:56:32.305Z" }, + { url = "https://files.pythonhosted.org/packages/34/84/0de28eac0377742679a510784f049738a80424b17287739fc47d63c2439e/grpcio-1.78.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:3c586ac70e855c721bda8f548d38c3ca66ac791dc49b66a8281a1f99db85e452", size = 7277897, upload-time = "2026-02-06T09:56:34.915Z" }, + { url = "https://files.pythonhosted.org/packages/ca/9c/ad8685cfe20559a9edb66f735afdcb2b7d3de69b13666fdfc542e1916ebd/grpcio-1.78.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:35eb275bf1751d2ffbd8f57cdbc46058e857cf3971041521b78b7db94bdaf127", size = 8252404, upload-time = "2026-02-06T09:56:37.553Z" }, + { url = "https://files.pythonhosted.org/packages/3c/05/33a7a4985586f27e1de4803887c417ec7ced145ebd069bc38a9607059e2b/grpcio-1.78.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:207db540302c884b8848036b80db352a832b99dfdf41db1eb554c2c2c7800f65", size = 7696837, upload-time = "2026-02-06T09:56:40.173Z" }, + { url = 
"https://files.pythonhosted.org/packages/73/77/7382241caf88729b106e49e7d18e3116216c778e6a7e833826eb96de22f7/grpcio-1.78.0-cp314-cp314-win32.whl", hash = "sha256:57bab6deef2f4f1ca76cc04565df38dc5713ae6c17de690721bdf30cb1e0545c", size = 4142439, upload-time = "2026-02-06T09:56:43.258Z" }, + { url = "https://files.pythonhosted.org/packages/48/b2/b096ccce418882fbfda4f7496f9357aaa9a5af1896a9a7f60d9f2b275a06/grpcio-1.78.0-cp314-cp314-win_amd64.whl", hash = "sha256:dce09d6116df20a96acfdbf85e4866258c3758180e8c49845d6ba8248b6d0bbb", size = 4929852, upload-time = "2026-02-06T09:56:45.885Z" }, ] [[package]] @@ -599,31 +597,34 @@ wheels = [ [[package]] name = "hf-xet" -version = "1.2.0" +version = "1.3.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/5e/6e/0f11bacf08a67f7fb5ee09740f2ca54163863b07b70d579356e9222ce5d8/hf_xet-1.2.0.tar.gz", hash = "sha256:a8c27070ca547293b6890c4bf389f713f80e8c478631432962bb7f4bc0bd7d7f", size = 506020, upload-time = "2025-10-24T19:04:32.129Z" } +sdist = { url = "https://files.pythonhosted.org/packages/8b/cb/9bb543bd987ffa1ee48202cc96a756951b734b79a542335c566148ade36c/hf_xet-1.3.2.tar.gz", hash = "sha256:e130ee08984783d12717444e538587fa2119385e5bd8fc2bb9f930419b73a7af", size = 643646, upload-time = "2026-02-27T17:26:08.051Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/9e/a5/85ef910a0aa034a2abcfadc360ab5ac6f6bc4e9112349bd40ca97551cff0/hf_xet-1.2.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:ceeefcd1b7aed4956ae8499e2199607765fbd1c60510752003b6cc0b8413b649", size = 2861870, upload-time = "2025-10-24T19:04:11.422Z" }, - { url = "https://files.pythonhosted.org/packages/ea/40/e2e0a7eb9a51fe8828ba2d47fe22a7e74914ea8a0db68a18c3aa7449c767/hf_xet-1.2.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:b70218dd548e9840224df5638fdc94bd033552963cfa97f9170829381179c813", size = 2717584, upload-time = "2025-10-24T19:04:09.586Z" }, - { url = 
"https://files.pythonhosted.org/packages/a5/7d/daf7f8bc4594fdd59a8a596f9e3886133fdc68e675292218a5e4c1b7e834/hf_xet-1.2.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d40b18769bb9a8bc82a9ede575ce1a44c75eb80e7375a01d76259089529b5dc", size = 3315004, upload-time = "2025-10-24T19:04:00.314Z" }, - { url = "https://files.pythonhosted.org/packages/b1/ba/45ea2f605fbf6d81c8b21e4d970b168b18a53515923010c312c06cd83164/hf_xet-1.2.0-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:cd3a6027d59cfb60177c12d6424e31f4b5ff13d8e3a1247b3a584bf8977e6df5", size = 3222636, upload-time = "2025-10-24T19:03:58.111Z" }, - { url = "https://files.pythonhosted.org/packages/4a/1d/04513e3cab8f29ab8c109d309ddd21a2705afab9d52f2ba1151e0c14f086/hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:6de1fc44f58f6dd937956c8d304d8c2dea264c80680bcfa61ca4a15e7b76780f", size = 3408448, upload-time = "2025-10-24T19:04:20.951Z" }, - { url = "https://files.pythonhosted.org/packages/f0/7c/60a2756d7feec7387db3a1176c632357632fbe7849fce576c5559d4520c7/hf_xet-1.2.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f182f264ed2acd566c514e45da9f2119110e48a87a327ca271027904c70c5832", size = 3503401, upload-time = "2025-10-24T19:04:22.549Z" }, - { url = "https://files.pythonhosted.org/packages/4e/64/48fffbd67fb418ab07451e4ce641a70de1c40c10a13e25325e24858ebe5a/hf_xet-1.2.0-cp313-cp313t-win_amd64.whl", hash = "sha256:293a7a3787e5c95d7be1857358a9130694a9c6021de3f27fa233f37267174382", size = 2900866, upload-time = "2025-10-24T19:04:33.461Z" }, - { url = "https://files.pythonhosted.org/packages/e2/51/f7e2caae42f80af886db414d4e9885fac959330509089f97cccb339c6b87/hf_xet-1.2.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:10bfab528b968c70e062607f663e21e34e2bba349e8038db546646875495179e", size = 2861861, upload-time = "2025-10-24T19:04:19.01Z" }, - { url = 
"https://files.pythonhosted.org/packages/6e/1d/a641a88b69994f9371bd347f1dd35e5d1e2e2460a2e350c8d5165fc62005/hf_xet-1.2.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a212e842647b02eb6a911187dc878e79c4aa0aa397e88dd3b26761676e8c1f8", size = 2717699, upload-time = "2025-10-24T19:04:17.306Z" }, - { url = "https://files.pythonhosted.org/packages/df/e0/e5e9bba7d15f0318955f7ec3f4af13f92e773fbb368c0b8008a5acbcb12f/hf_xet-1.2.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:30e06daccb3a7d4c065f34fc26c14c74f4653069bb2b194e7f18f17cbe9939c0", size = 3314885, upload-time = "2025-10-24T19:04:07.642Z" }, - { url = "https://files.pythonhosted.org/packages/21/90/b7fe5ff6f2b7b8cbdf1bd56145f863c90a5807d9758a549bf3d916aa4dec/hf_xet-1.2.0-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:29c8fc913a529ec0a91867ce3d119ac1aac966e098cf49501800c870328cc090", size = 3221550, upload-time = "2025-10-24T19:04:05.55Z" }, - { url = "https://files.pythonhosted.org/packages/6f/cb/73f276f0a7ce46cc6a6ec7d6c7d61cbfe5f2e107123d9bbd0193c355f106/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:66e159cbfcfbb29f920db2c09ed8b660eb894640d284f102ada929b6e3dc410a", size = 3408010, upload-time = "2025-10-24T19:04:28.598Z" }, - { url = "https://files.pythonhosted.org/packages/b8/1e/d642a12caa78171f4be64f7cd9c40e3ca5279d055d0873188a58c0f5fbb9/hf_xet-1.2.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:9c91d5ae931510107f148874e9e2de8a16052b6f1b3ca3c1b12f15ccb491390f", size = 3503264, upload-time = "2025-10-24T19:04:30.397Z" }, - { url = "https://files.pythonhosted.org/packages/17/b5/33764714923fa1ff922770f7ed18c2daae034d21ae6e10dbf4347c854154/hf_xet-1.2.0-cp314-cp314t-win_amd64.whl", hash = "sha256:210d577732b519ac6ede149d2f2f34049d44e8622bf14eb3d63bbcd2d4b332dc", size = 2901071, upload-time = "2025-10-24T19:04:37.463Z" }, - { url = 
"https://files.pythonhosted.org/packages/96/2d/22338486473df5923a9ab7107d375dbef9173c338ebef5098ef593d2b560/hf_xet-1.2.0-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:46740d4ac024a7ca9b22bebf77460ff43332868b661186a8e46c227fdae01848", size = 2866099, upload-time = "2025-10-24T19:04:15.366Z" }, - { url = "https://files.pythonhosted.org/packages/7f/8c/c5becfa53234299bc2210ba314eaaae36c2875e0045809b82e40a9544f0c/hf_xet-1.2.0-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:27df617a076420d8845bea087f59303da8be17ed7ec0cd7ee3b9b9f579dff0e4", size = 2722178, upload-time = "2025-10-24T19:04:13.695Z" }, - { url = "https://files.pythonhosted.org/packages/9a/92/cf3ab0b652b082e66876d08da57fcc6fa2f0e6c70dfbbafbd470bb73eb47/hf_xet-1.2.0-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3651fd5bfe0281951b988c0facbe726aa5e347b103a675f49a3fa8144c7968fd", size = 3320214, upload-time = "2025-10-24T19:04:03.596Z" }, - { url = "https://files.pythonhosted.org/packages/46/92/3f7ec4a1b6a65bf45b059b6d4a5d38988f63e193056de2f420137e3c3244/hf_xet-1.2.0-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:d06fa97c8562fb3ee7a378dd9b51e343bc5bc8190254202c9771029152f5e08c", size = 3229054, upload-time = "2025-10-24T19:04:01.949Z" }, - { url = "https://files.pythonhosted.org/packages/0b/dd/7ac658d54b9fb7999a0ccb07ad863b413cbaf5cf172f48ebcd9497ec7263/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:4c1428c9ae73ec0939410ec73023c4f842927f39db09b063b9482dac5a3bb737", size = 3413812, upload-time = "2025-10-24T19:04:24.585Z" }, - { url = "https://files.pythonhosted.org/packages/92/68/89ac4e5b12a9ff6286a12174c8538a5930e2ed662091dd2572bbe0a18c8a/hf_xet-1.2.0-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:a55558084c16b09b5ed32ab9ed38421e2d87cf3f1f89815764d1177081b99865", size = 3508920, upload-time = "2025-10-24T19:04:26.927Z" }, - { url = 
"https://files.pythonhosted.org/packages/cb/44/870d44b30e1dcfb6a65932e3e1506c103a8a5aea9103c337e7a53180322c/hf_xet-1.2.0-cp37-abi3-win_amd64.whl", hash = "sha256:e6584a52253f72c9f52f9e549d5895ca7a471608495c4ecaa6cc73dba2b24d69", size = 2905735, upload-time = "2025-10-24T19:04:35.928Z" }, + { url = "https://files.pythonhosted.org/packages/49/75/462285971954269432aad2e7938c5c7ff9ec7d60129cec542ab37121e3d6/hf_xet-1.3.2-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:335a8f36c55fd35a92d0062f4e9201b4015057e62747b7e7001ffb203c0ee1d2", size = 3761019, upload-time = "2026-02-27T17:25:49.441Z" }, + { url = "https://files.pythonhosted.org/packages/35/56/987b0537ddaf88e17192ea09afa8eca853e55f39a4721578be436f8409df/hf_xet-1.3.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:c1ae4d3a716afc774e66922f3cac8206bfa707db13f6a7e62dfff74bfc95c9a8", size = 3521565, upload-time = "2026-02-27T17:25:47.469Z" }, + { url = "https://files.pythonhosted.org/packages/a8/5c/7e4a33a3d689f77761156cc34558047569e54af92e4d15a8f493229f6767/hf_xet-1.3.2-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d6dbdf231efac0b9b39adcf12a07f0c030498f9212a18e8c50224d0e84ab803d", size = 4176494, upload-time = "2026-02-27T17:25:40.247Z" }, + { url = "https://files.pythonhosted.org/packages/6b/b3/71e856bf9d9a69b3931837e8bf22e095775f268c8edcd4a9e8c355f92484/hf_xet-1.3.2-cp313-cp313t-manylinux_2_28_aarch64.whl", hash = "sha256:c1980abfb68ecf6c1c7983379ed7b1e2b49a1aaf1a5aca9acc7d48e5e2e0a961", size = 3955601, upload-time = "2026-02-27T17:25:38.376Z" }, + { url = "https://files.pythonhosted.org/packages/63/d7/aecf97b3f0a981600a67ff4db15e2d433389d698a284bb0ea5d8fcdd6f7f/hf_xet-1.3.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:1c88fbd90ad0d27c46b77a445f0a436ebaa94e14965c581123b68b1c52f5fd30", size = 4154770, upload-time = "2026-02-27T17:25:56.756Z" }, + { url = 
"https://files.pythonhosted.org/packages/e2/e1/3af961f71a40e09bf5ee909842127b6b00f5ab4ee3817599dc0771b79893/hf_xet-1.3.2-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:35b855024ca37f2dd113ac1c08993e997fbe167b9d61f9ef66d3d4f84015e508", size = 4394161, upload-time = "2026-02-27T17:25:58.111Z" }, + { url = "https://files.pythonhosted.org/packages/a1/c3/859509bade9178e21b8b1db867b8e10e9f817ab9ac1de77cb9f461ced765/hf_xet-1.3.2-cp313-cp313t-win_amd64.whl", hash = "sha256:31612ba0629046e425ba50375685a2586e11fb9144270ebabd75878c3eaf6378", size = 3637377, upload-time = "2026-02-27T17:26:10.611Z" }, + { url = "https://files.pythonhosted.org/packages/05/7f/724cfbef4da92d577b71f68bf832961c8919f36c60d28d289a9fc9d024d4/hf_xet-1.3.2-cp313-cp313t-win_arm64.whl", hash = "sha256:433c77c9f4e132b562f37d66c9b22c05b5479f243a1f06a120c1c06ce8b1502a", size = 3497875, upload-time = "2026-02-27T17:26:09.034Z" }, + { url = "https://files.pythonhosted.org/packages/ba/75/9d54c1ae1d05fb704f977eca1671747babf1957f19f38ae75c5933bc2dc1/hf_xet-1.3.2-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:c34e2c7aefad15792d57067c1c89b2b02c1bbaeabd7f8456ae3d07b4bbaf4094", size = 3761076, upload-time = "2026-02-27T17:25:55.42Z" }, + { url = "https://files.pythonhosted.org/packages/f2/8a/08a24b6c6f52b5d26848c16e4b6d790bb810d1bf62c3505bed179f7032d3/hf_xet-1.3.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:4bc995d6c41992831f762096020dc14a65fdf3963f86ffed580b596d04de32e3", size = 3521745, upload-time = "2026-02-27T17:25:54.217Z" }, + { url = "https://files.pythonhosted.org/packages/b5/db/a75cf400dd8a1a8acf226a12955ff6ee999f272dfc0505bafd8079a61267/hf_xet-1.3.2-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:959083c89dee30f7d6f890b36cdadda823386c4de63b1a30384a75bfd2ae995d", size = 4176301, upload-time = "2026-02-27T17:25:46.044Z" }, + { url = 
"https://files.pythonhosted.org/packages/01/40/6c4c798ffdd83e740dd3925c4e47793b07442a9efa3bc3866ba141a82365/hf_xet-1.3.2-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:cfa760888633b08c01b398d212ce7e8c0d7adac6c86e4b20dfb2397d8acd78ee", size = 3955437, upload-time = "2026-02-27T17:25:44.703Z" }, + { url = "https://files.pythonhosted.org/packages/0c/09/9a3aa7c5f07d3e5cc57bb750d12a124ffa72c273a87164bd848f9ac5cc14/hf_xet-1.3.2-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:3155a02e083aa21fd733a7485c7c36025e49d5975c8d6bda0453d224dd0b0ac4", size = 4154535, upload-time = "2026-02-27T17:26:05.207Z" }, + { url = "https://files.pythonhosted.org/packages/ae/e0/831f7fa6d90cb47a230bc23284b502c700e1483bbe459437b3844cdc0776/hf_xet-1.3.2-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:91b1dc03c31cbf733d35dc03df7c5353686233d86af045e716f1e0ea4a2673cf", size = 4393891, upload-time = "2026-02-27T17:26:06.607Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/6ed472fdce7f8b70f5da6e3f05be76816a610063003bfd6d9cea0bbb58a3/hf_xet-1.3.2-cp314-cp314t-win_amd64.whl", hash = "sha256:211f30098512d95e85ad03ae63bd7dd2c4df476558a5095d09f9e38e78cbf674", size = 3637583, upload-time = "2026-02-27T17:26:17.349Z" }, + { url = "https://files.pythonhosted.org/packages/8b/e8/a069edc4570b3f8e123c0b80fadc94530f3d7b01394e1fc1bb223339366c/hf_xet-1.3.2-cp314-cp314t-win_arm64.whl", hash = "sha256:4a6817c41de7c48ed9270da0b02849347e089c5ece9a0e72ae4f4b3a57617f82", size = 3497977, upload-time = "2026-02-27T17:26:14.966Z" }, + { url = "https://files.pythonhosted.org/packages/d8/28/dbb024e2e3907f6f3052847ca7d1a2f7a3972fafcd53ff79018977fcb3e4/hf_xet-1.3.2-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:f93b7595f1d8fefddfede775c18b5c9256757824f7f6832930b49858483cd56f", size = 3763961, upload-time = "2026-02-27T17:25:52.537Z" }, + { url = 
"https://files.pythonhosted.org/packages/e4/71/b99aed3823c9d1795e4865cf437d651097356a3f38c7d5877e4ac544b8e4/hf_xet-1.3.2-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:a85d3d43743174393afe27835bde0cd146e652b5fcfdbcd624602daef2ef3259", size = 3526171, upload-time = "2026-02-27T17:25:50.968Z" }, + { url = "https://files.pythonhosted.org/packages/9d/ca/907890ce6ef5598b5920514f255ed0a65f558f820515b18db75a51b2f878/hf_xet-1.3.2-cp37-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:7c2a054a97c44e136b1f7f5a78f12b3efffdf2eed3abc6746fc5ea4b39511633", size = 4180750, upload-time = "2026-02-27T17:25:43.125Z" }, + { url = "https://files.pythonhosted.org/packages/8c/ad/bc7f41f87173d51d0bce497b171c4ee0cbde1eed2d7b4216db5d0ada9f50/hf_xet-1.3.2-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:06b724a361f670ae557836e57801b82c75b534812e351a87a2c739f77d1e0635", size = 3961035, upload-time = "2026-02-27T17:25:41.837Z" }, + { url = "https://files.pythonhosted.org/packages/73/38/600f4dda40c4a33133404d9fe644f1d35ff2d9babb4d0435c646c63dd107/hf_xet-1.3.2-cp37-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:305f5489d7241a47e0458ef49334be02411d1d0f480846363c1c8084ed9916f7", size = 4161378, upload-time = "2026-02-27T17:26:00.365Z" }, + { url = "https://files.pythonhosted.org/packages/00/b3/7bc1ff91d1ac18420b7ad1e169b618b27c00001b96310a89f8a9294fe509/hf_xet-1.3.2-cp37-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:06cdbde243c85f39a63b28e9034321399c507bcd5e7befdd17ed2ccc06dfe14e", size = 4398020, upload-time = "2026-02-27T17:26:03.977Z" }, + { url = "https://files.pythonhosted.org/packages/2b/0b/99bfd948a3ed3620ab709276df3ad3710dcea61976918cce8706502927af/hf_xet-1.3.2-cp37-abi3-win_amd64.whl", hash = "sha256:9298b47cce6037b7045ae41482e703c471ce36b52e73e49f71226d2e8e5685a1", size = 3641624, upload-time = "2026-02-27T17:26:13.542Z" }, + { url = 
"https://files.pythonhosted.org/packages/cc/02/9a6e4ca1f3f73a164c0cd48e41b3cc56585dcc37e809250de443d673266f/hf_xet-1.3.2-cp37-abi3-win_arm64.whl", hash = "sha256:83d8ec273136171431833a6957e8f3af496bee227a0fe47c7b8b39c106d1749a", size = 3503976, upload-time = "2026-02-27T17:26:12.123Z" }, ] [[package]] @@ -656,7 +657,7 @@ wheels = [ [[package]] name = "huggingface-hub" -version = "1.4.1" +version = "1.6.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "filelock" }, @@ -665,14 +666,13 @@ dependencies = [ { name = "httpx" }, { name = "packaging" }, { name = "pyyaml" }, - { name = "shellingham" }, { name = "tqdm" }, - { name = "typer-slim" }, + { name = "typer" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c4/fc/eb9bc06130e8bbda6a616e1b80a7aa127681c448d6b49806f61db2670b61/huggingface_hub-1.4.1.tar.gz", hash = "sha256:b41131ec35e631e7383ab26d6146b8d8972abc8b6309b963b306fbcca87f5ed5", size = 642156, upload-time = "2026-02-06T09:20:03.013Z" } +sdist = { url = "https://files.pythonhosted.org/packages/d5/7a/304cec37112382c4fe29a43bcb0d5891f922785d18745883d2aa4eb74e4b/huggingface_hub-1.6.0.tar.gz", hash = "sha256:d931ddad8ba8dfc1e816bf254810eb6f38e5c32f60d4184b5885662a3b167325", size = 717071, upload-time = "2026-03-06T14:19:18.524Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/ae/2f6d96b4e6c5478d87d606a1934b5d436c4a2bce6bb7c6fdece891c128e3/huggingface_hub-1.4.1-py3-none-any.whl", hash = "sha256:9931d075fb7a79af5abc487106414ec5fba2c0ae86104c0c62fd6cae38873d18", size = 553326, upload-time = "2026-02-06T09:20:00.728Z" }, + { url = "https://files.pythonhosted.org/packages/92/e3/e3a44f54c8e2f28983fcf07f13d4260b37bd6a0d3a081041bc60b91d230e/huggingface_hub-1.6.0-py3-none-any.whl", hash = "sha256:ef40e2d5cb85e48b2c067020fa5142168342d5108a1b267478ed384ecbf18961", size = 612874, upload-time = "2026-03-06T14:19:16.844Z" }, ] [[package]] @@ -686,56 +686,83 @@ wheels = [ [[package]] name 
= "librt" -version = "0.7.8" +version = "0.8.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/24/5f3646ff414285e0f7708fa4e946b9bf538345a41d1c375c439467721a5e/librt-0.7.8.tar.gz", hash = "sha256:1a4ede613941d9c3470b0368be851df6bb78ab218635512d0370b27a277a0862", size = 148323, upload-time = "2026-01-14T12:56:16.876Z" } +sdist = { url = "https://files.pythonhosted.org/packages/56/9c/b4b0c54d84da4a94b37bd44151e46d5e583c9534c7e02250b961b1b6d8a8/librt-0.8.1.tar.gz", hash = "sha256:be46a14693955b3bd96014ccbdb8339ee8c9346fbe11c1b78901b55125f14c73", size = 177471, upload-time = "2026-02-17T16:13:06.101Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/56/04/79d8fcb43cae376c7adbab7b2b9f65e48432c9eced62ac96703bcc16e09b/librt-0.7.8-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:9b6943885b2d49c48d0cff23b16be830ba46b0152d98f62de49e735c6e655a63", size = 57472, upload-time = "2026-01-14T12:55:08.528Z" }, - { url = "https://files.pythonhosted.org/packages/b4/ba/60b96e93043d3d659da91752689023a73981336446ae82078cddf706249e/librt-0.7.8-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:46ef1f4b9b6cc364b11eea0ecc0897314447a66029ee1e55859acb3dd8757c93", size = 58986, upload-time = "2026-01-14T12:55:09.466Z" }, - { url = "https://files.pythonhosted.org/packages/7c/26/5215e4cdcc26e7be7eee21955a7e13cbf1f6d7d7311461a6014544596fac/librt-0.7.8-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:907ad09cfab21e3c86e8f1f87858f7049d1097f77196959c033612f532b4e592", size = 168422, upload-time = "2026-01-14T12:55:10.499Z" }, - { url = "https://files.pythonhosted.org/packages/0f/84/e8d1bc86fa0159bfc24f3d798d92cafd3897e84c7fea7fe61b3220915d76/librt-0.7.8-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2991b6c3775383752b3ca0204842743256f3ad3deeb1d0adc227d56b78a9a850", size = 177478, upload-time = "2026-01-14T12:55:11.577Z" }, - { 
url = "https://files.pythonhosted.org/packages/57/11/d0268c4b94717a18aa91df1100e767b010f87b7ae444dafaa5a2d80f33a6/librt-0.7.8-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:03679b9856932b8c8f674e87aa3c55ea11c9274301f76ae8dc4d281bda55cf62", size = 192439, upload-time = "2026-01-14T12:55:12.7Z" }, - { url = "https://files.pythonhosted.org/packages/8d/56/1e8e833b95fe684f80f8894ae4d8b7d36acc9203e60478fcae599120a975/librt-0.7.8-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3968762fec1b2ad34ce57458b6de25dbb4142713e9ca6279a0d352fa4e9f452b", size = 191483, upload-time = "2026-01-14T12:55:13.838Z" }, - { url = "https://files.pythonhosted.org/packages/17/48/f11cf28a2cb6c31f282009e2208312aa84a5ee2732859f7856ee306176d5/librt-0.7.8-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:bb7a7807523a31f03061288cc4ffc065d684c39db7644c676b47d89553c0d714", size = 185376, upload-time = "2026-01-14T12:55:15.017Z" }, - { url = "https://files.pythonhosted.org/packages/b8/6a/d7c116c6da561b9155b184354a60a3d5cdbf08fc7f3678d09c95679d13d9/librt-0.7.8-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ad64a14b1e56e702e19b24aae108f18ad1bf7777f3af5fcd39f87d0c5a814449", size = 206234, upload-time = "2026-01-14T12:55:16.571Z" }, - { url = "https://files.pythonhosted.org/packages/61/de/1975200bb0285fc921c5981d9978ce6ce11ae6d797df815add94a5a848a3/librt-0.7.8-cp312-cp312-win32.whl", hash = "sha256:0241a6ed65e6666236ea78203a73d800dbed896cf12ae25d026d75dc1fcd1dac", size = 44057, upload-time = "2026-01-14T12:55:18.077Z" }, - { url = "https://files.pythonhosted.org/packages/8e/cd/724f2d0b3461426730d4877754b65d39f06a41ac9d0a92d5c6840f72b9ae/librt-0.7.8-cp312-cp312-win_amd64.whl", hash = "sha256:6db5faf064b5bab9675c32a873436b31e01d66ca6984c6f7f92621656033a708", size = 50293, upload-time = "2026-01-14T12:55:19.179Z" }, - { url = 
"https://files.pythonhosted.org/packages/bd/cf/7e899acd9ee5727ad8160fdcc9994954e79fab371c66535c60e13b968ffc/librt-0.7.8-cp312-cp312-win_arm64.whl", hash = "sha256:57175aa93f804d2c08d2edb7213e09276bd49097611aefc37e3fa38d1fb99ad0", size = 43574, upload-time = "2026-01-14T12:55:20.185Z" }, - { url = "https://files.pythonhosted.org/packages/a1/fe/b1f9de2829cf7fc7649c1dcd202cfd873837c5cc2fc9e526b0e7f716c3d2/librt-0.7.8-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4c3995abbbb60b3c129490fa985dfe6cac11d88fc3c36eeb4fb1449efbbb04fc", size = 57500, upload-time = "2026-01-14T12:55:21.219Z" }, - { url = "https://files.pythonhosted.org/packages/eb/d4/4a60fbe2e53b825f5d9a77325071d61cd8af8506255067bf0c8527530745/librt-0.7.8-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:44e0c2cbc9bebd074cf2cdbe472ca185e824be4e74b1c63a8e934cea674bebf2", size = 59019, upload-time = "2026-01-14T12:55:22.256Z" }, - { url = "https://files.pythonhosted.org/packages/6a/37/61ff80341ba5159afa524445f2d984c30e2821f31f7c73cf166dcafa5564/librt-0.7.8-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:4d2f1e492cae964b3463a03dc77a7fe8742f7855d7258c7643f0ee32b6651dd3", size = 169015, upload-time = "2026-01-14T12:55:23.24Z" }, - { url = "https://files.pythonhosted.org/packages/1c/86/13d4f2d6a93f181ebf2fc953868826653ede494559da8268023fe567fca3/librt-0.7.8-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:451e7ffcef8f785831fdb791bd69211f47e95dc4c6ddff68e589058806f044c6", size = 178161, upload-time = "2026-01-14T12:55:24.826Z" }, - { url = "https://files.pythonhosted.org/packages/88/26/e24ef01305954fc4d771f1f09f3dd682f9eb610e1bec188ffb719374d26e/librt-0.7.8-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3469e1af9f1380e093ae06bedcbdd11e407ac0b303a56bbe9afb1d6824d4982d", size = 193015, upload-time = "2026-01-14T12:55:26.04Z" }, - { url = 
"https://files.pythonhosted.org/packages/88/a0/92b6bd060e720d7a31ed474d046a69bd55334ec05e9c446d228c4b806ae3/librt-0.7.8-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f11b300027ce19a34f6d24ebb0a25fd0e24a9d53353225a5c1e6cadbf2916b2e", size = 192038, upload-time = "2026-01-14T12:55:27.208Z" }, - { url = "https://files.pythonhosted.org/packages/06/bb/6f4c650253704279c3a214dad188101d1b5ea23be0606628bc6739456624/librt-0.7.8-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4adc73614f0d3c97874f02f2c7fd2a27854e7e24ad532ea6b965459c5b757eca", size = 186006, upload-time = "2026-01-14T12:55:28.594Z" }, - { url = "https://files.pythonhosted.org/packages/dc/00/1c409618248d43240cadf45f3efb866837fa77e9a12a71481912135eb481/librt-0.7.8-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:60c299e555f87e4c01b2eca085dfccda1dde87f5a604bb45c2906b8305819a93", size = 206888, upload-time = "2026-01-14T12:55:30.214Z" }, - { url = "https://files.pythonhosted.org/packages/d9/83/b2cfe8e76ff5c1c77f8a53da3d5de62d04b5ebf7cf913e37f8bca43b5d07/librt-0.7.8-cp313-cp313-win32.whl", hash = "sha256:b09c52ed43a461994716082ee7d87618096851319bf695d57ec123f2ab708951", size = 44126, upload-time = "2026-01-14T12:55:31.44Z" }, - { url = "https://files.pythonhosted.org/packages/a9/0b/c59d45de56a51bd2d3a401fc63449c0ac163e4ef7f523ea8b0c0dee86ec5/librt-0.7.8-cp313-cp313-win_amd64.whl", hash = "sha256:f8f4a901a3fa28969d6e4519deceab56c55a09d691ea7b12ca830e2fa3461e34", size = 50262, upload-time = "2026-01-14T12:55:33.01Z" }, - { url = "https://files.pythonhosted.org/packages/fc/b9/973455cec0a1ec592395250c474164c4a58ebf3e0651ee920fef1a2623f1/librt-0.7.8-cp313-cp313-win_arm64.whl", hash = "sha256:43d4e71b50763fcdcf64725ac680d8cfa1706c928b844794a7aa0fa9ac8e5f09", size = 43600, upload-time = "2026-01-14T12:55:34.054Z" }, - { url = "https://files.pythonhosted.org/packages/1a/73/fa8814c6ce2d49c3827829cadaa1589b0bf4391660bd4510899393a23ebc/librt-0.7.8-cp314-cp314-macosx_10_13_x86_64.whl", hash = 
"sha256:be927c3c94c74b05128089a955fba86501c3b544d1d300282cc1b4bd370cb418", size = 57049, upload-time = "2026-01-14T12:55:35.056Z" }, - { url = "https://files.pythonhosted.org/packages/53/fe/f6c70956da23ea235fd2e3cc16f4f0b4ebdfd72252b02d1164dd58b4e6c3/librt-0.7.8-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:7b0803e9008c62a7ef79058233db7ff6f37a9933b8f2573c05b07ddafa226611", size = 58689, upload-time = "2026-01-14T12:55:36.078Z" }, - { url = "https://files.pythonhosted.org/packages/1f/4d/7a2481444ac5fba63050d9abe823e6bc16896f575bfc9c1e5068d516cdce/librt-0.7.8-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:79feb4d00b2a4e0e05c9c56df707934f41fcb5fe53fd9efb7549068d0495b758", size = 166808, upload-time = "2026-01-14T12:55:37.595Z" }, - { url = "https://files.pythonhosted.org/packages/ac/3c/10901d9e18639f8953f57c8986796cfbf4c1c514844a41c9197cf87cb707/librt-0.7.8-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b9122094e3f24aa759c38f46bd8863433820654927370250f460ae75488b66ea", size = 175614, upload-time = "2026-01-14T12:55:38.756Z" }, - { url = "https://files.pythonhosted.org/packages/db/01/5cbdde0951a5090a80e5ba44e6357d375048123c572a23eecfb9326993a7/librt-0.7.8-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7e03bea66af33c95ce3addf87a9bf1fcad8d33e757bc479957ddbc0e4f7207ac", size = 189955, upload-time = "2026-01-14T12:55:39.939Z" }, - { url = "https://files.pythonhosted.org/packages/6a/b4/e80528d2f4b7eaf1d437fcbd6fc6ba4cbeb3e2a0cb9ed5a79f47c7318706/librt-0.7.8-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f1ade7f31675db00b514b98f9ab9a7698c7282dad4be7492589109471852d398", size = 189370, upload-time = "2026-01-14T12:55:41.057Z" }, - { url = "https://files.pythonhosted.org/packages/c1/ab/938368f8ce31a9787ecd4becb1e795954782e4312095daf8fd22420227c8/librt-0.7.8-cp314-cp314-musllinux_1_2_i686.whl", hash = 
"sha256:a14229ac62adcf1b90a15992f1ab9c69ae8b99ffb23cb64a90878a6e8a2f5b81", size = 183224, upload-time = "2026-01-14T12:55:42.328Z" }, - { url = "https://files.pythonhosted.org/packages/3c/10/559c310e7a6e4014ac44867d359ef8238465fb499e7eb31b6bfe3e3f86f5/librt-0.7.8-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:5bcaaf624fd24e6a0cb14beac37677f90793a96864c67c064a91458611446e83", size = 203541, upload-time = "2026-01-14T12:55:43.501Z" }, - { url = "https://files.pythonhosted.org/packages/f8/db/a0db7acdb6290c215f343835c6efda5b491bb05c3ddc675af558f50fdba3/librt-0.7.8-cp314-cp314-win32.whl", hash = "sha256:7aa7d5457b6c542ecaed79cec4ad98534373c9757383973e638ccced0f11f46d", size = 40657, upload-time = "2026-01-14T12:55:44.668Z" }, - { url = "https://files.pythonhosted.org/packages/72/e0/4f9bdc2a98a798511e81edcd6b54fe82767a715e05d1921115ac70717f6f/librt-0.7.8-cp314-cp314-win_amd64.whl", hash = "sha256:3d1322800771bee4a91f3b4bd4e49abc7d35e65166821086e5afd1e6c0d9be44", size = 46835, upload-time = "2026-01-14T12:55:45.655Z" }, - { url = "https://files.pythonhosted.org/packages/f9/3d/59c6402e3dec2719655a41ad027a7371f8e2334aa794ed11533ad5f34969/librt-0.7.8-cp314-cp314-win_arm64.whl", hash = "sha256:5363427bc6a8c3b1719f8f3845ea53553d301382928a86e8fab7984426949bce", size = 39885, upload-time = "2026-01-14T12:55:47.138Z" }, - { url = "https://files.pythonhosted.org/packages/4e/9c/2481d80950b83085fb14ba3c595db56330d21bbc7d88a19f20165f3538db/librt-0.7.8-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:ca916919793a77e4a98d4a1701e345d337ce53be4a16620f063191f7322ac80f", size = 59161, upload-time = "2026-01-14T12:55:48.45Z" }, - { url = "https://files.pythonhosted.org/packages/96/79/108df2cfc4e672336765d54e3ff887294c1cc36ea4335c73588875775527/librt-0.7.8-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:54feb7b4f2f6706bb82325e836a01be805770443e2400f706e824e91f6441dde", size = 61008, upload-time = "2026-01-14T12:55:49.527Z" }, - { url = 
"https://files.pythonhosted.org/packages/46/f2/30179898f9994a5637459d6e169b6abdc982012c0a4b2d4c26f50c06f911/librt-0.7.8-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:39a4c76fee41007070f872b648cc2f711f9abf9a13d0c7162478043377b52c8e", size = 187199, upload-time = "2026-01-14T12:55:50.587Z" }, - { url = "https://files.pythonhosted.org/packages/b4/da/f7563db55cebdc884f518ba3791ad033becc25ff68eb70902b1747dc0d70/librt-0.7.8-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ac9c8a458245c7de80bc1b9765b177055efff5803f08e548dd4bb9ab9a8d789b", size = 198317, upload-time = "2026-01-14T12:55:51.991Z" }, - { url = "https://files.pythonhosted.org/packages/b3/6c/4289acf076ad371471fa86718c30ae353e690d3de6167f7db36f429272f1/librt-0.7.8-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:95b67aa7eff150f075fda09d11f6bfb26edffd300f6ab1666759547581e8f666", size = 210334, upload-time = "2026-01-14T12:55:53.682Z" }, - { url = "https://files.pythonhosted.org/packages/4a/7f/377521ac25b78ac0a5ff44127a0360ee6d5ddd3ce7327949876a30533daa/librt-0.7.8-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:535929b6eff670c593c34ff435d5440c3096f20fa72d63444608a5aef64dd581", size = 211031, upload-time = "2026-01-14T12:55:54.827Z" }, - { url = "https://files.pythonhosted.org/packages/c5/b1/e1e96c3e20b23d00cf90f4aad48f0deb4cdfec2f0ed8380d0d85acf98bbf/librt-0.7.8-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:63937bd0f4d1cb56653dc7ae900d6c52c41f0015e25aaf9902481ee79943b33a", size = 204581, upload-time = "2026-01-14T12:55:56.811Z" }, - { url = "https://files.pythonhosted.org/packages/43/71/0f5d010e92ed9747e14bef35e91b6580533510f1e36a8a09eb79ee70b2f0/librt-0.7.8-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cf243da9e42d914036fd362ac3fa77d80a41cadcd11ad789b1b5eec4daaf67ca", size = 224731, upload-time = "2026-01-14T12:55:58.175Z" }, - { url = 
"https://files.pythonhosted.org/packages/22/f0/07fb6ab5c39a4ca9af3e37554f9d42f25c464829254d72e4ebbd81da351c/librt-0.7.8-cp314-cp314t-win32.whl", hash = "sha256:171ca3a0a06c643bd0a2f62a8944e1902c94aa8e5da4db1ea9a8daf872685365", size = 41173, upload-time = "2026-01-14T12:55:59.315Z" }, - { url = "https://files.pythonhosted.org/packages/24/d4/7e4be20993dc6a782639625bd2f97f3c66125c7aa80c82426956811cfccf/librt-0.7.8-cp314-cp314t-win_amd64.whl", hash = "sha256:445b7304145e24c60288a2f172b5ce2ca35c0f81605f5299f3fa567e189d2e32", size = 47668, upload-time = "2026-01-14T12:56:00.261Z" }, - { url = "https://files.pythonhosted.org/packages/fc/85/69f92b2a7b3c0f88ffe107c86b952b397004b5b8ea5a81da3d9c04c04422/librt-0.7.8-cp314-cp314t-win_arm64.whl", hash = "sha256:8766ece9de08527deabcd7cb1b4f1a967a385d26e33e536d6d8913db6ef74f06", size = 40550, upload-time = "2026-01-14T12:56:01.542Z" }, + { url = "https://files.pythonhosted.org/packages/95/21/d39b0a87ac52fc98f621fb6f8060efb017a767ebbbac2f99fbcbc9ddc0d7/librt-0.8.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a28f2612ab566b17f3698b0da021ff9960610301607c9a5e8eaca62f5e1c350a", size = 66516, upload-time = "2026-02-17T16:11:41.604Z" }, + { url = "https://files.pythonhosted.org/packages/69/f1/46375e71441c43e8ae335905e069f1c54febee63a146278bcee8782c84fd/librt-0.8.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:60a78b694c9aee2a0f1aaeaa7d101cf713e92e8423a941d2897f4fa37908dab9", size = 68634, upload-time = "2026-02-17T16:11:43.268Z" }, + { url = "https://files.pythonhosted.org/packages/0a/33/c510de7f93bf1fa19e13423a606d8189a02624a800710f6e6a0a0f0784b3/librt-0.8.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:758509ea3f1eba2a57558e7e98f4659d0ea7670bff49673b0dde18a3c7e6c0eb", size = 198941, upload-time = "2026-02-17T16:11:44.28Z" }, + { url = 
"https://files.pythonhosted.org/packages/dd/36/e725903416409a533d92398e88ce665476f275081d0d7d42f9c4951999e5/librt-0.8.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:039b9f2c506bd0ab0f8725aa5ba339c6f0cd19d3b514b50d134789809c24285d", size = 209991, upload-time = "2026-02-17T16:11:45.462Z" }, + { url = "https://files.pythonhosted.org/packages/30/7a/8d908a152e1875c9f8eac96c97a480df425e657cdb47854b9efaa4998889/librt-0.8.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5bb54f1205a3a6ab41a6fd71dfcdcbd278670d3a90ca502a30d9da583105b6f7", size = 224476, upload-time = "2026-02-17T16:11:46.542Z" }, + { url = "https://files.pythonhosted.org/packages/a8/b8/a22c34f2c485b8903a06f3fe3315341fe6876ef3599792344669db98fcff/librt-0.8.1-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:05bd41cdee35b0c59c259f870f6da532a2c5ca57db95b5f23689fcb5c9e42440", size = 217518, upload-time = "2026-02-17T16:11:47.746Z" }, + { url = "https://files.pythonhosted.org/packages/79/6f/5c6fea00357e4f82ba44f81dbfb027921f1ab10e320d4a64e1c408d035d9/librt-0.8.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:adfab487facf03f0d0857b8710cf82d0704a309d8ffc33b03d9302b4c64e91a9", size = 225116, upload-time = "2026-02-17T16:11:49.298Z" }, + { url = "https://files.pythonhosted.org/packages/f2/a0/95ced4e7b1267fe1e2720a111685bcddf0e781f7e9e0ce59d751c44dcfe5/librt-0.8.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:153188fe98a72f206042be10a2c6026139852805215ed9539186312d50a8e972", size = 217751, upload-time = "2026-02-17T16:11:50.49Z" }, + { url = "https://files.pythonhosted.org/packages/93/c2/0517281cb4d4101c27ab59472924e67f55e375bc46bedae94ac6dc6e1902/librt-0.8.1-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:dd3c41254ee98604b08bd5b3af5bf0a89740d4ee0711de95b65166bf44091921", size = 218378, upload-time = "2026-02-17T16:11:51.783Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/e8/37b3ac108e8976888e559a7b227d0ceac03c384cfd3e7a1c2ee248dbae79/librt-0.8.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e0d138c7ae532908cbb342162b2611dbd4d90c941cd25ab82084aaf71d2c0bd0", size = 241199, upload-time = "2026-02-17T16:11:53.561Z" }, + { url = "https://files.pythonhosted.org/packages/4b/5b/35812d041c53967fedf551a39399271bbe4257e681236a2cf1a69c8e7fa1/librt-0.8.1-cp312-cp312-win32.whl", hash = "sha256:43353b943613c5d9c49a25aaffdba46f888ec354e71e3529a00cca3f04d66a7a", size = 54917, upload-time = "2026-02-17T16:11:54.758Z" }, + { url = "https://files.pythonhosted.org/packages/de/d1/fa5d5331b862b9775aaf2a100f5ef86854e5d4407f71bddf102f4421e034/librt-0.8.1-cp312-cp312-win_amd64.whl", hash = "sha256:ff8baf1f8d3f4b6b7257fcb75a501f2a5499d0dda57645baa09d4d0d34b19444", size = 62017, upload-time = "2026-02-17T16:11:55.748Z" }, + { url = "https://files.pythonhosted.org/packages/c7/7c/c614252f9acda59b01a66e2ddfd243ed1c7e1deab0293332dfbccf862808/librt-0.8.1-cp312-cp312-win_arm64.whl", hash = "sha256:0f2ae3725904f7377e11cc37722d5d401e8b3d5851fb9273d7f4fe04f6b3d37d", size = 52441, upload-time = "2026-02-17T16:11:56.801Z" }, + { url = "https://files.pythonhosted.org/packages/c5/3c/f614c8e4eaac7cbf2bbdf9528790b21d89e277ee20d57dc6e559c626105f/librt-0.8.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7e6bad1cd94f6764e1e21950542f818a09316645337fd5ab9a7acc45d99a8f35", size = 66529, upload-time = "2026-02-17T16:11:57.809Z" }, + { url = "https://files.pythonhosted.org/packages/ab/96/5836544a45100ae411eda07d29e3d99448e5258b6e9c8059deb92945f5c2/librt-0.8.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:cf450f498c30af55551ba4f66b9123b7185362ec8b625a773b3d39aa1a717583", size = 68669, upload-time = "2026-02-17T16:11:58.843Z" }, + { url = 
"https://files.pythonhosted.org/packages/06/53/f0b992b57af6d5531bf4677d75c44f095f2366a1741fb695ee462ae04b05/librt-0.8.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:eca45e982fa074090057132e30585a7e8674e9e885d402eae85633e9f449ce6c", size = 199279, upload-time = "2026-02-17T16:11:59.862Z" }, + { url = "https://files.pythonhosted.org/packages/f3/ad/4848cc16e268d14280d8168aee4f31cea92bbd2b79ce33d3e166f2b4e4fc/librt-0.8.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0c3811485fccfda840861905b8c70bba5ec094e02825598bb9d4ca3936857a04", size = 210288, upload-time = "2026-02-17T16:12:00.954Z" }, + { url = "https://files.pythonhosted.org/packages/52/05/27fdc2e95de26273d83b96742d8d3b7345f2ea2bdbd2405cc504644f2096/librt-0.8.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e4af413908f77294605e28cfd98063f54b2c790561383971d2f52d113d9c363", size = 224809, upload-time = "2026-02-17T16:12:02.108Z" }, + { url = "https://files.pythonhosted.org/packages/7a/d0/78200a45ba3240cb042bc597d6f2accba9193a2c57d0356268cbbe2d0925/librt-0.8.1-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:5212a5bd7fae98dae95710032902edcd2ec4dc994e883294f75c857b83f9aba0", size = 218075, upload-time = "2026-02-17T16:12:03.631Z" }, + { url = "https://files.pythonhosted.org/packages/af/72/a210839fa74c90474897124c064ffca07f8d4b347b6574d309686aae7ca6/librt-0.8.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:e692aa2d1d604e6ca12d35e51fdc36f4cda6345e28e36374579f7ef3611b3012", size = 225486, upload-time = "2026-02-17T16:12:04.725Z" }, + { url = "https://files.pythonhosted.org/packages/a3/c1/a03cc63722339ddbf087485f253493e2b013039f5b707e8e6016141130fa/librt-0.8.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:4be2a5c926b9770c9e08e717f05737a269b9d0ebc5d2f0060f0fe3fe9ce47acb", size = 218219, upload-time = "2026-02-17T16:12:05.828Z" }, 
+ { url = "https://files.pythonhosted.org/packages/58/f5/fff6108af0acf941c6f274a946aea0e484bd10cd2dc37610287ce49388c5/librt-0.8.1-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:fd1a720332ea335ceb544cf0a03f81df92abd4bb887679fd1e460976b0e6214b", size = 218750, upload-time = "2026-02-17T16:12:07.09Z" }, + { url = "https://files.pythonhosted.org/packages/71/67/5a387bfef30ec1e4b4f30562c8586566faf87e47d696768c19feb49e3646/librt-0.8.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:93c2af9e01e0ef80d95ae3c720be101227edae5f2fe7e3dc63d8857fadfc5a1d", size = 241624, upload-time = "2026-02-17T16:12:08.43Z" }, + { url = "https://files.pythonhosted.org/packages/d4/be/24f8502db11d405232ac1162eb98069ca49c3306c1d75c6ccc61d9af8789/librt-0.8.1-cp313-cp313-win32.whl", hash = "sha256:086a32dbb71336627e78cc1d6ee305a68d038ef7d4c39aaff41ae8c9aa46e91a", size = 54969, upload-time = "2026-02-17T16:12:09.633Z" }, + { url = "https://files.pythonhosted.org/packages/5c/73/c9fdf6cb2a529c1a092ce769a12d88c8cca991194dfe641b6af12fa964d2/librt-0.8.1-cp313-cp313-win_amd64.whl", hash = "sha256:e11769a1dbda4da7b00a76cfffa67aa47cfa66921d2724539eee4b9ede780b79", size = 62000, upload-time = "2026-02-17T16:12:10.632Z" }, + { url = "https://files.pythonhosted.org/packages/d3/97/68f80ca3ac4924f250cdfa6e20142a803e5e50fca96ef5148c52ee8c10ea/librt-0.8.1-cp313-cp313-win_arm64.whl", hash = "sha256:924817ab3141aca17893386ee13261f1d100d1ef410d70afe4389f2359fea4f0", size = 52495, upload-time = "2026-02-17T16:12:11.633Z" }, + { url = "https://files.pythonhosted.org/packages/c9/6a/907ef6800f7bca71b525a05f1839b21f708c09043b1c6aa77b6b827b3996/librt-0.8.1-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:6cfa7fe54fd4d1f47130017351a959fe5804bda7a0bc7e07a2cdbc3fdd28d34f", size = 66081, upload-time = "2026-02-17T16:12:12.766Z" }, + { url = "https://files.pythonhosted.org/packages/1b/18/25e991cd5640c9fb0f8d91b18797b29066b792f17bf8493da183bf5caabe/librt-0.8.1-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:228c2409c079f8c11fb2e5d7b277077f694cb93443eb760e00b3b83cb8b3176c", size = 68309, upload-time = "2026-02-17T16:12:13.756Z" }, + { url = "https://files.pythonhosted.org/packages/a4/36/46820d03f058cfb5a9de5940640ba03165ed8aded69e0733c417bb04df34/librt-0.8.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7aae78ab5e3206181780e56912d1b9bb9f90a7249ce12f0e8bf531d0462dd0fc", size = 196804, upload-time = "2026-02-17T16:12:14.818Z" }, + { url = "https://files.pythonhosted.org/packages/59/18/5dd0d3b87b8ff9c061849fbdb347758d1f724b9a82241aa908e0ec54ccd0/librt-0.8.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:172d57ec04346b047ca6af181e1ea4858086c80bdf455f61994c4aa6fc3f866c", size = 206907, upload-time = "2026-02-17T16:12:16.513Z" }, + { url = "https://files.pythonhosted.org/packages/d1/96/ef04902aad1424fd7299b62d1890e803e6ab4018c3044dca5922319c4b97/librt-0.8.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6b1977c4ea97ce5eb7755a78fae68d87e4102e4aaf54985e8b56806849cc06a3", size = 221217, upload-time = "2026-02-17T16:12:17.906Z" }, + { url = "https://files.pythonhosted.org/packages/6d/ff/7e01f2dda84a8f5d280637a2e5827210a8acca9a567a54507ef1c75b342d/librt-0.8.1-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:10c42e1f6fd06733ef65ae7bebce2872bcafd8d6e6b0a08fe0a05a23b044fb14", size = 214622, upload-time = "2026-02-17T16:12:19.108Z" }, + { url = "https://files.pythonhosted.org/packages/1e/8c/5b093d08a13946034fed57619742f790faf77058558b14ca36a6e331161e/librt-0.8.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:4c8dfa264b9193c4ee19113c985c95f876fae5e51f731494fc4e0cf594990ba7", size = 221987, upload-time = "2026-02-17T16:12:20.331Z" }, + { url = 
"https://files.pythonhosted.org/packages/d3/cc/86b0b3b151d40920ad45a94ce0171dec1aebba8a9d72bb3fa00c73ab25dd/librt-0.8.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:01170b6729a438f0dedc4a26ed342e3dc4f02d1000b4b19f980e1877f0c297e6", size = 215132, upload-time = "2026-02-17T16:12:21.54Z" }, + { url = "https://files.pythonhosted.org/packages/fc/be/8588164a46edf1e69858d952654e216a9a91174688eeefb9efbb38a9c799/librt-0.8.1-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:7b02679a0d783bdae30d443025b94465d8c3dc512f32f5b5031f93f57ac32071", size = 215195, upload-time = "2026-02-17T16:12:23.073Z" }, + { url = "https://files.pythonhosted.org/packages/f5/f2/0b9279bea735c734d69344ecfe056c1ba211694a72df10f568745c899c76/librt-0.8.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:190b109bb69592a3401fe1ffdea41a2e73370ace2ffdc4a0e8e2b39cdea81b78", size = 237946, upload-time = "2026-02-17T16:12:24.275Z" }, + { url = "https://files.pythonhosted.org/packages/e9/cc/5f2a34fbc8aeb35314a3641f9956fa9051a947424652fad9882be7a97949/librt-0.8.1-cp314-cp314-win32.whl", hash = "sha256:e70a57ecf89a0f64c24e37f38d3fe217a58169d2fe6ed6d70554964042474023", size = 50689, upload-time = "2026-02-17T16:12:25.766Z" }, + { url = "https://files.pythonhosted.org/packages/a0/76/cd4d010ab2147339ca2b93e959c3686e964edc6de66ddacc935c325883d7/librt-0.8.1-cp314-cp314-win_amd64.whl", hash = "sha256:7e2f3edca35664499fbb36e4770650c4bd4a08abc1f4458eab9df4ec56389730", size = 57875, upload-time = "2026-02-17T16:12:27.465Z" }, + { url = "https://files.pythonhosted.org/packages/84/0f/2143cb3c3ca48bd3379dcd11817163ca50781927c4537345d608b5045998/librt-0.8.1-cp314-cp314-win_arm64.whl", hash = "sha256:0d2f82168e55ddefd27c01c654ce52379c0750ddc31ee86b4b266bcf4d65f2a3", size = 48058, upload-time = "2026-02-17T16:12:28.556Z" }, + { url = "https://files.pythonhosted.org/packages/d2/0e/9b23a87e37baf00311c3efe6b48d6b6c168c29902dfc3f04c338372fd7db/librt-0.8.1-cp314-cp314t-macosx_10_13_x86_64.whl", hash = 
"sha256:2c74a2da57a094bd48d03fa5d196da83d2815678385d2978657499063709abe1", size = 68313, upload-time = "2026-02-17T16:12:29.659Z" }, + { url = "https://files.pythonhosted.org/packages/db/9a/859c41e5a4f1c84200a7d2b92f586aa27133c8243b6cac9926f6e54d01b9/librt-0.8.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a355d99c4c0d8e5b770313b8b247411ed40949ca44e33e46a4789b9293a907ee", size = 70994, upload-time = "2026-02-17T16:12:31.516Z" }, + { url = "https://files.pythonhosted.org/packages/4c/28/10605366ee599ed34223ac2bf66404c6fb59399f47108215d16d5ad751a8/librt-0.8.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:2eb345e8b33fb748227409c9f1233d4df354d6e54091f0e8fc53acdb2ffedeb7", size = 220770, upload-time = "2026-02-17T16:12:33.294Z" }, + { url = "https://files.pythonhosted.org/packages/af/8d/16ed8fd452dafae9c48d17a6bc1ee3e818fd40ef718d149a8eff2c9f4ea2/librt-0.8.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9be2f15e53ce4e83cc08adc29b26fb5978db62ef2a366fbdf716c8a6c8901040", size = 235409, upload-time = "2026-02-17T16:12:35.443Z" }, + { url = "https://files.pythonhosted.org/packages/89/1b/7bdf3e49349c134b25db816e4a3db6b94a47ac69d7d46b1e682c2c4949be/librt-0.8.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:785ae29c1f5c6e7c2cde2c7c0e148147f4503da3abc5d44d482068da5322fd9e", size = 246473, upload-time = "2026-02-17T16:12:36.656Z" }, + { url = "https://files.pythonhosted.org/packages/4e/8a/91fab8e4fd2a24930a17188c7af5380eb27b203d72101c9cc000dbdfd95a/librt-0.8.1-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:1d3a7da44baf692f0c6aeb5b2a09c5e6fc7a703bca9ffa337ddd2e2da53f7732", size = 238866, upload-time = "2026-02-17T16:12:37.849Z" }, + { url = 
"https://files.pythonhosted.org/packages/b9/e0/c45a098843fc7c07e18a7f8a24ca8496aecbf7bdcd54980c6ca1aaa79a8e/librt-0.8.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:5fc48998000cbc39ec0d5311312dda93ecf92b39aaf184c5e817d5d440b29624", size = 250248, upload-time = "2026-02-17T16:12:39.445Z" }, + { url = "https://files.pythonhosted.org/packages/82/30/07627de23036640c952cce0c1fe78972e77d7d2f8fd54fa5ef4554ff4a56/librt-0.8.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:e96baa6820280077a78244b2e06e416480ed859bbd8e5d641cf5742919d8beb4", size = 240629, upload-time = "2026-02-17T16:12:40.889Z" }, + { url = "https://files.pythonhosted.org/packages/fb/c1/55bfe1ee3542eba055616f9098eaf6eddb966efb0ca0f44eaa4aba327307/librt-0.8.1-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:31362dbfe297b23590530007062c32c6f6176f6099646bb2c95ab1b00a57c382", size = 239615, upload-time = "2026-02-17T16:12:42.446Z" }, + { url = "https://files.pythonhosted.org/packages/2b/39/191d3d28abc26c9099b19852e6c99f7f6d400b82fa5a4e80291bd3803e19/librt-0.8.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:cc3656283d11540ab0ea01978378e73e10002145117055e03722417aeab30994", size = 263001, upload-time = "2026-02-17T16:12:43.627Z" }, + { url = "https://files.pythonhosted.org/packages/b9/eb/7697f60fbe7042ab4e88f4ee6af496b7f222fffb0a4e3593ef1f29f81652/librt-0.8.1-cp314-cp314t-win32.whl", hash = "sha256:738f08021b3142c2918c03692608baed43bc51144c29e35807682f8070ee2a3a", size = 51328, upload-time = "2026-02-17T16:12:45.148Z" }, + { url = "https://files.pythonhosted.org/packages/7c/72/34bf2eb7a15414a23e5e70ecb9440c1d3179f393d9349338a91e2781c0fb/librt-0.8.1-cp314-cp314t-win_amd64.whl", hash = "sha256:89815a22daf9c51884fb5dbe4f1ef65ee6a146e0b6a8df05f753e2e4a9359bf4", size = 58722, upload-time = "2026-02-17T16:12:46.85Z" }, + { url = "https://files.pythonhosted.org/packages/b2/c8/d148e041732d631fc76036f8b30fae4e77b027a1e95b7a84bb522481a940/librt-0.8.1-cp314-cp314t-win_arm64.whl", hash = 
"sha256:bf512a71a23504ed08103a13c941f763db13fb11177beb3d9244c98c29fb4a61", size = 48755, upload-time = "2026-02-17T16:12:47.943Z" }, +] + +[[package]] +name = "marco-polo" +version = "1.0.0" +source = { editable = "." } +dependencies = [ + { name = "gen-worker" }, +] + +[package.dev-dependencies] +dev = [ + { name = "mypy" }, ] +[package.metadata] +requires-dist = [{ name = "gen-worker", specifier = ">=0.3.10" }] + +[package.metadata.requires-dev] +dev = [{ name = "mypy", specifier = ">=1.10.0" }] + [[package]] name = "markdown-it-py" version = "4.0.0" @@ -799,101 +826,101 @@ wheels = [ [[package]] name = "multidict" -version = "6.7.0" +version = "6.7.1" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/80/1e/5492c365f222f907de1039b91f922b93fa4f764c713ee858d235495d8f50/multidict-6.7.0.tar.gz", hash = "sha256:c6e99d9a65ca282e578dfea819cfa9c0a62b2499d8677392e09feaf305e9e6f5", size = 101834, upload-time = "2025-10-06T14:52:30.657Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1a/c2/c2d94cbe6ac1753f3fc980da97b3d930efe1da3af3c9f5125354436c073d/multidict-6.7.1.tar.gz", hash = "sha256:ec6652a1bee61c53a3e5776b6049172c53b6aaba34f18c9ad04f82712bac623d", size = 102010, upload-time = "2026-01-26T02:46:45.979Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/c2/9e/9f61ac18d9c8b475889f32ccfa91c9f59363480613fc807b6e3023d6f60b/multidict-6.7.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:8a3862568a36d26e650a19bb5cbbba14b71789032aebc0423f8cc5f150730184", size = 76877, upload-time = "2025-10-06T14:49:20.884Z" }, - { url = "https://files.pythonhosted.org/packages/38/6f/614f09a04e6184f8824268fce4bc925e9849edfa654ddd59f0b64508c595/multidict-6.7.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:960c60b5849b9b4f9dcc9bea6e3626143c252c74113df2c1540aebce70209b45", size = 45467, upload-time = "2025-10-06T14:49:22.054Z" }, - { url = 
"https://files.pythonhosted.org/packages/b3/93/c4f67a436dd026f2e780c433277fff72be79152894d9fc36f44569cab1a6/multidict-6.7.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:2049be98fb57a31b4ccf870bf377af2504d4ae35646a19037ec271e4c07998aa", size = 43834, upload-time = "2025-10-06T14:49:23.566Z" }, - { url = "https://files.pythonhosted.org/packages/7f/f5/013798161ca665e4a422afbc5e2d9e4070142a9ff8905e482139cd09e4d0/multidict-6.7.0-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:0934f3843a1860dd465d38895c17fce1f1cb37295149ab05cd1b9a03afacb2a7", size = 250545, upload-time = "2025-10-06T14:49:24.882Z" }, - { url = "https://files.pythonhosted.org/packages/71/2f/91dbac13e0ba94669ea5119ba267c9a832f0cb65419aca75549fcf09a3dc/multidict-6.7.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b3e34f3a1b8131ba06f1a73adab24f30934d148afcd5f5de9a73565a4404384e", size = 258305, upload-time = "2025-10-06T14:49:26.778Z" }, - { url = "https://files.pythonhosted.org/packages/ef/b0/754038b26f6e04488b48ac621f779c341338d78503fb45403755af2df477/multidict-6.7.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:efbb54e98446892590dc2458c19c10344ee9a883a79b5cec4bc34d6656e8d546", size = 242363, upload-time = "2025-10-06T14:49:28.562Z" }, - { url = "https://files.pythonhosted.org/packages/87/15/9da40b9336a7c9fa606c4cf2ed80a649dffeb42b905d4f63a1d7eb17d746/multidict-6.7.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a35c5fc61d4f51eb045061e7967cfe3123d622cd500e8868e7c0c592a09fedc4", size = 268375, upload-time = "2025-10-06T14:49:29.96Z" }, - { url = "https://files.pythonhosted.org/packages/82/72/c53fcade0cc94dfaad583105fd92b3a783af2091eddcb41a6d5a52474000/multidict-6.7.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:29fe6740ebccba4175af1b9b87bf553e9c15cd5868ee967e010efcf94e4fd0f1", size = 269346, upload-time = "2025-10-06T14:49:31.404Z" }, - { url = "https://files.pythonhosted.org/packages/0d/e2/9baffdae21a76f77ef8447f1a05a96ec4bc0a24dae08767abc0a2fe680b8/multidict-6.7.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:123e2a72e20537add2f33a79e605f6191fba2afda4cbb876e35c1a7074298a7d", size = 256107, upload-time = "2025-10-06T14:49:32.974Z" }, - { url = "https://files.pythonhosted.org/packages/3c/06/3f06f611087dc60d65ef775f1fb5aca7c6d61c6db4990e7cda0cef9b1651/multidict-6.7.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:b284e319754366c1aee2267a2036248b24eeb17ecd5dc16022095e747f2f4304", size = 253592, upload-time = "2025-10-06T14:49:34.52Z" }, - { url = "https://files.pythonhosted.org/packages/20/24/54e804ec7945b6023b340c412ce9c3f81e91b3bf5fa5ce65558740141bee/multidict-6.7.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:803d685de7be4303b5a657b76e2f6d1240e7e0a8aa2968ad5811fa2285553a12", size = 251024, upload-time = "2025-10-06T14:49:35.956Z" }, - { url = "https://files.pythonhosted.org/packages/14/48/011cba467ea0b17ceb938315d219391d3e421dfd35928e5dbdc3f4ae76ef/multidict-6.7.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c04a328260dfd5db8c39538f999f02779012268f54614902d0afc775d44e0a62", size = 251484, upload-time = "2025-10-06T14:49:37.631Z" }, - { url = "https://files.pythonhosted.org/packages/0d/2f/919258b43bb35b99fa127435cfb2d91798eb3a943396631ef43e3720dcf4/multidict-6.7.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8a19cdb57cd3df4cd865849d93ee14920fb97224300c88501f16ecfa2604b4e0", size = 263579, upload-time = "2025-10-06T14:49:39.502Z" }, - { url = "https://files.pythonhosted.org/packages/31/22/a0e884d86b5242b5a74cf08e876bdf299e413016b66e55511f7a804a366e/multidict-6.7.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = 
"sha256:9b2fd74c52accced7e75de26023b7dccee62511a600e62311b918ec5c168fc2a", size = 259654, upload-time = "2025-10-06T14:49:41.32Z" }, - { url = "https://files.pythonhosted.org/packages/b2/e5/17e10e1b5c5f5a40f2fcbb45953c9b215f8a4098003915e46a93f5fcaa8f/multidict-6.7.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3e8bfdd0e487acf992407a140d2589fe598238eaeffa3da8448d63a63cd363f8", size = 251511, upload-time = "2025-10-06T14:49:46.021Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9a/201bb1e17e7af53139597069c375e7b0dcbd47594604f65c2d5359508566/multidict-6.7.0-cp312-cp312-win32.whl", hash = "sha256:dd32a49400a2c3d52088e120ee00c1e3576cbff7e10b98467962c74fdb762ed4", size = 41895, upload-time = "2025-10-06T14:49:48.718Z" }, - { url = "https://files.pythonhosted.org/packages/46/e2/348cd32faad84eaf1d20cce80e2bb0ef8d312c55bca1f7fa9865e7770aaf/multidict-6.7.0-cp312-cp312-win_amd64.whl", hash = "sha256:92abb658ef2d7ef22ac9f8bb88e8b6c3e571671534e029359b6d9e845923eb1b", size = 46073, upload-time = "2025-10-06T14:49:50.28Z" }, - { url = "https://files.pythonhosted.org/packages/25/ec/aad2613c1910dce907480e0c3aa306905830f25df2e54ccc9dea450cb5aa/multidict-6.7.0-cp312-cp312-win_arm64.whl", hash = "sha256:490dab541a6a642ce1a9d61a4781656b346a55c13038f0b1244653828e3a83ec", size = 43226, upload-time = "2025-10-06T14:49:52.304Z" }, - { url = "https://files.pythonhosted.org/packages/d2/86/33272a544eeb36d66e4d9a920602d1a2f57d4ebea4ef3cdfe5a912574c95/multidict-6.7.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:bee7c0588aa0076ce77c0ea5d19a68d76ad81fcd9fe8501003b9a24f9d4000f6", size = 76135, upload-time = "2025-10-06T14:49:54.26Z" }, - { url = "https://files.pythonhosted.org/packages/91/1c/eb97db117a1ebe46d457a3d235a7b9d2e6dcab174f42d1b67663dd9e5371/multidict-6.7.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:7ef6b61cad77091056ce0e7ce69814ef72afacb150b7ac6a3e9470def2198159", size = 45117, upload-time = "2025-10-06T14:49:55.82Z" }, - { url = 
"https://files.pythonhosted.org/packages/f1/d8/6c3442322e41fb1dd4de8bd67bfd11cd72352ac131f6368315617de752f1/multidict-6.7.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:9c0359b1ec12b1d6849c59f9d319610b7f20ef990a6d454ab151aa0e3b9f78ca", size = 43472, upload-time = "2025-10-06T14:49:57.048Z" }, - { url = "https://files.pythonhosted.org/packages/75/3f/e2639e80325af0b6c6febdf8e57cc07043ff15f57fa1ef808f4ccb5ac4cd/multidict-6.7.0-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cd240939f71c64bd658f186330603aac1a9a81bf6273f523fca63673cb7378a8", size = 249342, upload-time = "2025-10-06T14:49:58.368Z" }, - { url = "https://files.pythonhosted.org/packages/5d/cc/84e0585f805cbeaa9cbdaa95f9a3d6aed745b9d25700623ac89a6ecff400/multidict-6.7.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:a60a4d75718a5efa473ebd5ab685786ba0c67b8381f781d1be14da49f1a2dc60", size = 257082, upload-time = "2025-10-06T14:49:59.89Z" }, - { url = "https://files.pythonhosted.org/packages/b0/9c/ac851c107c92289acbbf5cfb485694084690c1b17e555f44952c26ddc5bd/multidict-6.7.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53a42d364f323275126aff81fb67c5ca1b7a04fda0546245730a55c8c5f24bc4", size = 240704, upload-time = "2025-10-06T14:50:01.485Z" }, - { url = "https://files.pythonhosted.org/packages/50/cc/5f93e99427248c09da95b62d64b25748a5f5c98c7c2ab09825a1d6af0e15/multidict-6.7.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:3b29b980d0ddbecb736735ee5bef69bb2ddca56eff603c86f3f29a1128299b4f", size = 266355, upload-time = "2025-10-06T14:50:02.955Z" }, - { url = "https://files.pythonhosted.org/packages/ec/0c/2ec1d883ceb79c6f7f6d7ad90c919c898f5d1c6ea96d322751420211e072/multidict-6.7.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:f8a93b1c0ed2d04b97a5e9336fd2d33371b9a6e29ab7dd6503d63407c20ffbaf", size = 267259, upload-time = "2025-10-06T14:50:04.446Z" }, - { url = "https://files.pythonhosted.org/packages/c6/2d/f0b184fa88d6630aa267680bdb8623fb69cb0d024b8c6f0d23f9a0f406d3/multidict-6.7.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ff96e8815eecacc6645da76c413eb3b3d34cfca256c70b16b286a687d013c32", size = 254903, upload-time = "2025-10-06T14:50:05.98Z" }, - { url = "https://files.pythonhosted.org/packages/06/c9/11ea263ad0df7dfabcad404feb3c0dd40b131bc7f232d5537f2fb1356951/multidict-6.7.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:7516c579652f6a6be0e266aec0acd0db80829ca305c3d771ed898538804c2036", size = 252365, upload-time = "2025-10-06T14:50:07.511Z" }, - { url = "https://files.pythonhosted.org/packages/41/88/d714b86ee2c17d6e09850c70c9d310abac3d808ab49dfa16b43aba9d53fd/multidict-6.7.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:040f393368e63fb0f3330e70c26bfd336656bed925e5cbe17c9da839a6ab13ec", size = 250062, upload-time = "2025-10-06T14:50:09.074Z" }, - { url = "https://files.pythonhosted.org/packages/15/fe/ad407bb9e818c2b31383f6131ca19ea7e35ce93cf1310fce69f12e89de75/multidict-6.7.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b3bc26a951007b1057a1c543af845f1c7e3e71cc240ed1ace7bf4484aa99196e", size = 249683, upload-time = "2025-10-06T14:50:10.714Z" }, - { url = "https://files.pythonhosted.org/packages/8c/a4/a89abdb0229e533fb925e7c6e5c40201c2873efebc9abaf14046a4536ee6/multidict-6.7.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:7b022717c748dd1992a83e219587aabe45980d88969f01b316e78683e6285f64", size = 261254, upload-time = "2025-10-06T14:50:12.28Z" }, - { url = "https://files.pythonhosted.org/packages/8d/aa/0e2b27bd88b40a4fb8dc53dd74eecac70edaa4c1dd0707eb2164da3675b3/multidict-6.7.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:9600082733859f00d79dee64effc7aef1beb26adb297416a4ad2116fd61374bd", 
size = 257967, upload-time = "2025-10-06T14:50:14.16Z" }, - { url = "https://files.pythonhosted.org/packages/d0/8e/0c67b7120d5d5f6d874ed85a085f9dc770a7f9d8813e80f44a9fec820bb7/multidict-6.7.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:94218fcec4d72bc61df51c198d098ce2b378e0ccbac41ddbed5ef44092913288", size = 250085, upload-time = "2025-10-06T14:50:15.639Z" }, - { url = "https://files.pythonhosted.org/packages/ba/55/b73e1d624ea4b8fd4dd07a3bb70f6e4c7c6c5d9d640a41c6ffe5cdbd2a55/multidict-6.7.0-cp313-cp313-win32.whl", hash = "sha256:a37bd74c3fa9d00be2d7b8eca074dc56bd8077ddd2917a839bd989612671ed17", size = 41713, upload-time = "2025-10-06T14:50:17.066Z" }, - { url = "https://files.pythonhosted.org/packages/32/31/75c59e7d3b4205075b4c183fa4ca398a2daf2303ddf616b04ae6ef55cffe/multidict-6.7.0-cp313-cp313-win_amd64.whl", hash = "sha256:30d193c6cc6d559db42b6bcec8a5d395d34d60c9877a0b71ecd7c204fcf15390", size = 45915, upload-time = "2025-10-06T14:50:18.264Z" }, - { url = "https://files.pythonhosted.org/packages/31/2a/8987831e811f1184c22bc2e45844934385363ee61c0a2dcfa8f71b87e608/multidict-6.7.0-cp313-cp313-win_arm64.whl", hash = "sha256:ea3334cabe4d41b7ccd01e4d349828678794edbc2d3ae97fc162a3312095092e", size = 43077, upload-time = "2025-10-06T14:50:19.853Z" }, - { url = "https://files.pythonhosted.org/packages/e8/68/7b3a5170a382a340147337b300b9eb25a9ddb573bcdfff19c0fa3f31ffba/multidict-6.7.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:ad9ce259f50abd98a1ca0aa6e490b58c316a0fce0617f609723e40804add2c00", size = 83114, upload-time = "2025-10-06T14:50:21.223Z" }, - { url = "https://files.pythonhosted.org/packages/55/5c/3fa2d07c84df4e302060f555bbf539310980362236ad49f50eeb0a1c1eb9/multidict-6.7.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:07f5594ac6d084cbb5de2df218d78baf55ef150b91f0ff8a21cc7a2e3a5a58eb", size = 48442, upload-time = "2025-10-06T14:50:22.871Z" }, - { url = 
"https://files.pythonhosted.org/packages/fc/56/67212d33239797f9bd91962bb899d72bb0f4c35a8652dcdb8ed049bef878/multidict-6.7.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:0591b48acf279821a579282444814a2d8d0af624ae0bc600aa4d1b920b6e924b", size = 46885, upload-time = "2025-10-06T14:50:24.258Z" }, - { url = "https://files.pythonhosted.org/packages/46/d1/908f896224290350721597a61a69cd19b89ad8ee0ae1f38b3f5cd12ea2ac/multidict-6.7.0-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:749a72584761531d2b9467cfbdfd29487ee21124c304c4b6cb760d8777b27f9c", size = 242588, upload-time = "2025-10-06T14:50:25.716Z" }, - { url = "https://files.pythonhosted.org/packages/ab/67/8604288bbd68680eee0ab568fdcb56171d8b23a01bcd5cb0c8fedf6e5d99/multidict-6.7.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b4c3d199f953acd5b446bf7c0de1fe25d94e09e79086f8dc2f48a11a129cdf1", size = 249966, upload-time = "2025-10-06T14:50:28.192Z" }, - { url = "https://files.pythonhosted.org/packages/20/33/9228d76339f1ba51e3efef7da3ebd91964d3006217aae13211653193c3ff/multidict-6.7.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:9fb0211dfc3b51efea2f349ec92c114d7754dd62c01f81c3e32b765b70c45c9b", size = 228618, upload-time = "2025-10-06T14:50:29.82Z" }, - { url = "https://files.pythonhosted.org/packages/f8/2d/25d9b566d10cab1c42b3b9e5b11ef79c9111eaf4463b8c257a3bd89e0ead/multidict-6.7.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a027ec240fe73a8d6281872690b988eed307cd7d91b23998ff35ff577ca688b5", size = 257539, upload-time = "2025-10-06T14:50:31.731Z" }, - { url = "https://files.pythonhosted.org/packages/b6/b1/8d1a965e6637fc33de3c0d8f414485c2b7e4af00f42cab3d84e7b955c222/multidict-6.7.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:d1d964afecdf3a8288789df2f5751dc0a8261138c3768d9af117ed384e538fad", size = 256345, upload-time = "2025-10-06T14:50:33.26Z" }, - { url = "https://files.pythonhosted.org/packages/ba/0c/06b5a8adbdeedada6f4fb8d8f193d44a347223b11939b42953eeb6530b6b/multidict-6.7.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:caf53b15b1b7df9fbd0709aa01409000a2b4dd03a5f6f5cc548183c7c8f8b63c", size = 247934, upload-time = "2025-10-06T14:50:34.808Z" }, - { url = "https://files.pythonhosted.org/packages/8f/31/b2491b5fe167ca044c6eb4b8f2c9f3b8a00b24c432c365358eadac5d7625/multidict-6.7.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:654030da3197d927f05a536a66186070e98765aa5142794c9904555d3a9d8fb5", size = 245243, upload-time = "2025-10-06T14:50:36.436Z" }, - { url = "https://files.pythonhosted.org/packages/61/1a/982913957cb90406c8c94f53001abd9eafc271cb3e70ff6371590bec478e/multidict-6.7.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:2090d3718829d1e484706a2f525e50c892237b2bf9b17a79b059cb98cddc2f10", size = 235878, upload-time = "2025-10-06T14:50:37.953Z" }, - { url = "https://files.pythonhosted.org/packages/be/c0/21435d804c1a1cf7a2608593f4d19bca5bcbd7a81a70b253fdd1c12af9c0/multidict-6.7.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2d2cfeec3f6f45651b3d408c4acec0ebf3daa9bc8a112a084206f5db5d05b754", size = 243452, upload-time = "2025-10-06T14:50:39.574Z" }, - { url = "https://files.pythonhosted.org/packages/54/0a/4349d540d4a883863191be6eb9a928846d4ec0ea007d3dcd36323bb058ac/multidict-6.7.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:4ef089f985b8c194d341eb2c24ae6e7408c9a0e2e5658699c92f497437d88c3c", size = 252312, upload-time = "2025-10-06T14:50:41.612Z" }, - { url = "https://files.pythonhosted.org/packages/26/64/d5416038dbda1488daf16b676e4dbfd9674dde10a0cc8f4fc2b502d8125d/multidict-6.7.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = 
"sha256:e93a0617cd16998784bf4414c7e40f17a35d2350e5c6f0bd900d3a8e02bd3762", size = 246935, upload-time = "2025-10-06T14:50:43.972Z" }, - { url = "https://files.pythonhosted.org/packages/9f/8c/8290c50d14e49f35e0bd4abc25e1bc7711149ca9588ab7d04f886cdf03d9/multidict-6.7.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:f0feece2ef8ebc42ed9e2e8c78fc4aa3cf455733b507c09ef7406364c94376c6", size = 243385, upload-time = "2025-10-06T14:50:45.648Z" }, - { url = "https://files.pythonhosted.org/packages/ef/a0/f83ae75e42d694b3fbad3e047670e511c138be747bc713cf1b10d5096416/multidict-6.7.0-cp313-cp313t-win32.whl", hash = "sha256:19a1d55338ec1be74ef62440ca9e04a2f001a04d0cc49a4983dc320ff0f3212d", size = 47777, upload-time = "2025-10-06T14:50:47.154Z" }, - { url = "https://files.pythonhosted.org/packages/dc/80/9b174a92814a3830b7357307a792300f42c9e94664b01dee8e457551fa66/multidict-6.7.0-cp313-cp313t-win_amd64.whl", hash = "sha256:3da4fb467498df97e986af166b12d01f05d2e04f978a9c1c680ea1988e0bc4b6", size = 53104, upload-time = "2025-10-06T14:50:48.851Z" }, - { url = "https://files.pythonhosted.org/packages/cc/28/04baeaf0428d95bb7a7bea0e691ba2f31394338ba424fb0679a9ed0f4c09/multidict-6.7.0-cp313-cp313t-win_arm64.whl", hash = "sha256:b4121773c49a0776461f4a904cdf6264c88e42218aaa8407e803ca8025872792", size = 45503, upload-time = "2025-10-06T14:50:50.16Z" }, - { url = "https://files.pythonhosted.org/packages/e2/b1/3da6934455dd4b261d4c72f897e3a5728eba81db59959f3a639245891baa/multidict-6.7.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:3bab1e4aff7adaa34410f93b1f8e57c4b36b9af0426a76003f441ee1d3c7e842", size = 75128, upload-time = "2025-10-06T14:50:51.92Z" }, - { url = "https://files.pythonhosted.org/packages/14/2c/f069cab5b51d175a1a2cb4ccdf7a2c2dabd58aa5bd933fa036a8d15e2404/multidict-6.7.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:b8512bac933afc3e45fb2b18da8e59b78d4f408399a960339598374d4ae3b56b", size = 44410, upload-time = "2025-10-06T14:50:53.275Z" }, - { url = 
"https://files.pythonhosted.org/packages/42/e2/64bb41266427af6642b6b128e8774ed84c11b80a90702c13ac0a86bb10cc/multidict-6.7.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:79dcf9e477bc65414ebfea98ffd013cb39552b5ecd62908752e0e413d6d06e38", size = 43205, upload-time = "2025-10-06T14:50:54.911Z" }, - { url = "https://files.pythonhosted.org/packages/02/68/6b086fef8a3f1a8541b9236c594f0c9245617c29841f2e0395d979485cde/multidict-6.7.0-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:31bae522710064b5cbeddaf2e9f32b1abab70ac6ac91d42572502299e9953128", size = 245084, upload-time = "2025-10-06T14:50:56.369Z" }, - { url = "https://files.pythonhosted.org/packages/15/ee/f524093232007cd7a75c1d132df70f235cfd590a7c9eaccd7ff422ef4ae8/multidict-6.7.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4a0df7ff02397bb63e2fd22af2c87dfa39e8c7f12947bc524dbdc528282c7e34", size = 252667, upload-time = "2025-10-06T14:50:57.991Z" }, - { url = "https://files.pythonhosted.org/packages/02/a5/eeb3f43ab45878f1895118c3ef157a480db58ede3f248e29b5354139c2c9/multidict-6.7.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:7a0222514e8e4c514660e182d5156a415c13ef0aabbd71682fc714e327b95e99", size = 233590, upload-time = "2025-10-06T14:50:59.589Z" }, - { url = "https://files.pythonhosted.org/packages/6a/1e/76d02f8270b97269d7e3dbd45644b1785bda457b474315f8cf999525a193/multidict-6.7.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:2397ab4daaf2698eb51a76721e98db21ce4f52339e535725de03ea962b5a3202", size = 264112, upload-time = "2025-10-06T14:51:01.183Z" }, - { url = "https://files.pythonhosted.org/packages/76/0b/c28a70ecb58963847c2a8efe334904cd254812b10e535aefb3bcce513918/multidict-6.7.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:8891681594162635948a636c9fe0ff21746aeb3dd5463f6e25d9bea3a8a39ca1", size = 261194, upload-time = "2025-10-06T14:51:02.794Z" }, - { url = "https://files.pythonhosted.org/packages/b4/63/2ab26e4209773223159b83aa32721b4021ffb08102f8ac7d689c943fded1/multidict-6.7.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:18706cc31dbf402a7945916dd5cddf160251b6dab8a2c5f3d6d5a55949f676b3", size = 248510, upload-time = "2025-10-06T14:51:04.724Z" }, - { url = "https://files.pythonhosted.org/packages/93/cd/06c1fa8282af1d1c46fd55c10a7930af652afdce43999501d4d68664170c/multidict-6.7.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:f844a1bbf1d207dd311a56f383f7eda2d0e134921d45751842d8235e7778965d", size = 248395, upload-time = "2025-10-06T14:51:06.306Z" }, - { url = "https://files.pythonhosted.org/packages/99/ac/82cb419dd6b04ccf9e7e61befc00c77614fc8134362488b553402ecd55ce/multidict-6.7.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:d4393e3581e84e5645506923816b9cc81f5609a778c7e7534054091acc64d1c6", size = 239520, upload-time = "2025-10-06T14:51:08.091Z" }, - { url = "https://files.pythonhosted.org/packages/fa/f3/a0f9bf09493421bd8716a362e0cd1d244f5a6550f5beffdd6b47e885b331/multidict-6.7.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:fbd18dc82d7bf274b37aa48d664534330af744e03bccf696d6f4c6042e7d19e7", size = 245479, upload-time = "2025-10-06T14:51:10.365Z" }, - { url = "https://files.pythonhosted.org/packages/8d/01/476d38fc73a212843f43c852b0eee266b6971f0e28329c2184a8df90c376/multidict-6.7.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:b6234e14f9314731ec45c42fc4554b88133ad53a09092cc48a88e771c125dadb", size = 258903, upload-time = "2025-10-06T14:51:12.466Z" }, - { url = "https://files.pythonhosted.org/packages/49/6d/23faeb0868adba613b817d0e69c5f15531b24d462af8012c4f6de4fa8dc3/multidict-6.7.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = 
"sha256:08d4379f9744d8f78d98c8673c06e202ffa88296f009c71bbafe8a6bf847d01f", size = 252333, upload-time = "2025-10-06T14:51:14.48Z" }, - { url = "https://files.pythonhosted.org/packages/1e/cc/48d02ac22b30fa247f7dad82866e4b1015431092f4ba6ebc7e77596e0b18/multidict-6.7.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:9fe04da3f79387f450fd0061d4dd2e45a72749d31bf634aecc9e27f24fdc4b3f", size = 243411, upload-time = "2025-10-06T14:51:16.072Z" }, - { url = "https://files.pythonhosted.org/packages/4a/03/29a8bf5a18abf1fe34535c88adbdfa88c9fb869b5a3b120692c64abe8284/multidict-6.7.0-cp314-cp314-win32.whl", hash = "sha256:fbafe31d191dfa7c4c51f7a6149c9fb7e914dcf9ffead27dcfd9f1ae382b3885", size = 40940, upload-time = "2025-10-06T14:51:17.544Z" }, - { url = "https://files.pythonhosted.org/packages/82/16/7ed27b680791b939de138f906d5cf2b4657b0d45ca6f5dd6236fdddafb1a/multidict-6.7.0-cp314-cp314-win_amd64.whl", hash = "sha256:2f67396ec0310764b9222a1728ced1ab638f61aadc6226f17a71dd9324f9a99c", size = 45087, upload-time = "2025-10-06T14:51:18.875Z" }, - { url = "https://files.pythonhosted.org/packages/cd/3c/e3e62eb35a1950292fe39315d3c89941e30a9d07d5d2df42965ab041da43/multidict-6.7.0-cp314-cp314-win_arm64.whl", hash = "sha256:ba672b26069957ee369cfa7fc180dde1fc6f176eaf1e6beaf61fbebbd3d9c000", size = 42368, upload-time = "2025-10-06T14:51:20.225Z" }, - { url = "https://files.pythonhosted.org/packages/8b/40/cd499bd0dbc5f1136726db3153042a735fffd0d77268e2ee20d5f33c010f/multidict-6.7.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:c1dcc7524066fa918c6a27d61444d4ee7900ec635779058571f70d042d86ed63", size = 82326, upload-time = "2025-10-06T14:51:21.588Z" }, - { url = "https://files.pythonhosted.org/packages/13/8a/18e031eca251c8df76daf0288e6790561806e439f5ce99a170b4af30676b/multidict-6.7.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:27e0b36c2d388dc7b6ced3406671b401e84ad7eb0656b8f3a2f46ed0ce483718", size = 48065, upload-time = "2025-10-06T14:51:22.93Z" }, - { url = 
"https://files.pythonhosted.org/packages/40/71/5e6701277470a87d234e433fb0a3a7deaf3bcd92566e421e7ae9776319de/multidict-6.7.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:2a7baa46a22e77f0988e3b23d4ede5513ebec1929e34ee9495be535662c0dfe2", size = 46475, upload-time = "2025-10-06T14:51:24.352Z" }, - { url = "https://files.pythonhosted.org/packages/fe/6a/bab00cbab6d9cfb57afe1663318f72ec28289ea03fd4e8236bb78429893a/multidict-6.7.0-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:7bf77f54997a9166a2f5675d1201520586439424c2511723a7312bdb4bcc034e", size = 239324, upload-time = "2025-10-06T14:51:25.822Z" }, - { url = "https://files.pythonhosted.org/packages/2a/5f/8de95f629fc22a7769ade8b41028e3e5a822c1f8904f618d175945a81ad3/multidict-6.7.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e011555abada53f1578d63389610ac8a5400fc70ce71156b0aa30d326f1a5064", size = 246877, upload-time = "2025-10-06T14:51:27.604Z" }, - { url = "https://files.pythonhosted.org/packages/23/b4/38881a960458f25b89e9f4a4fdcb02ac101cfa710190db6e5528841e67de/multidict-6.7.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:28b37063541b897fd6a318007373930a75ca6d6ac7c940dbe14731ffdd8d498e", size = 225824, upload-time = "2025-10-06T14:51:29.664Z" }, - { url = "https://files.pythonhosted.org/packages/1e/39/6566210c83f8a261575f18e7144736059f0c460b362e96e9cf797a24b8e7/multidict-6.7.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:05047ada7a2fde2631a0ed706f1fd68b169a681dfe5e4cf0f8e4cb6618bbc2cd", size = 253558, upload-time = "2025-10-06T14:51:31.684Z" }, - { url = "https://files.pythonhosted.org/packages/00/a3/67f18315100f64c269f46e6c0319fa87ba68f0f64f2b8e7fd7c72b913a0b/multidict-6.7.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = 
"sha256:716133f7d1d946a4e1b91b1756b23c088881e70ff180c24e864c26192ad7534a", size = 252339, upload-time = "2025-10-06T14:51:33.699Z" }, - { url = "https://files.pythonhosted.org/packages/c8/2a/1cb77266afee2458d82f50da41beba02159b1d6b1f7973afc9a1cad1499b/multidict-6.7.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d1bed1b467ef657f2a0ae62844a607909ef1c6889562de5e1d505f74457d0b96", size = 244895, upload-time = "2025-10-06T14:51:36.189Z" }, - { url = "https://files.pythonhosted.org/packages/dd/72/09fa7dd487f119b2eb9524946ddd36e2067c08510576d43ff68469563b3b/multidict-6.7.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:ca43bdfa5d37bd6aee89d85e1d0831fb86e25541be7e9d376ead1b28974f8e5e", size = 241862, upload-time = "2025-10-06T14:51:41.291Z" }, - { url = "https://files.pythonhosted.org/packages/65/92/bc1f8bd0853d8669300f732c801974dfc3702c3eeadae2f60cef54dc69d7/multidict-6.7.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:44b546bd3eb645fd26fb949e43c02a25a2e632e2ca21a35e2e132c8105dc8599", size = 232376, upload-time = "2025-10-06T14:51:43.55Z" }, - { url = "https://files.pythonhosted.org/packages/09/86/ac39399e5cb9d0c2ac8ef6e10a768e4d3bc933ac808d49c41f9dc23337eb/multidict-6.7.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:a6ef16328011d3f468e7ebc326f24c1445f001ca1dec335b2f8e66bed3006394", size = 240272, upload-time = "2025-10-06T14:51:45.265Z" }, - { url = "https://files.pythonhosted.org/packages/3d/b6/fed5ac6b8563ec72df6cb1ea8dac6d17f0a4a1f65045f66b6d3bf1497c02/multidict-6.7.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:5aa873cbc8e593d361ae65c68f85faadd755c3295ea2c12040ee146802f23b38", size = 248774, upload-time = "2025-10-06T14:51:46.836Z" }, - { url = "https://files.pythonhosted.org/packages/6b/8d/b954d8c0dc132b68f760aefd45870978deec6818897389dace00fcde32ff/multidict-6.7.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = 
"sha256:3d7b6ccce016e29df4b7ca819659f516f0bc7a4b3efa3bb2012ba06431b044f9", size = 242731, upload-time = "2025-10-06T14:51:48.541Z" }, - { url = "https://files.pythonhosted.org/packages/16/9d/a2dac7009125d3540c2f54e194829ea18ac53716c61b655d8ed300120b0f/multidict-6.7.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:171b73bd4ee683d307599b66793ac80981b06f069b62eea1c9e29c9241aa66b0", size = 240193, upload-time = "2025-10-06T14:51:50.355Z" }, - { url = "https://files.pythonhosted.org/packages/39/ca/c05f144128ea232ae2178b008d5011d4e2cea86e4ee8c85c2631b1b94802/multidict-6.7.0-cp314-cp314t-win32.whl", hash = "sha256:b2d7f80c4e1fd010b07cb26820aae86b7e73b681ee4889684fb8d2d4537aab13", size = 48023, upload-time = "2025-10-06T14:51:51.883Z" }, - { url = "https://files.pythonhosted.org/packages/ba/8f/0a60e501584145588be1af5cc829265701ba3c35a64aec8e07cbb71d39bb/multidict-6.7.0-cp314-cp314t-win_amd64.whl", hash = "sha256:09929cab6fcb68122776d575e03c6cc64ee0b8fca48d17e135474b042ce515cd", size = 53507, upload-time = "2025-10-06T14:51:53.672Z" }, - { url = "https://files.pythonhosted.org/packages/7f/ae/3148b988a9c6239903e786eac19c889fab607c31d6efa7fb2147e5680f23/multidict-6.7.0-cp314-cp314t-win_arm64.whl", hash = "sha256:cc41db090ed742f32bd2d2c721861725e6109681eddf835d0a82bd3a5c382827", size = 44804, upload-time = "2025-10-06T14:51:55.415Z" }, - { url = "https://files.pythonhosted.org/packages/b7/da/7d22601b625e241d4f23ef1ebff8acfc60da633c9e7e7922e24d10f592b3/multidict-6.7.0-py3-none-any.whl", hash = "sha256:394fc5c42a333c9ffc3e421a4c85e08580d990e08b99f6bf35b4132114c5dcb3", size = 12317, upload-time = "2025-10-06T14:52:29.272Z" }, + { url = "https://files.pythonhosted.org/packages/8d/9c/f20e0e2cf80e4b2e4b1c365bf5fe104ee633c751a724246262db8f1a0b13/multidict-6.7.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:a90f75c956e32891a4eda3639ce6dd86e87105271f43d43442a3aedf3cddf172", size = 76893, upload-time = "2026-01-26T02:43:52.754Z" }, + { url = 
"https://files.pythonhosted.org/packages/fe/cf/18ef143a81610136d3da8193da9d80bfe1cb548a1e2d1c775f26b23d024a/multidict-6.7.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:3fccb473e87eaa1382689053e4a4618e7ba7b9b9b8d6adf2027ee474597128cd", size = 45456, upload-time = "2026-01-26T02:43:53.893Z" }, + { url = "https://files.pythonhosted.org/packages/a9/65/1caac9d4cd32e8433908683446eebc953e82d22b03d10d41a5f0fefe991b/multidict-6.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b0fa96985700739c4c7853a43c0b3e169360d6855780021bfc6d0f1ce7c123e7", size = 43872, upload-time = "2026-01-26T02:43:55.041Z" }, + { url = "https://files.pythonhosted.org/packages/cf/3b/d6bd75dc4f3ff7c73766e04e705b00ed6dbbaccf670d9e05a12b006f5a21/multidict-6.7.1-cp312-cp312-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:cb2a55f408c3043e42b40cc8eecd575afa27b7e0b956dfb190de0f8499a57a53", size = 251018, upload-time = "2026-01-26T02:43:56.198Z" }, + { url = "https://files.pythonhosted.org/packages/fd/80/c959c5933adedb9ac15152e4067c702a808ea183a8b64cf8f31af8ad3155/multidict-6.7.1-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:eb0ce7b2a32d09892b3dd6cc44877a0d02a33241fafca5f25c8b6b62374f8b75", size = 258883, upload-time = "2026-01-26T02:43:57.499Z" }, + { url = "https://files.pythonhosted.org/packages/86/85/7ed40adafea3d4f1c8b916e3b5cc3a8e07dfcdcb9cd72800f4ed3ca1b387/multidict-6.7.1-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:c3a32d23520ee37bf327d1e1a656fec76a2edd5c038bf43eddfa0572ec49c60b", size = 242413, upload-time = "2026-01-26T02:43:58.755Z" }, + { url = "https://files.pythonhosted.org/packages/d2/57/b8565ff533e48595503c785f8361ff9a4fde4d67de25c207cd0ba3befd03/multidict-6.7.1-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:9c90fed18bffc0189ba814749fdcc102b536e83a9f738a9003e569acd540a733", size = 268404, 
upload-time = "2026-01-26T02:44:00.216Z" }, + { url = "https://files.pythonhosted.org/packages/e0/50/9810c5c29350f7258180dfdcb2e52783a0632862eb334c4896ac717cebcb/multidict-6.7.1-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:da62917e6076f512daccfbbde27f46fed1c98fee202f0559adec8ee0de67f71a", size = 269456, upload-time = "2026-01-26T02:44:02.202Z" }, + { url = "https://files.pythonhosted.org/packages/f3/8d/5e5be3ced1d12966fefb5c4ea3b2a5b480afcea36406559442c6e31d4a48/multidict-6.7.1-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bfde23ef6ed9db7eaee6c37dcec08524cb43903c60b285b172b6c094711b3961", size = 256322, upload-time = "2026-01-26T02:44:03.56Z" }, + { url = "https://files.pythonhosted.org/packages/31/6e/d8a26d81ac166a5592782d208dd90dfdc0a7a218adaa52b45a672b46c122/multidict-6.7.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3758692429e4e32f1ba0df23219cd0b4fc0a52f476726fff9337d1a57676a582", size = 253955, upload-time = "2026-01-26T02:44:04.845Z" }, + { url = "https://files.pythonhosted.org/packages/59/4c/7c672c8aad41534ba619bcd4ade7a0dc87ed6b8b5c06149b85d3dd03f0cd/multidict-6.7.1-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:398c1478926eca669f2fd6a5856b6de9c0acf23a2cb59a14c0ba5844fa38077e", size = 251254, upload-time = "2026-01-26T02:44:06.133Z" }, + { url = "https://files.pythonhosted.org/packages/7b/bd/84c24de512cbafbdbc39439f74e967f19570ce7924e3007174a29c348916/multidict-6.7.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:c102791b1c4f3ab36ce4101154549105a53dc828f016356b3e3bcae2e3a039d3", size = 252059, upload-time = "2026-01-26T02:44:07.518Z" }, + { url = "https://files.pythonhosted.org/packages/fa/ba/f5449385510825b73d01c2d4087bf6d2fccc20a2d42ac34df93191d3dd03/multidict-6.7.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:a088b62bd733e2ad12c50dad01b7d0166c30287c166e137433d3b410add807a6", size = 263588, upload-time = 
"2026-01-26T02:44:09.382Z" }, + { url = "https://files.pythonhosted.org/packages/d7/11/afc7c677f68f75c84a69fe37184f0f82fce13ce4b92f49f3db280b7e92b3/multidict-6.7.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:3d51ff4785d58d3f6c91bdbffcb5e1f7ddfda557727043aa20d20ec4f65e324a", size = 259642, upload-time = "2026-01-26T02:44:10.73Z" }, + { url = "https://files.pythonhosted.org/packages/2b/17/ebb9644da78c4ab36403739e0e6e0e30ebb135b9caf3440825001a0bddcb/multidict-6.7.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:fc5907494fccf3e7d3f94f95c91d6336b092b5fc83811720fae5e2765890dfba", size = 251377, upload-time = "2026-01-26T02:44:12.042Z" }, + { url = "https://files.pythonhosted.org/packages/ca/a4/840f5b97339e27846c46307f2530a2805d9d537d8b8bd416af031cad7fa0/multidict-6.7.1-cp312-cp312-win32.whl", hash = "sha256:28ca5ce2fd9716631133d0e9a9b9a745ad7f60bac2bccafb56aa380fc0b6c511", size = 41887, upload-time = "2026-01-26T02:44:14.245Z" }, + { url = "https://files.pythonhosted.org/packages/80/31/0b2517913687895f5904325c2069d6a3b78f66cc641a86a2baf75a05dcbb/multidict-6.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:fcee94dfbd638784645b066074b338bc9cc155d4b4bffa4adce1615c5a426c19", size = 46053, upload-time = "2026-01-26T02:44:15.371Z" }, + { url = "https://files.pythonhosted.org/packages/0c/5b/aba28e4ee4006ae4c7df8d327d31025d760ffa992ea23812a601d226e682/multidict-6.7.1-cp312-cp312-win_arm64.whl", hash = "sha256:ba0a9fb644d0c1a2194cf7ffb043bd852cea63a57f66fbd33959f7dae18517bf", size = 43307, upload-time = "2026-01-26T02:44:16.852Z" }, + { url = "https://files.pythonhosted.org/packages/f2/22/929c141d6c0dba87d3e1d38fbdf1ba8baba86b7776469f2bc2d3227a1e67/multidict-6.7.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:2b41f5fed0ed563624f1c17630cb9941cf2309d4df00e494b551b5f3e3d67a23", size = 76174, upload-time = "2026-01-26T02:44:18.509Z" }, + { url = 
"https://files.pythonhosted.org/packages/c7/75/bc704ae15fee974f8fccd871305e254754167dce5f9e42d88a2def741a1d/multidict-6.7.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:84e61e3af5463c19b67ced91f6c634effb89ef8bfc5ca0267f954451ed4bb6a2", size = 45116, upload-time = "2026-01-26T02:44:19.745Z" }, + { url = "https://files.pythonhosted.org/packages/79/76/55cd7186f498ed080a18440c9013011eb548f77ae1b297206d030eb1180a/multidict-6.7.1-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:935434b9853c7c112eee7ac891bc4cb86455aa631269ae35442cb316790c1445", size = 43524, upload-time = "2026-01-26T02:44:21.571Z" }, + { url = "https://files.pythonhosted.org/packages/e9/3c/414842ef8d5a1628d68edee29ba0e5bcf235dbfb3ccd3ea303a7fe8c72ff/multidict-6.7.1-cp313-cp313-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:432feb25a1cb67fe82a9680b4d65fb542e4635cb3166cd9c01560651ad60f177", size = 249368, upload-time = "2026-01-26T02:44:22.803Z" }, + { url = "https://files.pythonhosted.org/packages/f6/32/befed7f74c458b4a525e60519fe8d87eef72bb1e99924fa2b0f9d97a221e/multidict-6.7.1-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e82d14e3c948952a1a85503817e038cba5905a3352de76b9a465075d072fba23", size = 256952, upload-time = "2026-01-26T02:44:24.306Z" }, + { url = "https://files.pythonhosted.org/packages/03/d6/c878a44ba877f366630c860fdf74bfb203c33778f12b6ac274936853c451/multidict-6.7.1-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:4cfb48c6ea66c83bcaaf7e4dfa7ec1b6bbcf751b7db85a328902796dfde4c060", size = 240317, upload-time = "2026-01-26T02:44:25.772Z" }, + { url = "https://files.pythonhosted.org/packages/68/49/57421b4d7ad2e9e60e25922b08ceb37e077b90444bde6ead629095327a6f/multidict-6.7.1-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:1d540e51b7e8e170174555edecddbd5538105443754539193e3e1061864d444d", size = 267132, 
upload-time = "2026-01-26T02:44:27.648Z" }, + { url = "https://files.pythonhosted.org/packages/b7/fe/ec0edd52ddbcea2a2e89e174f0206444a61440b40f39704e64dc807a70bd/multidict-6.7.1-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:273d23f4b40f3dce4d6c8a821c741a86dec62cded82e1175ba3d99be128147ed", size = 268140, upload-time = "2026-01-26T02:44:29.588Z" }, + { url = "https://files.pythonhosted.org/packages/b0/73/6e1b01cbeb458807aa0831742232dbdd1fa92bfa33f52a3f176b4ff3dc11/multidict-6.7.1-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d624335fd4fa1c08a53f8b4be7676ebde19cd092b3895c421045ca87895b429", size = 254277, upload-time = "2026-01-26T02:44:30.902Z" }, + { url = "https://files.pythonhosted.org/packages/6a/b2/5fb8c124d7561a4974c342bc8c778b471ebbeb3cc17df696f034a7e9afe7/multidict-6.7.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:12fad252f8b267cc75b66e8fc51b3079604e8d43a75428ffe193cd9e2195dfd6", size = 252291, upload-time = "2026-01-26T02:44:32.31Z" }, + { url = "https://files.pythonhosted.org/packages/5a/96/51d4e4e06bcce92577fcd488e22600bd38e4fd59c20cb49434d054903bd2/multidict-6.7.1-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:03ede2a6ffbe8ef936b92cb4529f27f42be7f56afcdab5ab739cd5f27fb1cbf9", size = 250156, upload-time = "2026-01-26T02:44:33.734Z" }, + { url = "https://files.pythonhosted.org/packages/db/6b/420e173eec5fba721a50e2a9f89eda89d9c98fded1124f8d5c675f7a0c0f/multidict-6.7.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:90efbcf47dbe33dcf643a1e400d67d59abeac5db07dc3f27d6bdeae497a2198c", size = 249742, upload-time = "2026-01-26T02:44:35.222Z" }, + { url = "https://files.pythonhosted.org/packages/44/a3/ec5b5bd98f306bc2aa297b8c6f11a46714a56b1e6ef5ebda50a4f5d7c5fb/multidict-6.7.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:5c4b9bfc148f5a91be9244d6264c53035c8a0dcd2f51f1c3c6e30e30ebaa1c84", size = 262221, upload-time = 
"2026-01-26T02:44:36.604Z" }, + { url = "https://files.pythonhosted.org/packages/cd/f7/e8c0d0da0cd1e28d10e624604e1a36bcc3353aaebdfdc3a43c72bc683a12/multidict-6.7.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:401c5a650f3add2472d1d288c26deebc540f99e2fb83e9525007a74cd2116f1d", size = 258664, upload-time = "2026-01-26T02:44:38.008Z" }, + { url = "https://files.pythonhosted.org/packages/52/da/151a44e8016dd33feed44f730bd856a66257c1ee7aed4f44b649fb7edeb3/multidict-6.7.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:97891f3b1b3ffbded884e2916cacf3c6fc87b66bb0dde46f7357404750559f33", size = 249490, upload-time = "2026-01-26T02:44:39.386Z" }, + { url = "https://files.pythonhosted.org/packages/87/af/a3b86bf9630b732897f6fc3f4c4714b90aa4361983ccbdcd6c0339b21b0c/multidict-6.7.1-cp313-cp313-win32.whl", hash = "sha256:e1c5988359516095535c4301af38d8a8838534158f649c05dd1050222321bcb3", size = 41695, upload-time = "2026-01-26T02:44:41.318Z" }, + { url = "https://files.pythonhosted.org/packages/b2/35/e994121b0e90e46134673422dd564623f93304614f5d11886b1b3e06f503/multidict-6.7.1-cp313-cp313-win_amd64.whl", hash = "sha256:960c83bf01a95b12b08fd54324a4eb1d5b52c88932b5cba5d6e712bb3ed12eb5", size = 45884, upload-time = "2026-01-26T02:44:42.488Z" }, + { url = "https://files.pythonhosted.org/packages/ca/61/42d3e5dbf661242a69c97ea363f2d7b46c567da8eadef8890022be6e2ab0/multidict-6.7.1-cp313-cp313-win_arm64.whl", hash = "sha256:563fe25c678aaba333d5399408f5ec3c383ca5b663e7f774dd179a520b8144df", size = 43122, upload-time = "2026-01-26T02:44:43.664Z" }, + { url = "https://files.pythonhosted.org/packages/6d/b3/e6b21c6c4f314bb956016b0b3ef2162590a529b84cb831c257519e7fde44/multidict-6.7.1-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:c76c4bec1538375dad9d452d246ca5368ad6e1c9039dadcf007ae59c70619ea1", size = 83175, upload-time = "2026-01-26T02:44:44.894Z" }, + { url = 
"https://files.pythonhosted.org/packages/fb/76/23ecd2abfe0957b234f6c960f4ade497f55f2c16aeb684d4ecdbf1c95791/multidict-6.7.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:57b46b24b5d5ebcc978da4ec23a819a9402b4228b8a90d9c656422b4bdd8a963", size = 48460, upload-time = "2026-01-26T02:44:46.106Z" }, + { url = "https://files.pythonhosted.org/packages/c4/57/a0ed92b23f3a042c36bc4227b72b97eca803f5f1801c1ab77c8a212d455e/multidict-6.7.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e954b24433c768ce78ab7929e84ccf3422e46deb45a4dc9f93438f8217fa2d34", size = 46930, upload-time = "2026-01-26T02:44:47.278Z" }, + { url = "https://files.pythonhosted.org/packages/b5/66/02ec7ace29162e447f6382c495dc95826bf931d3818799bbef11e8f7df1a/multidict-6.7.1-cp313-cp313t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:3bd231490fa7217cc832528e1cd8752a96f0125ddd2b5749390f7c3ec8721b65", size = 242582, upload-time = "2026-01-26T02:44:48.604Z" }, + { url = "https://files.pythonhosted.org/packages/58/18/64f5a795e7677670e872673aca234162514696274597b3708b2c0d276cce/multidict-6.7.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:253282d70d67885a15c8a7716f3a73edf2d635793ceda8173b9ecc21f2fb8292", size = 250031, upload-time = "2026-01-26T02:44:50.544Z" }, + { url = "https://files.pythonhosted.org/packages/c8/ed/e192291dbbe51a8290c5686f482084d31bcd9d09af24f63358c3d42fd284/multidict-6.7.1-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:0b4c48648d7649c9335cf1927a8b87fa692de3dcb15faa676c6a6f1f1aabda43", size = 228596, upload-time = "2026-01-26T02:44:51.951Z" }, + { url = "https://files.pythonhosted.org/packages/1e/7e/3562a15a60cf747397e7f2180b0a11dc0c38d9175a650e75fa1b4d325e15/multidict-6.7.1-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:98bc624954ec4d2c7cb074b8eefc2b5d0ce7d482e410df446414355d158fe4ca", size = 
257492, upload-time = "2026-01-26T02:44:53.902Z" }, + { url = "https://files.pythonhosted.org/packages/24/02/7d0f9eae92b5249bb50ac1595b295f10e263dd0078ebb55115c31e0eaccd/multidict-6.7.1-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:1b99af4d9eec0b49927b4402bcbb58dea89d3e0db8806a4086117019939ad3dd", size = 255899, upload-time = "2026-01-26T02:44:55.316Z" }, + { url = "https://files.pythonhosted.org/packages/00/e3/9b60ed9e23e64c73a5cde95269ef1330678e9c6e34dd4eb6b431b85b5a10/multidict-6.7.1-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6aac4f16b472d5b7dc6f66a0d49dd57b0e0902090be16594dc9ebfd3d17c47e7", size = 247970, upload-time = "2026-01-26T02:44:56.783Z" }, + { url = "https://files.pythonhosted.org/packages/3e/06/538e58a63ed5cfb0bd4517e346b91da32fde409d839720f664e9a4ae4f9d/multidict-6.7.1-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:21f830fe223215dffd51f538e78c172ed7c7f60c9b96a2bf05c4848ad49921c3", size = 245060, upload-time = "2026-01-26T02:44:58.195Z" }, + { url = "https://files.pythonhosted.org/packages/b2/2f/d743a3045a97c895d401e9bd29aaa09b94f5cbdf1bd561609e5a6c431c70/multidict-6.7.1-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f5dd81c45b05518b9aa4da4aa74e1c93d715efa234fd3e8a179df611cc85e5f4", size = 235888, upload-time = "2026-01-26T02:44:59.57Z" }, + { url = "https://files.pythonhosted.org/packages/38/83/5a325cac191ab28b63c52f14f1131f3b0a55ba3b9aa65a6d0bf2a9b921a0/multidict-6.7.1-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:eb304767bca2bb92fb9c5bd33cedc95baee5bb5f6c88e63706533a1c06ad08c8", size = 243554, upload-time = "2026-01-26T02:45:01.054Z" }, + { url = "https://files.pythonhosted.org/packages/20/1f/9d2327086bd15da2725ef6aae624208e2ef828ed99892b17f60c344e57ed/multidict-6.7.1-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:c9035dde0f916702850ef66460bc4239d89d08df4d02023a5926e7446724212c", size = 252341, upload-time = 
"2026-01-26T02:45:02.484Z" }, + { url = "https://files.pythonhosted.org/packages/e8/2c/2a1aa0280cf579d0f6eed8ee5211c4f1730bd7e06c636ba2ee6aafda302e/multidict-6.7.1-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:af959b9beeb66c822380f222f0e0a1889331597e81f1ded7f374f3ecb0fd6c52", size = 246391, upload-time = "2026-01-26T02:45:03.862Z" }, + { url = "https://files.pythonhosted.org/packages/e5/03/7ca022ffc36c5a3f6e03b179a5ceb829be9da5783e6fe395f347c0794680/multidict-6.7.1-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:41f2952231456154ee479651491e94118229844dd7226541788be783be2b5108", size = 243422, upload-time = "2026-01-26T02:45:05.296Z" }, + { url = "https://files.pythonhosted.org/packages/dc/1d/b31650eab6c5778aceed46ba735bd97f7c7d2f54b319fa916c0f96e7805b/multidict-6.7.1-cp313-cp313t-win32.whl", hash = "sha256:df9f19c28adcb40b6aae30bbaa1478c389efd50c28d541d76760199fc1037c32", size = 47770, upload-time = "2026-01-26T02:45:06.754Z" }, + { url = "https://files.pythonhosted.org/packages/ac/5b/2d2d1d522e51285bd61b1e20df8f47ae1a9d80839db0b24ea783b3832832/multidict-6.7.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d54ecf9f301853f2c5e802da559604b3e95bb7a3b01a9c295c6ee591b9882de8", size = 53109, upload-time = "2026-01-26T02:45:08.044Z" }, + { url = "https://files.pythonhosted.org/packages/3d/a3/cc409ba012c83ca024a308516703cf339bdc4b696195644a7215a5164a24/multidict-6.7.1-cp313-cp313t-win_arm64.whl", hash = "sha256:5a37ca18e360377cfda1d62f5f382ff41f2b8c4ccb329ed974cc2e1643440118", size = 45573, upload-time = "2026-01-26T02:45:09.349Z" }, + { url = "https://files.pythonhosted.org/packages/91/cc/db74228a8be41884a567e88a62fd589a913708fcf180d029898c17a9a371/multidict-6.7.1-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:8f333ec9c5eb1b7105e3b84b53141e66ca05a19a605368c55450b6ba208cb9ee", size = 75190, upload-time = "2026-01-26T02:45:10.651Z" }, + { url = 
"https://files.pythonhosted.org/packages/d5/22/492f2246bb5b534abd44804292e81eeaf835388901f0c574bac4eeec73c5/multidict-6.7.1-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:a407f13c188f804c759fc6a9f88286a565c242a76b27626594c133b82883b5c2", size = 44486, upload-time = "2026-01-26T02:45:11.938Z" }, + { url = "https://files.pythonhosted.org/packages/f1/4f/733c48f270565d78b4544f2baddc2fb2a245e5a8640254b12c36ac7ac68e/multidict-6.7.1-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:0e161ddf326db5577c3a4cc2d8648f81456e8a20d40415541587a71620d7a7d1", size = 43219, upload-time = "2026-01-26T02:45:14.346Z" }, + { url = "https://files.pythonhosted.org/packages/24/bb/2c0c2287963f4259c85e8bcbba9182ced8d7fca65c780c38e99e61629d11/multidict-6.7.1-cp314-cp314-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:1e3a8bb24342a8201d178c3b4984c26ba81a577c80d4d525727427460a50c22d", size = 245132, upload-time = "2026-01-26T02:45:15.712Z" }, + { url = "https://files.pythonhosted.org/packages/a7/f9/44d4b3064c65079d2467888794dea218d1601898ac50222ab8a9a8094460/multidict-6.7.1-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:97231140a50f5d447d3164f994b86a0bed7cd016e2682f8650d6a9158e14fd31", size = 252420, upload-time = "2026-01-26T02:45:17.293Z" }, + { url = "https://files.pythonhosted.org/packages/8b/13/78f7275e73fa17b24c9a51b0bd9d73ba64bb32d0ed51b02a746eb876abe7/multidict-6.7.1-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:6b10359683bd8806a200fd2909e7c8ca3a7b24ec1d8132e483d58e791d881048", size = 233510, upload-time = "2026-01-26T02:45:19.356Z" }, + { url = "https://files.pythonhosted.org/packages/4b/25/8167187f62ae3cbd52da7893f58cb036b47ea3fb67138787c76800158982/multidict-6.7.1-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:283ddac99f7ac25a4acadbf004cb5ae34480bbeb063520f70ce397b281859362", size = 264094, 
upload-time = "2026-01-26T02:45:20.834Z" }, + { url = "https://files.pythonhosted.org/packages/a1/e7/69a3a83b7b030cf283fb06ce074a05a02322359783424d7edf0f15fe5022/multidict-6.7.1-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:538cec1e18c067d0e6103aa9a74f9e832904c957adc260e61cd9d8cf0c3b3d37", size = 260786, upload-time = "2026-01-26T02:45:22.818Z" }, + { url = "https://files.pythonhosted.org/packages/fe/3b/8ec5074bcfc450fe84273713b4b0a0dd47c0249358f5d82eb8104ffe2520/multidict-6.7.1-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7eee46ccb30ff48a1e35bb818cc90846c6be2b68240e42a78599166722cea709", size = 248483, upload-time = "2026-01-26T02:45:24.368Z" }, + { url = "https://files.pythonhosted.org/packages/48/5a/d5a99e3acbca0e29c5d9cba8f92ceb15dce78bab963b308ae692981e3a5d/multidict-6.7.1-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fa263a02f4f2dd2d11a7b1bb4362aa7cb1049f84a9235d31adf63f30143469a0", size = 248403, upload-time = "2026-01-26T02:45:25.982Z" }, + { url = "https://files.pythonhosted.org/packages/35/48/e58cd31f6c7d5102f2a4bf89f96b9cf7e00b6c6f3d04ecc44417c00a5a3c/multidict-6.7.1-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:2e1425e2f99ec5bd36c15a01b690a1a2456209c5deed58f95469ffb46039ccbb", size = 240315, upload-time = "2026-01-26T02:45:27.487Z" }, + { url = "https://files.pythonhosted.org/packages/94/33/1cd210229559cb90b6786c30676bb0c58249ff42f942765f88793b41fdce/multidict-6.7.1-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:497394b3239fc6f0e13a78a3e1b61296e72bf1c5f94b4c4eb80b265c37a131cd", size = 245528, upload-time = "2026-01-26T02:45:28.991Z" }, + { url = "https://files.pythonhosted.org/packages/64/f2/6e1107d226278c876c783056b7db43d800bb64c6131cec9c8dfb6903698e/multidict-6.7.1-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:233b398c29d3f1b9676b4b6f75c518a06fcb2ea0b925119fb2c1bc35c05e1601", size = 258784, upload-time = 
"2026-01-26T02:45:30.503Z" }, + { url = "https://files.pythonhosted.org/packages/4d/c1/11f664f14d525e4a1b5327a82d4de61a1db604ab34c6603bb3c2cc63ad34/multidict-6.7.1-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:93b1818e4a6e0930454f0f2af7dfce69307ca03cdcfb3739bf4d91241967b6c1", size = 251980, upload-time = "2026-01-26T02:45:32.603Z" }, + { url = "https://files.pythonhosted.org/packages/e1/9f/75a9ac888121d0c5bbd4ecf4eead45668b1766f6baabfb3b7f66a410e231/multidict-6.7.1-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:f33dc2a3abe9249ea5d8360f969ec7f4142e7ac45ee7014d8f8d5acddf178b7b", size = 243602, upload-time = "2026-01-26T02:45:34.043Z" }, + { url = "https://files.pythonhosted.org/packages/9a/e7/50bf7b004cc8525d80dbbbedfdc7aed3e4c323810890be4413e589074032/multidict-6.7.1-cp314-cp314-win32.whl", hash = "sha256:3ab8b9d8b75aef9df299595d5388b14530839f6422333357af1339443cff777d", size = 40930, upload-time = "2026-01-26T02:45:36.278Z" }, + { url = "https://files.pythonhosted.org/packages/e0/bf/52f25716bbe93745595800f36fb17b73711f14da59ed0bb2eba141bc9f0f/multidict-6.7.1-cp314-cp314-win_amd64.whl", hash = "sha256:5e01429a929600e7dab7b166062d9bb54a5eed752384c7384c968c2afab8f50f", size = 45074, upload-time = "2026-01-26T02:45:37.546Z" }, + { url = "https://files.pythonhosted.org/packages/97/ab/22803b03285fa3a525f48217963da3a65ae40f6a1b6f6cf2768879e208f9/multidict-6.7.1-cp314-cp314-win_arm64.whl", hash = "sha256:4885cb0e817aef5d00a2e8451d4665c1808378dc27c2705f1bf4ef8505c0d2e5", size = 42471, upload-time = "2026-01-26T02:45:38.889Z" }, + { url = "https://files.pythonhosted.org/packages/e0/6d/f9293baa6146ba9507e360ea0292b6422b016907c393e2f63fc40ab7b7b5/multidict-6.7.1-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:0458c978acd8e6ea53c81eefaddbbee9c6c5e591f41b3f5e8e194780fe026581", size = 82401, upload-time = "2026-01-26T02:45:40.254Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/68/53b5494738d83558d87c3c71a486504d8373421c3e0dbb6d0db48ad42ee0/multidict-6.7.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:c0abd12629b0af3cf590982c0b413b1e7395cd4ec026f30986818ab95bfaa94a", size = 48143, upload-time = "2026-01-26T02:45:41.635Z" }, + { url = "https://files.pythonhosted.org/packages/37/e8/5284c53310dcdc99ce5d66563f6e5773531a9b9fe9ec7a615e9bc306b05f/multidict-6.7.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:14525a5f61d7d0c94b368a42cff4c9a4e7ba2d52e2672a7b23d84dc86fb02b0c", size = 46507, upload-time = "2026-01-26T02:45:42.99Z" }, + { url = "https://files.pythonhosted.org/packages/e4/fc/6800d0e5b3875568b4083ecf5f310dcf91d86d52573160834fb4bfcf5e4f/multidict-6.7.1-cp314-cp314t-manylinux1_i686.manylinux_2_28_i686.manylinux_2_5_i686.whl", hash = "sha256:17307b22c217b4cf05033dabefe68255a534d637c6c9b0cc8382718f87be4262", size = 239358, upload-time = "2026-01-26T02:45:44.376Z" }, + { url = "https://files.pythonhosted.org/packages/41/75/4ad0973179361cdf3a113905e6e088173198349131be2b390f9fa4da5fc6/multidict-6.7.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:7a7e590ff876a3eaf1c02a4dfe0724b6e69a9e9de6d8f556816f29c496046e59", size = 246884, upload-time = "2026-01-26T02:45:47.167Z" }, + { url = "https://files.pythonhosted.org/packages/c3/9c/095bb28b5da139bd41fb9a5d5caff412584f377914bd8787c2aa98717130/multidict-6.7.1-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:5fa6a95dfee63893d80a34758cd0e0c118a30b8dcb46372bf75106c591b77889", size = 225878, upload-time = "2026-01-26T02:45:48.698Z" }, + { url = "https://files.pythonhosted.org/packages/07/d0/c0a72000243756e8f5a277b6b514fa005f2c73d481b7d9e47cd4568aa2e4/multidict-6.7.1-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a0543217a6a017692aa6ae5cc39adb75e587af0f3a82288b1492eb73dd6cc2a4", size = 
253542, upload-time = "2026-01-26T02:45:50.164Z" }, + { url = "https://files.pythonhosted.org/packages/c0/6b/f69da15289e384ecf2a68837ec8b5ad8c33e973aa18b266f50fe55f24b8c/multidict-6.7.1-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f99fe611c312b3c1c0ace793f92464d8cd263cc3b26b5721950d977b006b6c4d", size = 252403, upload-time = "2026-01-26T02:45:51.779Z" }, + { url = "https://files.pythonhosted.org/packages/a2/76/b9669547afa5a1a25cd93eaca91c0da1c095b06b6d2d8ec25b713588d3a1/multidict-6.7.1-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9004d8386d133b7e6135679424c91b0b854d2d164af6ea3f289f8f2761064609", size = 244889, upload-time = "2026-01-26T02:45:53.27Z" }, + { url = "https://files.pythonhosted.org/packages/7e/a9/a50d2669e506dad33cfc45b5d574a205587b7b8a5f426f2fbb2e90882588/multidict-6.7.1-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e628ef0e6859ffd8273c69412a2465c4be4a9517d07261b33334b5ec6f3c7489", size = 241982, upload-time = "2026-01-26T02:45:54.919Z" }, + { url = "https://files.pythonhosted.org/packages/c5/bb/1609558ad8b456b4827d3c5a5b775c93b87878fd3117ed3db3423dfbce1b/multidict-6.7.1-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:841189848ba629c3552035a6a7f5bf3b02eb304e9fea7492ca220a8eda6b0e5c", size = 232415, upload-time = "2026-01-26T02:45:56.981Z" }, + { url = "https://files.pythonhosted.org/packages/d8/59/6f61039d2aa9261871e03ab9dc058a550d240f25859b05b67fd70f80d4b3/multidict-6.7.1-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:ce1bbd7d780bb5a0da032e095c951f7014d6b0a205f8318308140f1a6aba159e", size = 240337, upload-time = "2026-01-26T02:45:58.698Z" }, + { url = "https://files.pythonhosted.org/packages/a1/29/fdc6a43c203890dc2ae9249971ecd0c41deaedfe00d25cb6564b2edd99eb/multidict-6.7.1-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:b26684587228afed0d50cf804cc71062cc9c1cdf55051c4c6345d372947b268c", size = 248788, upload-time = 
"2026-01-26T02:46:00.862Z" }, + { url = "https://files.pythonhosted.org/packages/a9/14/a153a06101323e4cf086ecee3faadba52ff71633d471f9685c42e3736163/multidict-6.7.1-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:9f9af11306994335398293f9958071019e3ab95e9a707dc1383a35613f6abcb9", size = 242842, upload-time = "2026-01-26T02:46:02.824Z" }, + { url = "https://files.pythonhosted.org/packages/41/5f/604ae839e64a4a6efc80db94465348d3b328ee955e37acb24badbcd24d83/multidict-6.7.1-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b4938326284c4f1224178a560987b6cf8b4d38458b113d9b8c1db1a836e640a2", size = 240237, upload-time = "2026-01-26T02:46:05.898Z" }, + { url = "https://files.pythonhosted.org/packages/5f/60/c3a5187bf66f6fb546ff4ab8fb5a077cbdd832d7b1908d4365c7f74a1917/multidict-6.7.1-cp314-cp314t-win32.whl", hash = "sha256:98655c737850c064a65e006a3df7c997cd3b220be4ec8fe26215760b9697d4d7", size = 48008, upload-time = "2026-01-26T02:46:07.468Z" }, + { url = "https://files.pythonhosted.org/packages/0c/f7/addf1087b860ac60e6f382240f64fb99f8bfb532bb06f7c542b83c29ca61/multidict-6.7.1-cp314-cp314t-win_amd64.whl", hash = "sha256:497bde6223c212ba11d462853cfa4f0ae6ef97465033e7dc9940cdb3ab5b48e5", size = 53542, upload-time = "2026-01-26T02:46:08.809Z" }, + { url = "https://files.pythonhosted.org/packages/4c/81/4629d0aa32302ef7b2ec65c75a728cc5ff4fa410c50096174c1632e70b3e/multidict-6.7.1-cp314-cp314t-win_arm64.whl", hash = "sha256:2bbd113e0d4af5db41d5ebfe9ccaff89de2120578164f86a5d17d5a576d1e5b2", size = 44719, upload-time = "2026-01-26T02:46:11.146Z" }, + { url = "https://files.pythonhosted.org/packages/81/08/7036c080d7117f28a4af526d794aab6a84463126db031b007717c1a6676e/multidict-6.7.1-py3-none-any.whl", hash = "sha256:55d97cc6dae627efa6a6e548885712d4864b81110ac76fa4e534c03819fa4a56", size = 12319, upload-time = "2026-01-26T02:46:44.004Z" }, ] [[package]] @@ -949,11 +976,11 @@ wheels = [ [[package]] name = "pathspec" -version = "1.0.3" +version = "1.0.4" source = { registry = 
"https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/4c/b2/bb8e495d5262bfec41ab5cb18f522f1012933347fb5d9e62452d446baca2/pathspec-1.0.3.tar.gz", hash = "sha256:bac5cf97ae2c2876e2d25ebb15078eb04d76e4b98921ee31c6f85ade8b59444d", size = 130841, upload-time = "2026-01-09T15:46:46.009Z" } +sdist = { url = "https://files.pythonhosted.org/packages/fa/36/e27608899f9b8d4dff0617b2d9ab17ca5608956ca44461ac14ac48b44015/pathspec-1.0.4.tar.gz", hash = "sha256:0210e2ae8a21a9137c0d470578cb0e595af87edaa6ebf12ff176f14a02e0e645", size = 131200, upload-time = "2026-01-27T03:59:46.938Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/32/2b/121e912bd60eebd623f873fd090de0e84f322972ab25a7f9044c056804ed/pathspec-1.0.3-py3-none-any.whl", hash = "sha256:e80767021c1cc524aa3fb14bedda9c34406591343cc42797b386ce7b9354fb6c", size = 55021, upload-time = "2026-01-09T15:46:44.652Z" }, + { url = "https://files.pythonhosted.org/packages/ef/3c/2c197d226f9ea224a9ab8d197933f9da0ae0aac5b6e0f884e2b8d9c8e9f7/pathspec-1.0.4-py3-none-any.whl", hash = "sha256:fb6ae2fd4e7c921a165808a552060e722767cfa526f99ca5156ed2ce45a5c723", size = 55206, upload-time = "2026-01-27T03:59:45.137Z" }, ] [[package]] @@ -1042,54 +1069,54 @@ wheels = [ [[package]] name = "protobuf" -version = "6.33.4" +version = "7.34.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/53/b8/cda15d9d46d03d4aa3a67cb6bffe05173440ccf86a9541afaf7ac59a1b6b/protobuf-6.33.4.tar.gz", hash = "sha256:dc2e61bca3b10470c1912d166fe0af67bfc20eb55971dcef8dfa48ce14f0ed91", size = 444346, upload-time = "2026-01-12T18:33:40.109Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f2/00/04a2ab36b70a52d0356852979e08b44edde0435f2115dc66e25f2100f3ab/protobuf-7.34.0.tar.gz", hash = "sha256:3871a3df67c710aaf7bb8d214cc997342e63ceebd940c8c7fc65c9b3d697591a", size = 454726, upload-time = "2026-02-27T00:30:25.421Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/e0/be/24ef9f3095bacdf95b458543334d0c4908ccdaee5130420bf064492c325f/protobuf-6.33.4-cp310-abi3-win32.whl", hash = "sha256:918966612c8232fc6c24c78e1cd89784307f5814ad7506c308ee3cf86662850d", size = 425612, upload-time = "2026-01-12T18:33:29.656Z" }, - { url = "https://files.pythonhosted.org/packages/31/ad/e5693e1974a28869e7cd244302911955c1cebc0161eb32dfa2b25b6e96f0/protobuf-6.33.4-cp310-abi3-win_amd64.whl", hash = "sha256:8f11ffae31ec67fc2554c2ef891dcb561dae9a2a3ed941f9e134c2db06657dbc", size = 436962, upload-time = "2026-01-12T18:33:31.345Z" }, - { url = "https://files.pythonhosted.org/packages/66/15/6ee23553b6bfd82670207ead921f4d8ef14c107e5e11443b04caeb5ab5ec/protobuf-6.33.4-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:2fe67f6c014c84f655ee06f6f66213f9254b3a8b6bda6cda0ccd4232c73c06f0", size = 427612, upload-time = "2026-01-12T18:33:32.646Z" }, - { url = "https://files.pythonhosted.org/packages/2b/48/d301907ce6d0db75f959ca74f44b475a9caa8fcba102d098d3c3dd0f2d3f/protobuf-6.33.4-cp39-abi3-manylinux2014_aarch64.whl", hash = "sha256:757c978f82e74d75cba88eddec479df9b99a42b31193313b75e492c06a51764e", size = 324484, upload-time = "2026-01-12T18:33:33.789Z" }, - { url = "https://files.pythonhosted.org/packages/92/1c/e53078d3f7fe710572ab2dcffd993e1e3b438ae71cfc031b71bae44fcb2d/protobuf-6.33.4-cp39-abi3-manylinux2014_s390x.whl", hash = "sha256:c7c64f259c618f0bef7bee042075e390debbf9682334be2b67408ec7c1c09ee6", size = 339256, upload-time = "2026-01-12T18:33:35.231Z" }, - { url = "https://files.pythonhosted.org/packages/e8/8e/971c0edd084914f7ee7c23aa70ba89e8903918adca179319ee94403701d5/protobuf-6.33.4-cp39-abi3-manylinux2014_x86_64.whl", hash = "sha256:3df850c2f8db9934de4cf8f9152f8dc2558f49f298f37f90c517e8e5c84c30e9", size = 323311, upload-time = "2026-01-12T18:33:36.305Z" }, - { url = "https://files.pythonhosted.org/packages/75/b1/1dc83c2c661b4c62d56cc081706ee33a4fc2835bd90f965baa2663ef7676/protobuf-6.33.4-py3-none-any.whl", hash = 
"sha256:1fe3730068fcf2e595816a6c34fe66eeedd37d51d0400b72fabc848811fdc1bc", size = 170532, upload-time = "2026-01-12T18:33:39.199Z" }, + { url = "https://files.pythonhosted.org/packages/13/c4/6322ab5c8f279c4c358bc14eb8aefc0550b97222a39f04eb3c1af7a830fa/protobuf-7.34.0-cp310-abi3-macosx_10_9_universal2.whl", hash = "sha256:8e329966799f2c271d5e05e236459fe1cbfdb8755aaa3b0914fa60947ddea408", size = 429248, upload-time = "2026-02-27T00:30:14.924Z" }, + { url = "https://files.pythonhosted.org/packages/45/99/b029bbbc61e8937545da5b79aa405ab2d9cf307a728f8c9459ad60d7a481/protobuf-7.34.0-cp310-abi3-manylinux2014_aarch64.whl", hash = "sha256:9d7a5005fb96f3c1e64f397f91500b0eb371b28da81296ae73a6b08a5b76cdd6", size = 325753, upload-time = "2026-02-27T00:30:17.247Z" }, + { url = "https://files.pythonhosted.org/packages/cc/79/09f02671eb75b251c5550a1c48e7b3d4b0623efd7c95a15a50f6f9fc1e2e/protobuf-7.34.0-cp310-abi3-manylinux2014_s390x.whl", hash = "sha256:4a72a8ec94e7a9f7ef7fe818ed26d073305f347f8b3b5ba31e22f81fd85fca02", size = 340200, upload-time = "2026-02-27T00:30:18.672Z" }, + { url = "https://files.pythonhosted.org/packages/b5/57/89727baef7578897af5ed166735ceb315819f1c184da8c3441271dbcfde7/protobuf-7.34.0-cp310-abi3-manylinux2014_x86_64.whl", hash = "sha256:964cf977e07f479c0697964e83deda72bcbc75c3badab506fb061b352d991b01", size = 324268, upload-time = "2026-02-27T00:30:20.088Z" }, + { url = "https://files.pythonhosted.org/packages/1f/3e/38ff2ddee5cc946f575c9d8cc822e34bde205cf61acf8099ad88ef19d7d2/protobuf-7.34.0-cp310-abi3-win32.whl", hash = "sha256:f791ec509707a1d91bd02e07df157e75e4fb9fbdad12a81b7396201ec244e2e3", size = 426628, upload-time = "2026-02-27T00:30:21.555Z" }, + { url = "https://files.pythonhosted.org/packages/cb/71/7c32eaf34a61a1bae1b62a2ac4ffe09b8d1bb0cf93ad505f42040023db89/protobuf-7.34.0-cp310-abi3-win_amd64.whl", hash = "sha256:9f9079f1dde4e32342ecbd1c118d76367090d4aaa19da78230c38101c5b3dd40", size = 437901, upload-time = "2026-02-27T00:30:22.836Z" }, + { url = 
"https://files.pythonhosted.org/packages/a4/e7/14dc9366696dcb53a413449881743426ed289d687bcf3d5aee4726c32ebb/protobuf-7.34.0-py3-none-any.whl", hash = "sha256:e3b914dd77fa33fa06ab2baa97937746ab25695f389869afdf03e81f34e45dc7", size = 170716, upload-time = "2026-02-27T00:30:23.994Z" }, ] [[package]] name = "psutil" -version = "7.2.1" +version = "7.2.2" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/73/cb/09e5184fb5fc0358d110fc3ca7f6b1d033800734d34cac10f4136cfac10e/psutil-7.2.1.tar.gz", hash = "sha256:f7583aec590485b43ca601dd9cea0dcd65bd7bb21d30ef4ddbf4ea6b5ed1bdd3", size = 490253, upload-time = "2025-12-29T08:26:00.169Z" } +sdist = { url = "https://files.pythonhosted.org/packages/aa/c6/d1ddf4abb55e93cebc4f2ed8b5d6dbad109ecb8d63748dd2b20ab5e57ebe/psutil-7.2.2.tar.gz", hash = "sha256:0746f5f8d406af344fd547f1c8daa5f5c33dbc293bb8d6a16d80b4bb88f59372", size = 493740, upload-time = "2026-01-28T18:14:54.428Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/77/8e/f0c242053a368c2aa89584ecd1b054a18683f13d6e5a318fc9ec36582c94/psutil-7.2.1-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:ba9f33bb525b14c3ea563b2fd521a84d2fa214ec59e3e6a2858f78d0844dd60d", size = 129624, upload-time = "2025-12-29T08:26:04.255Z" }, - { url = "https://files.pythonhosted.org/packages/26/97/a58a4968f8990617decee234258a2b4fc7cd9e35668387646c1963e69f26/psutil-7.2.1-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:81442dac7abfc2f4f4385ea9e12ddf5a796721c0f6133260687fec5c3780fa49", size = 130132, upload-time = "2025-12-29T08:26:06.228Z" }, - { url = "https://files.pythonhosted.org/packages/db/6d/ed44901e830739af5f72a85fa7ec5ff1edea7f81bfbf4875e409007149bd/psutil-7.2.1-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ea46c0d060491051d39f0d2cff4f98d5c72b288289f57a21556cc7d504db37fc", size = 180612, upload-time = "2025-12-29T08:26:08.276Z" }, - { url = 
"https://files.pythonhosted.org/packages/c7/65/b628f8459bca4efbfae50d4bf3feaab803de9a160b9d5f3bd9295a33f0c2/psutil-7.2.1-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:35630d5af80d5d0d49cfc4d64c1c13838baf6717a13effb35869a5919b854cdf", size = 183201, upload-time = "2025-12-29T08:26:10.622Z" }, - { url = "https://files.pythonhosted.org/packages/fb/23/851cadc9764edcc18f0effe7d0bf69f727d4cf2442deb4a9f78d4e4f30f2/psutil-7.2.1-cp313-cp313t-win_amd64.whl", hash = "sha256:923f8653416604e356073e6e0bccbe7c09990acef442def2f5640dd0faa9689f", size = 139081, upload-time = "2025-12-29T08:26:12.483Z" }, - { url = "https://files.pythonhosted.org/packages/59/82/d63e8494ec5758029f31c6cb06d7d161175d8281e91d011a4a441c8a43b5/psutil-7.2.1-cp313-cp313t-win_arm64.whl", hash = "sha256:cfbe6b40ca48019a51827f20d830887b3107a74a79b01ceb8cc8de4ccb17b672", size = 134767, upload-time = "2025-12-29T08:26:14.528Z" }, - { url = "https://files.pythonhosted.org/packages/05/c2/5fb764bd61e40e1fe756a44bd4c21827228394c17414ade348e28f83cd79/psutil-7.2.1-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:494c513ccc53225ae23eec7fe6e1482f1b8a44674241b54561f755a898650679", size = 129716, upload-time = "2025-12-29T08:26:16.017Z" }, - { url = "https://files.pythonhosted.org/packages/c9/d2/935039c20e06f615d9ca6ca0ab756cf8408a19d298ffaa08666bc18dc805/psutil-7.2.1-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:3fce5f92c22b00cdefd1645aa58ab4877a01679e901555067b1bd77039aa589f", size = 130133, upload-time = "2025-12-29T08:26:18.009Z" }, - { url = "https://files.pythonhosted.org/packages/77/69/19f1eb0e01d24c2b3eacbc2f78d3b5add8a89bf0bb69465bc8d563cc33de/psutil-7.2.1-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:93f3f7b0bb07711b49626e7940d6fe52aa9940ad86e8f7e74842e73189712129", size = 181518, upload-time = "2025-12-29T08:26:20.241Z" }, - { url = 
"https://files.pythonhosted.org/packages/e1/6d/7e18b1b4fa13ad370787626c95887b027656ad4829c156bb6569d02f3262/psutil-7.2.1-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d34d2ca888208eea2b5c68186841336a7f5e0b990edec929be909353a202768a", size = 184348, upload-time = "2025-12-29T08:26:22.215Z" }, - { url = "https://files.pythonhosted.org/packages/98/60/1672114392dd879586d60dd97896325df47d9a130ac7401318005aab28ec/psutil-7.2.1-cp314-cp314t-win_amd64.whl", hash = "sha256:2ceae842a78d1603753561132d5ad1b2f8a7979cb0c283f5b52fb4e6e14b1a79", size = 140400, upload-time = "2025-12-29T08:26:23.993Z" }, - { url = "https://files.pythonhosted.org/packages/fb/7b/d0e9d4513c46e46897b46bcfc410d51fc65735837ea57a25170f298326e6/psutil-7.2.1-cp314-cp314t-win_arm64.whl", hash = "sha256:08a2f175e48a898c8eb8eace45ce01777f4785bc744c90aa2cc7f2fa5462a266", size = 135430, upload-time = "2025-12-29T08:26:25.999Z" }, - { url = "https://files.pythonhosted.org/packages/c5/cf/5180eb8c8bdf6a503c6919f1da28328bd1e6b3b1b5b9d5b01ae64f019616/psutil-7.2.1-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b2e953fcfaedcfbc952b44744f22d16575d3aa78eb4f51ae74165b4e96e55f42", size = 128137, upload-time = "2025-12-29T08:26:27.759Z" }, - { url = "https://files.pythonhosted.org/packages/c5/2c/78e4a789306a92ade5000da4f5de3255202c534acdadc3aac7b5458fadef/psutil-7.2.1-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:05cc68dbb8c174828624062e73078e7e35406f4ca2d0866c272c2410d8ef06d1", size = 128947, upload-time = "2025-12-29T08:26:29.548Z" }, - { url = "https://files.pythonhosted.org/packages/29/f8/40e01c350ad9a2b3cb4e6adbcc8a83b17ee50dd5792102b6142385937db5/psutil-7.2.1-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:5e38404ca2bb30ed7267a46c02f06ff842e92da3bb8c5bfdadbd35a5722314d8", size = 154694, upload-time = "2025-12-29T08:26:32.147Z" }, - { url = 
"https://files.pythonhosted.org/packages/06/e4/b751cdf839c011a9714a783f120e6a86b7494eb70044d7d81a25a5cd295f/psutil-7.2.1-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ab2b98c9fc19f13f59628d94df5cc4cc4844bc572467d113a8b517d634e362c6", size = 156136, upload-time = "2025-12-29T08:26:34.079Z" }, - { url = "https://files.pythonhosted.org/packages/44/ad/bbf6595a8134ee1e94a4487af3f132cef7fce43aef4a93b49912a48c3af7/psutil-7.2.1-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:f78baafb38436d5a128f837fab2d92c276dfb48af01a240b861ae02b2413ada8", size = 148108, upload-time = "2025-12-29T08:26:36.225Z" }, - { url = "https://files.pythonhosted.org/packages/1c/15/dd6fd869753ce82ff64dcbc18356093471a5a5adf4f77ed1f805d473d859/psutil-7.2.1-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:99a4cd17a5fdd1f3d014396502daa70b5ec21bf4ffe38393e152f8e449757d67", size = 147402, upload-time = "2025-12-29T08:26:39.21Z" }, - { url = "https://files.pythonhosted.org/packages/34/68/d9317542e3f2b180c4306e3f45d3c922d7e86d8ce39f941bb9e2e9d8599e/psutil-7.2.1-cp37-abi3-win_amd64.whl", hash = "sha256:b1b0671619343aa71c20ff9767eced0483e4fc9e1f489d50923738caf6a03c17", size = 136938, upload-time = "2025-12-29T08:26:41.036Z" }, - { url = "https://files.pythonhosted.org/packages/3e/73/2ce007f4198c80fcf2cb24c169884f833fe93fbc03d55d302627b094ee91/psutil-7.2.1-cp37-abi3-win_arm64.whl", hash = "sha256:0d67c1822c355aa6f7314d92018fb4268a76668a536f133599b91edd48759442", size = 133836, upload-time = "2025-12-29T08:26:43.086Z" }, + { url = "https://files.pythonhosted.org/packages/51/08/510cbdb69c25a96f4ae523f733cdc963ae654904e8db864c07585ef99875/psutil-7.2.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2edccc433cbfa046b980b0df0171cd25bcaeb3a68fe9022db0979e7aa74a826b", size = 130595, upload-time = "2026-01-28T18:14:57.293Z" }, + { url = 
"https://files.pythonhosted.org/packages/d6/f5/97baea3fe7a5a9af7436301f85490905379b1c6f2dd51fe3ecf24b4c5fbf/psutil-7.2.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:e78c8603dcd9a04c7364f1a3e670cea95d51ee865e4efb3556a3a63adef958ea", size = 131082, upload-time = "2026-01-28T18:14:59.732Z" }, + { url = "https://files.pythonhosted.org/packages/37/d6/246513fbf9fa174af531f28412297dd05241d97a75911ac8febefa1a53c6/psutil-7.2.2-cp313-cp313t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1a571f2330c966c62aeda00dd24620425d4b0cc86881c89861fbc04549e5dc63", size = 181476, upload-time = "2026-01-28T18:15:01.884Z" }, + { url = "https://files.pythonhosted.org/packages/b8/b5/9182c9af3836cca61696dabe4fd1304e17bc56cb62f17439e1154f225dd3/psutil-7.2.2-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:917e891983ca3c1887b4ef36447b1e0873e70c933afc831c6b6da078ba474312", size = 184062, upload-time = "2026-01-28T18:15:04.436Z" }, + { url = "https://files.pythonhosted.org/packages/16/ba/0756dca669f5a9300d0cbcbfae9a4c30e446dfc7440ffe43ded5724bfd93/psutil-7.2.2-cp313-cp313t-win_amd64.whl", hash = "sha256:ab486563df44c17f5173621c7b198955bd6b613fb87c71c161f827d3fb149a9b", size = 139893, upload-time = "2026-01-28T18:15:06.378Z" }, + { url = "https://files.pythonhosted.org/packages/1c/61/8fa0e26f33623b49949346de05ec1ddaad02ed8ba64af45f40a147dbfa97/psutil-7.2.2-cp313-cp313t-win_arm64.whl", hash = "sha256:ae0aefdd8796a7737eccea863f80f81e468a1e4cf14d926bd9b6f5f2d5f90ca9", size = 135589, upload-time = "2026-01-28T18:15:08.03Z" }, + { url = "https://files.pythonhosted.org/packages/81/69/ef179ab5ca24f32acc1dac0c247fd6a13b501fd5534dbae0e05a1c48b66d/psutil-7.2.2-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:eed63d3b4d62449571547b60578c5b2c4bcccc5387148db46e0c2313dad0ee00", size = 130664, upload-time = "2026-01-28T18:15:09.469Z" }, + { url = 
"https://files.pythonhosted.org/packages/7b/64/665248b557a236d3fa9efc378d60d95ef56dd0a490c2cd37dafc7660d4a9/psutil-7.2.2-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7b6d09433a10592ce39b13d7be5a54fbac1d1228ed29abc880fb23df7cb694c9", size = 131087, upload-time = "2026-01-28T18:15:11.724Z" }, + { url = "https://files.pythonhosted.org/packages/d5/2e/e6782744700d6759ebce3043dcfa661fb61e2fb752b91cdeae9af12c2178/psutil-7.2.2-cp314-cp314t-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1fa4ecf83bcdf6e6c8f4449aff98eefb5d0604bf88cb883d7da3d8d2d909546a", size = 182383, upload-time = "2026-01-28T18:15:13.445Z" }, + { url = "https://files.pythonhosted.org/packages/57/49/0a41cefd10cb7505cdc04dab3eacf24c0c2cb158a998b8c7b1d27ee2c1f5/psutil-7.2.2-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e452c464a02e7dc7822a05d25db4cde564444a67e58539a00f929c51eddda0cf", size = 185210, upload-time = "2026-01-28T18:15:16.002Z" }, + { url = "https://files.pythonhosted.org/packages/dd/2c/ff9bfb544f283ba5f83ba725a3c5fec6d6b10b8f27ac1dc641c473dc390d/psutil-7.2.2-cp314-cp314t-win_amd64.whl", hash = "sha256:c7663d4e37f13e884d13994247449e9f8f574bc4655d509c3b95e9ec9e2b9dc1", size = 141228, upload-time = "2026-01-28T18:15:18.385Z" }, + { url = "https://files.pythonhosted.org/packages/f2/fc/f8d9c31db14fcec13748d373e668bc3bed94d9077dbc17fb0eebc073233c/psutil-7.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:11fe5a4f613759764e79c65cf11ebdf26e33d6dd34336f8a337aa2996d71c841", size = 136284, upload-time = "2026-01-28T18:15:19.912Z" }, + { url = "https://files.pythonhosted.org/packages/e7/36/5ee6e05c9bd427237b11b3937ad82bb8ad2752d72c6969314590dd0c2f6e/psutil-7.2.2-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ed0cace939114f62738d808fdcecd4c869222507e266e574799e9c0faa17d486", size = 129090, upload-time = "2026-01-28T18:15:22.168Z" }, + { url = 
"https://files.pythonhosted.org/packages/80/c4/f5af4c1ca8c1eeb2e92ccca14ce8effdeec651d5ab6053c589b074eda6e1/psutil-7.2.2-cp36-abi3-macosx_11_0_arm64.whl", hash = "sha256:1a7b04c10f32cc88ab39cbf606e117fd74721c831c98a27dc04578deb0c16979", size = 129859, upload-time = "2026-01-28T18:15:23.795Z" }, + { url = "https://files.pythonhosted.org/packages/b5/70/5d8df3b09e25bce090399cf48e452d25c935ab72dad19406c77f4e828045/psutil-7.2.2-cp36-abi3-manylinux2010_x86_64.manylinux_2_12_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:076a2d2f923fd4821644f5ba89f059523da90dc9014e85f8e45a5774ca5bc6f9", size = 155560, upload-time = "2026-01-28T18:15:25.976Z" }, + { url = "https://files.pythonhosted.org/packages/63/65/37648c0c158dc222aba51c089eb3bdfa238e621674dc42d48706e639204f/psutil-7.2.2-cp36-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b0726cecd84f9474419d67252add4ac0cd9811b04d61123054b9fb6f57df6e9e", size = 156997, upload-time = "2026-01-28T18:15:27.794Z" }, + { url = "https://files.pythonhosted.org/packages/8e/13/125093eadae863ce03c6ffdbae9929430d116a246ef69866dad94da3bfbc/psutil-7.2.2-cp36-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:fd04ef36b4a6d599bbdb225dd1d3f51e00105f6d48a28f006da7f9822f2606d8", size = 148972, upload-time = "2026-01-28T18:15:29.342Z" }, + { url = "https://files.pythonhosted.org/packages/04/78/0acd37ca84ce3ddffaa92ef0f571e073faa6d8ff1f0559ab1272188ea2be/psutil-7.2.2-cp36-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:b58fabe35e80b264a4e3bb23e6b96f9e45a3df7fb7eed419ac0e5947c61e47cc", size = 148266, upload-time = "2026-01-28T18:15:31.597Z" }, + { url = "https://files.pythonhosted.org/packages/b4/90/e2159492b5426be0c1fef7acba807a03511f97c5f86b3caeda6ad92351a7/psutil-7.2.2-cp37-abi3-win_amd64.whl", hash = "sha256:eb7e81434c8d223ec4a219b5fc1c47d0417b12be7ea866e24fb5ad6e84b3d988", size = 137737, upload-time = "2026-01-28T18:15:33.849Z" }, + { url = 
"https://files.pythonhosted.org/packages/8c/c7/7bb2e321574b10df20cbde462a94e2b71d05f9bbda251ef27d104668306a/psutil-7.2.2-cp37-abi3-win_arm64.whl", hash = "sha256:8c233660f575a5a89e6d4cb65d9f938126312bca76d8fe087b947b3a1aaac9ee", size = 134617, upload-time = "2026-01-28T18:15:36.514Z" }, ] [[package]] name = "pycparser" -version = "2.23" +version = "3.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/fe/cf/d2d3b9f5699fb1e4615c8e32ff220203e43b248e1dfcc6736ad9057731ca/pycparser-2.23.tar.gz", hash = "sha256:78816d4f24add8f10a06d6f05b4d424ad9e96cfebf68a4ddc99c65c0720d00c2", size = 173734, upload-time = "2025-09-09T13:23:47.91Z" } +sdist = { url = "https://files.pythonhosted.org/packages/1b/7d/92392ff7815c21062bea51aa7b87d45576f649f16458d78b7cf94b9ab2e6/pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29", size = 103492, upload-time = "2026-01-21T14:26:51.89Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/a0/e3/59cd50310fc9b59512193629e1984c1f95e5c8ae6e5d8c69532ccc65a7fe/pycparser-2.23-py3-none-any.whl", hash = "sha256:e5c6e8d3fbad53479cab09ac03729e0a9faf2bee3db8208a550daf5af81a5934", size = 118140, upload-time = "2025-09-09T13:23:46.651Z" }, + { url = "https://files.pythonhosted.org/packages/0c/c3/44f3fbbfa403ea2a7c779186dc20772604442dde72947e7d01069cbe98e3/pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992", size = 48172, upload-time = "2026-01-21T14:26:50.693Z" }, ] [[package]] @@ -1103,11 +1130,11 @@ wheels = [ [[package]] name = "pyjwt" -version = "2.10.1" +version = "2.11.0" source = { registry = "https://pypi.org/simple" } -sdist = { url = "https://files.pythonhosted.org/packages/e7/46/bd74733ff231675599650d3e47f361794b22ef3e3770998dda30d3b63726/pyjwt-2.10.1.tar.gz", hash = "sha256:3cc5772eb20009233caf06e9d8a0577824723b44e6648ee0a2aedb6cf9381953", size = 87785, upload-time = 
"2024-11-28T03:43:29.933Z" } +sdist = { url = "https://files.pythonhosted.org/packages/5c/5a/b46fa56bf322901eee5b0454a34343cdbdae202cd421775a8ee4e42fd519/pyjwt-2.11.0.tar.gz", hash = "sha256:35f95c1f0fbe5d5ba6e43f00271c275f7a1a4db1dab27bf708073b75318ea623", size = 98019, upload-time = "2026-01-30T19:59:55.694Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/61/ad/689f02752eeec26aed679477e80e632ef1b682313be70793d798c1d5fc8f/PyJWT-2.10.1-py3-none-any.whl", hash = "sha256:dcdd193e30abefd5debf142f9adfcdd2b58004e644f25406ffaebd50bd98dacb", size = 22997, upload-time = "2024-11-28T03:43:27.893Z" }, + { url = "https://files.pythonhosted.org/packages/6f/01/c26ce75ba460d5cd503da9e13b21a33804d38c2165dec7b716d06b13010c/pyjwt-2.11.0-py3-none-any.whl", hash = "sha256:94a6bde30eb5c8e04fee991062b534071fd1439ef58d2adc9ccb823e7bcd0469", size = 28224, upload-time = "2026-01-30T19:59:54.539Z" }, ] [package.optional-dependencies] @@ -1178,15 +1205,15 @@ wheels = [ [[package]] name = "rich" -version = "14.3.2" +version = "14.3.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "markdown-it-py" }, { name = "pygments" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/74/99/a4cab2acbb884f80e558b0771e97e21e939c5dfb460f488d19df485e8298/rich-14.3.2.tar.gz", hash = "sha256:e712f11c1a562a11843306f5ed999475f09ac31ffb64281f73ab29ffdda8b3b8", size = 230143, upload-time = "2026-02-01T16:20:47.908Z" } +sdist = { url = "https://files.pythonhosted.org/packages/b3/c6/f3b320c27991c46f43ee9d856302c70dc2d0fb2dba4842ff739d5f46b393/rich-14.3.3.tar.gz", hash = "sha256:b8daa0b9e4eef54dd8cf7c86c03713f53241884e814f4e2f5fb342fe520f639b", size = 230582, upload-time = "2026-02-19T17:23:12.474Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ef/45/615f5babd880b4bd7d405cc0dc348234c5ffb6ed1ea33e152ede08b2072d/rich-14.3.2-py3-none-any.whl", hash = "sha256:08e67c3e90884651da3239ea668222d19bea7b589149d8014a21c633420dbb69", size = 309963, 
upload-time = "2026-02-01T16:20:46.078Z" }, + { url = "https://files.pythonhosted.org/packages/14/25/b208c5683343959b670dc001595f2f3737e051da617f66c31f7c4fa93abc/rich-14.3.3-py3-none-any.whl", hash = "sha256:793431c1f8619afa7d3b52b2cdec859562b950ea0d4b6b505397612db8d5362d", size = 310458, upload-time = "2026-02-19T17:23:13.732Z" }, ] [[package]] @@ -1199,39 +1226,29 @@ wheels = [ ] [[package]] -name = "smoke-test" -version = "0.1.0" -source = { editable = "." } -dependencies = [ - { name = "gen-worker" }, -] - -[package.dev-dependencies] -dev = [ - { name = "mypy" }, +name = "tomli-w" +version = "1.2.0" +source = { registry = "https://pypi.org/simple" } +sdist = { url = "https://files.pythonhosted.org/packages/19/75/241269d1da26b624c0d5e110e8149093c759b7a286138f4efd61a60e75fe/tomli_w-1.2.0.tar.gz", hash = "sha256:2dd14fac5a47c27be9cd4c976af5a12d87fb1f0b4512f81d69cce3b35ae25021", size = 7184, upload-time = "2025-01-15T12:07:24.262Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/c7/18/c86eb8e0202e32dd3df50d43d7ff9854f8e0603945ff398974c1d91ac1ef/tomli_w-1.2.0-py3-none-any.whl", hash = "sha256:188306098d013b691fcadc011abd66727d3c414c571bb01b1a174ba8c983cf90", size = 6675, upload-time = "2025-01-15T12:07:22.074Z" }, ] -[package.metadata] -requires-dist = [{ name = "gen-worker", specifier = "==0.3.0" }] - -[package.metadata.requires-dev] -dev = [{ name = "mypy", specifier = ">=1.10.0" }] - [[package]] name = "tqdm" -version = "4.67.1" +version = "4.67.3" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "colorama", marker = "sys_platform == 'win32'" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/a8/4b/29b4ef32e036bb34e4ab51796dd745cdba7ed47ad142a9f4a1eb8e0c744d/tqdm-4.67.1.tar.gz", hash = "sha256:f8aef9c52c08c13a65f30ea34f4e5aac3fd1a34959879d7e59e63027286627f2", size = 169737, upload-time = "2024-11-24T20:12:22.481Z" } +sdist = { url = 
"https://files.pythonhosted.org/packages/09/a9/6ba95a270c6f1fbcd8dac228323f2777d886cb206987444e4bce66338dd4/tqdm-4.67.3.tar.gz", hash = "sha256:7d825f03f89244ef73f1d4ce193cb1774a8179fd96f31d7e1dcde62092b960bb", size = 169598, upload-time = "2026-02-03T17:35:53.048Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/d0/30/dc54f88dd4a2b5dc8a0279bdd7270e735851848b762aeb1c1184ed1f6b14/tqdm-4.67.1-py3-none-any.whl", hash = "sha256:26445eca388f82e72884e0d580d5464cd801a3ea01e63e5601bdff9ba6a48de2", size = 78540, upload-time = "2024-11-24T20:12:19.698Z" }, + { url = "https://files.pythonhosted.org/packages/16/e1/3079a9ff9b8e11b846c6ac5c8b5bfb7ff225eee721825310c91b3b50304f/tqdm-4.67.3-py3-none-any.whl", hash = "sha256:ee1e4c0e59148062281c49d80b25b67771a127c85fc9676d3be5f243206826bf", size = 78374, upload-time = "2026-02-03T17:35:50.982Z" }, ] [[package]] name = "typer" -version = "0.23.1" +version = "0.24.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "annotated-doc" }, @@ -1239,21 +1256,9 @@ dependencies = [ { name = "rich" }, { name = "shellingham" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/fd/07/b822e1b307d40e263e8253d2384cf98c51aa2368cc7ba9a07e523a1d964b/typer-0.23.1.tar.gz", hash = "sha256:2070374e4d31c83e7b61362fd859aa683576432fd5b026b060ad6b4cd3b86134", size = 120047, upload-time = "2026-02-13T10:04:30.984Z" } -wheels = [ - { url = "https://files.pythonhosted.org/packages/d5/91/9b286ab899c008c2cb05e8be99814807e7fbbd33f0c0c960470826e5ac82/typer-0.23.1-py3-none-any.whl", hash = "sha256:3291ad0d3c701cbf522012faccfbb29352ff16ad262db2139e6b01f15781f14e", size = 56813, upload-time = "2026-02-13T10:04:32.008Z" }, -] - -[[package]] -name = "typer-slim" -version = "0.23.1" -source = { registry = "https://pypi.org/simple" } -dependencies = [ - { name = "typer" }, -] -sdist = { url = 
"https://files.pythonhosted.org/packages/da/22/b9c47b8655937b6877d40791b937931702ba9c5f9d28753199266aa96f50/typer_slim-0.23.1.tar.gz", hash = "sha256:dfe92a6317030ee2380f65bf92e540d7c77fefcc689e10d585b4925b45b5e06a", size = 4762, upload-time = "2026-02-13T10:04:26.416Z" } +sdist = { url = "https://files.pythonhosted.org/packages/f5/24/cb09efec5cc954f7f9b930bf8279447d24618bb6758d4f6adf2574c41780/typer-0.24.1.tar.gz", hash = "sha256:e39b4732d65fbdcde189ae76cf7cd48aeae72919dea1fdfc16593be016256b45", size = 118613, upload-time = "2026-02-21T16:54:40.609Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/ad/8a/5764b851659345f34787f1b6eb30b9d308bbd6c294825cbe38b6b869c97a/typer_slim-0.23.1-py3-none-any.whl", hash = "sha256:8146d5df1eb89f628191c4c604c8464fa841885d0733c58e6e700ff0228adac5", size = 3397, upload-time = "2026-02-13T10:04:27.132Z" }, + { url = "https://files.pythonhosted.org/packages/4a/91/48db081e7a63bb37284f9fbcefda7c44c277b18b0e13fbc36ea2335b71e6/typer-0.24.1-py3-none-any.whl", hash = "sha256:112c1f0ce578bfb4cab9ffdabc68f031416ebcc216536611ba21f04e9aa84c9e", size = 56085, upload-time = "2026-02-21T16:54:41.616Z" }, ] [[package]] @@ -1276,94 +1281,104 @@ wheels = [ [[package]] name = "yarl" -version = "1.22.0" +version = "1.23.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "idna" }, { name = "multidict" }, { name = "propcache" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/57/63/0c6ebca57330cd313f6102b16dd57ffaf3ec4c83403dcb45dbd15c6f3ea1/yarl-1.22.0.tar.gz", hash = "sha256:bebf8557577d4401ba8bd9ff33906f1376c877aa78d1fe216ad01b4d6745af71", size = 187169, upload-time = "2025-10-06T14:12:55.963Z" } +sdist = { url = "https://files.pythonhosted.org/packages/23/6e/beb1beec874a72f23815c1434518bfc4ed2175065173fb138c3705f658d4/yarl-1.23.0.tar.gz", hash = "sha256:53b1ea6ca88ebd4420379c330aea57e258408dd0df9af0992e5de2078dc9f5d5", size = 194676, upload-time = "2026-03-01T22:07:53.373Z" } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/75/ff/46736024fee3429b80a165a732e38e5d5a238721e634ab41b040d49f8738/yarl-1.22.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:e340382d1afa5d32b892b3ff062436d592ec3d692aeea3bef3a5cfe11bbf8c6f", size = 142000, upload-time = "2025-10-06T14:09:44.631Z" }, - { url = "https://files.pythonhosted.org/packages/5a/9a/b312ed670df903145598914770eb12de1bac44599549b3360acc96878df8/yarl-1.22.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:f1e09112a2c31ffe8d80be1b0988fa6a18c5d5cad92a9ffbb1c04c91bfe52ad2", size = 94338, upload-time = "2025-10-06T14:09:46.372Z" }, - { url = "https://files.pythonhosted.org/packages/ba/f5/0601483296f09c3c65e303d60c070a5c19fcdbc72daa061e96170785bc7d/yarl-1.22.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:939fe60db294c786f6b7c2d2e121576628468f65453d86b0fe36cb52f987bd74", size = 94909, upload-time = "2025-10-06T14:09:48.648Z" }, - { url = "https://files.pythonhosted.org/packages/60/41/9a1fe0b73dbcefce72e46cf149b0e0a67612d60bfc90fb59c2b2efdfbd86/yarl-1.22.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:e1651bf8e0398574646744c1885a41198eba53dc8a9312b954073f845c90a8df", size = 372940, upload-time = "2025-10-06T14:09:50.089Z" }, - { url = "https://files.pythonhosted.org/packages/17/7a/795cb6dfee561961c30b800f0ed616b923a2ec6258b5def2a00bf8231334/yarl-1.22.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b8a0588521a26bf92a57a1705b77b8b59044cdceccac7151bd8d229e66b8dedb", size = 345825, upload-time = "2025-10-06T14:09:52.142Z" }, - { url = "https://files.pythonhosted.org/packages/d7/93/a58f4d596d2be2ae7bab1a5846c4d270b894958845753b2c606d666744d3/yarl-1.22.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:42188e6a615c1a75bcaa6e150c3fe8f3e8680471a6b10150c5f7e83f47cc34d2", size = 386705, upload-time = "2025-10-06T14:09:54.128Z" }, - { url = 
"https://files.pythonhosted.org/packages/61/92/682279d0e099d0e14d7fd2e176bd04f48de1484f56546a3e1313cd6c8e7c/yarl-1.22.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:f6d2cb59377d99718913ad9a151030d6f83ef420a2b8f521d94609ecc106ee82", size = 396518, upload-time = "2025-10-06T14:09:55.762Z" }, - { url = "https://files.pythonhosted.org/packages/db/0f/0d52c98b8a885aeda831224b78f3be7ec2e1aa4a62091f9f9188c3c65b56/yarl-1.22.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50678a3b71c751d58d7908edc96d332af328839eea883bb554a43f539101277a", size = 377267, upload-time = "2025-10-06T14:09:57.958Z" }, - { url = "https://files.pythonhosted.org/packages/22/42/d2685e35908cbeaa6532c1fc73e89e7f2efb5d8a7df3959ea8e37177c5a3/yarl-1.22.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:1e8fbaa7cec507aa24ea27a01456e8dd4b6fab829059b69844bd348f2d467124", size = 365797, upload-time = "2025-10-06T14:09:59.527Z" }, - { url = "https://files.pythonhosted.org/packages/a2/83/cf8c7bcc6355631762f7d8bdab920ad09b82efa6b722999dfb05afa6cfac/yarl-1.22.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:433885ab5431bc3d3d4f2f9bd15bfa1614c522b0f1405d62c4f926ccd69d04fa", size = 365535, upload-time = "2025-10-06T14:10:01.139Z" }, - { url = "https://files.pythonhosted.org/packages/25/e1/5302ff9b28f0c59cac913b91fe3f16c59a033887e57ce9ca5d41a3a94737/yarl-1.22.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:b790b39c7e9a4192dc2e201a282109ed2985a1ddbd5ac08dc56d0e121400a8f7", size = 382324, upload-time = "2025-10-06T14:10:02.756Z" }, - { url = "https://files.pythonhosted.org/packages/bf/cd/4617eb60f032f19ae3a688dc990d8f0d89ee0ea378b61cac81ede3e52fae/yarl-1.22.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:31f0b53913220599446872d757257be5898019c85e7971599065bc55065dc99d", size = 383803, upload-time = "2025-10-06T14:10:04.552Z" }, - { url = 
"https://files.pythonhosted.org/packages/59/65/afc6e62bb506a319ea67b694551dab4a7e6fb7bf604e9bd9f3e11d575fec/yarl-1.22.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:a49370e8f711daec68d09b821a34e1167792ee2d24d405cbc2387be4f158b520", size = 374220, upload-time = "2025-10-06T14:10:06.489Z" }, - { url = "https://files.pythonhosted.org/packages/e7/3d/68bf18d50dc674b942daec86a9ba922d3113d8399b0e52b9897530442da2/yarl-1.22.0-cp312-cp312-win32.whl", hash = "sha256:70dfd4f241c04bd9239d53b17f11e6ab672b9f1420364af63e8531198e3f5fe8", size = 81589, upload-time = "2025-10-06T14:10:09.254Z" }, - { url = "https://files.pythonhosted.org/packages/c8/9a/6ad1a9b37c2f72874f93e691b2e7ecb6137fb2b899983125db4204e47575/yarl-1.22.0-cp312-cp312-win_amd64.whl", hash = "sha256:8884d8b332a5e9b88e23f60bb166890009429391864c685e17bd73a9eda9105c", size = 87213, upload-time = "2025-10-06T14:10:11.369Z" }, - { url = "https://files.pythonhosted.org/packages/44/c5/c21b562d1680a77634d748e30c653c3ca918beb35555cff24986fff54598/yarl-1.22.0-cp312-cp312-win_arm64.whl", hash = "sha256:ea70f61a47f3cc93bdf8b2f368ed359ef02a01ca6393916bc8ff877427181e74", size = 81330, upload-time = "2025-10-06T14:10:13.112Z" }, - { url = "https://files.pythonhosted.org/packages/ea/f3/d67de7260456ee105dc1d162d43a019ecad6b91e2f51809d6cddaa56690e/yarl-1.22.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:8dee9c25c74997f6a750cd317b8ca63545169c098faee42c84aa5e506c819b53", size = 139980, upload-time = "2025-10-06T14:10:14.601Z" }, - { url = "https://files.pythonhosted.org/packages/01/88/04d98af0b47e0ef42597b9b28863b9060bb515524da0a65d5f4db160b2d5/yarl-1.22.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:01e73b85a5434f89fc4fe27dcda2aff08ddf35e4d47bbbea3bdcd25321af538a", size = 93424, upload-time = "2025-10-06T14:10:16.115Z" }, - { url = "https://files.pythonhosted.org/packages/18/91/3274b215fd8442a03975ce6bee5fe6aa57a8326b29b9d3d56234a1dca244/yarl-1.22.0-cp313-cp313-macosx_11_0_arm64.whl", hash = 
"sha256:22965c2af250d20c873cdbee8ff958fb809940aeb2e74ba5f20aaf6b7ac8c70c", size = 93821, upload-time = "2025-10-06T14:10:17.993Z" }, - { url = "https://files.pythonhosted.org/packages/61/3a/caf4e25036db0f2da4ca22a353dfeb3c9d3c95d2761ebe9b14df8fc16eb0/yarl-1.22.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:b4f15793aa49793ec8d1c708ab7f9eded1aa72edc5174cae703651555ed1b601", size = 373243, upload-time = "2025-10-06T14:10:19.44Z" }, - { url = "https://files.pythonhosted.org/packages/6e/9e/51a77ac7516e8e7803b06e01f74e78649c24ee1021eca3d6a739cb6ea49c/yarl-1.22.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e5542339dcf2747135c5c85f68680353d5cb9ffd741c0f2e8d832d054d41f35a", size = 342361, upload-time = "2025-10-06T14:10:21.124Z" }, - { url = "https://files.pythonhosted.org/packages/d4/f8/33b92454789dde8407f156c00303e9a891f1f51a0330b0fad7c909f87692/yarl-1.22.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:5c401e05ad47a75869c3ab3e35137f8468b846770587e70d71e11de797d113df", size = 387036, upload-time = "2025-10-06T14:10:22.902Z" }, - { url = "https://files.pythonhosted.org/packages/d9/9a/c5db84ea024f76838220280f732970aa4ee154015d7f5c1bfb60a267af6f/yarl-1.22.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:243dda95d901c733f5b59214d28b0120893d91777cb8aa043e6ef059d3cddfe2", size = 397671, upload-time = "2025-10-06T14:10:24.523Z" }, - { url = "https://files.pythonhosted.org/packages/11/c9/cd8538dc2e7727095e0c1d867bad1e40c98f37763e6d995c1939f5fdc7b1/yarl-1.22.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bec03d0d388060058f5d291a813f21c011041938a441c593374da6077fe21b1b", size = 377059, upload-time = "2025-10-06T14:10:26.406Z" }, - { url = 
"https://files.pythonhosted.org/packages/a1/b9/ab437b261702ced75122ed78a876a6dec0a1b0f5e17a4ac7a9a2482d8abe/yarl-1.22.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:b0748275abb8c1e1e09301ee3cf90c8a99678a4e92e4373705f2a2570d581273", size = 365356, upload-time = "2025-10-06T14:10:28.461Z" }, - { url = "https://files.pythonhosted.org/packages/b2/9d/8e1ae6d1d008a9567877b08f0ce4077a29974c04c062dabdb923ed98e6fe/yarl-1.22.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:47fdb18187e2a4e18fda2c25c05d8251a9e4a521edaed757fef033e7d8498d9a", size = 361331, upload-time = "2025-10-06T14:10:30.541Z" }, - { url = "https://files.pythonhosted.org/packages/ca/5a/09b7be3905962f145b73beb468cdd53db8aa171cf18c80400a54c5b82846/yarl-1.22.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c7044802eec4524fde550afc28edda0dd5784c4c45f0be151a2d3ba017daca7d", size = 382590, upload-time = "2025-10-06T14:10:33.352Z" }, - { url = "https://files.pythonhosted.org/packages/aa/7f/59ec509abf90eda5048b0bc3e2d7b5099dffdb3e6b127019895ab9d5ef44/yarl-1.22.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:139718f35149ff544caba20fce6e8a2f71f1e39b92c700d8438a0b1d2a631a02", size = 385316, upload-time = "2025-10-06T14:10:35.034Z" }, - { url = "https://files.pythonhosted.org/packages/e5/84/891158426bc8036bfdfd862fabd0e0fa25df4176ec793e447f4b85cf1be4/yarl-1.22.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e1b51bebd221006d3d2f95fbe124b22b247136647ae5dcc8c7acafba66e5ee67", size = 374431, upload-time = "2025-10-06T14:10:37.76Z" }, - { url = "https://files.pythonhosted.org/packages/bb/49/03da1580665baa8bef5e8ed34c6df2c2aca0a2f28bf397ed238cc1bbc6f2/yarl-1.22.0-cp313-cp313-win32.whl", hash = "sha256:d3e32536234a95f513bd374e93d717cf6b2231a791758de6c509e3653f234c95", size = 81555, upload-time = "2025-10-06T14:10:39.649Z" }, - { url = "https://files.pythonhosted.org/packages/9a/ee/450914ae11b419eadd067c6183ae08381cfdfcb9798b90b2b713bbebddda/yarl-1.22.0-cp313-cp313-win_amd64.whl", hash = 
"sha256:47743b82b76d89a1d20b83e60d5c20314cbd5ba2befc9cda8f28300c4a08ed4d", size = 86965, upload-time = "2025-10-06T14:10:41.313Z" }, - { url = "https://files.pythonhosted.org/packages/98/4d/264a01eae03b6cf629ad69bae94e3b0e5344741e929073678e84bf7a3e3b/yarl-1.22.0-cp313-cp313-win_arm64.whl", hash = "sha256:5d0fcda9608875f7d052eff120c7a5da474a6796fe4d83e152e0e4d42f6d1a9b", size = 81205, upload-time = "2025-10-06T14:10:43.167Z" }, - { url = "https://files.pythonhosted.org/packages/88/fc/6908f062a2f77b5f9f6d69cecb1747260831ff206adcbc5b510aff88df91/yarl-1.22.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:719ae08b6972befcba4310e49edb1161a88cdd331e3a694b84466bd938a6ab10", size = 146209, upload-time = "2025-10-06T14:10:44.643Z" }, - { url = "https://files.pythonhosted.org/packages/65/47/76594ae8eab26210b4867be6f49129861ad33da1f1ebdf7051e98492bf62/yarl-1.22.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:47d8a5c446df1c4db9d21b49619ffdba90e77c89ec6e283f453856c74b50b9e3", size = 95966, upload-time = "2025-10-06T14:10:46.554Z" }, - { url = "https://files.pythonhosted.org/packages/ab/ce/05e9828a49271ba6b5b038b15b3934e996980dd78abdfeb52a04cfb9467e/yarl-1.22.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cfebc0ac8333520d2d0423cbbe43ae43c8838862ddb898f5ca68565e395516e9", size = 97312, upload-time = "2025-10-06T14:10:48.007Z" }, - { url = "https://files.pythonhosted.org/packages/d1/c5/7dffad5e4f2265b29c9d7ec869c369e4223166e4f9206fc2243ee9eea727/yarl-1.22.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4398557cbf484207df000309235979c79c4356518fd5c99158c7d38203c4da4f", size = 361967, upload-time = "2025-10-06T14:10:49.997Z" }, - { url = "https://files.pythonhosted.org/packages/50/b2/375b933c93a54bff7fc041e1a6ad2c0f6f733ffb0c6e642ce56ee3b39970/yarl-1.22.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = 
"sha256:2ca6fd72a8cd803be290d42f2dec5cdcd5299eeb93c2d929bf060ad9efaf5de0", size = 323949, upload-time = "2025-10-06T14:10:52.004Z" }, - { url = "https://files.pythonhosted.org/packages/66/50/bfc2a29a1d78644c5a7220ce2f304f38248dc94124a326794e677634b6cf/yarl-1.22.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:ca1f59c4e1ab6e72f0a23c13fca5430f889634166be85dbf1013683e49e3278e", size = 361818, upload-time = "2025-10-06T14:10:54.078Z" }, - { url = "https://files.pythonhosted.org/packages/46/96/f3941a46af7d5d0f0498f86d71275696800ddcdd20426298e572b19b91ff/yarl-1.22.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:6c5010a52015e7c70f86eb967db0f37f3c8bd503a695a49f8d45700144667708", size = 372626, upload-time = "2025-10-06T14:10:55.767Z" }, - { url = "https://files.pythonhosted.org/packages/c1/42/8b27c83bb875cd89448e42cd627e0fb971fa1675c9ec546393d18826cb50/yarl-1.22.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9d7672ecf7557476642c88497c2f8d8542f8e36596e928e9bcba0e42e1e7d71f", size = 341129, upload-time = "2025-10-06T14:10:57.985Z" }, - { url = "https://files.pythonhosted.org/packages/49/36/99ca3122201b382a3cf7cc937b95235b0ac944f7e9f2d5331d50821ed352/yarl-1.22.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:3b7c88eeef021579d600e50363e0b6ee4f7f6f728cd3486b9d0f3ee7b946398d", size = 346776, upload-time = "2025-10-06T14:10:59.633Z" }, - { url = "https://files.pythonhosted.org/packages/85/b4/47328bf996acd01a4c16ef9dcd2f59c969f495073616586f78cd5f2efb99/yarl-1.22.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:f4afb5c34f2c6fecdcc182dfcfc6af6cccf1aa923eed4d6a12e9d96904e1a0d8", size = 334879, upload-time = "2025-10-06T14:11:01.454Z" }, - { url = "https://files.pythonhosted.org/packages/c2/ad/b77d7b3f14a4283bffb8e92c6026496f6de49751c2f97d4352242bba3990/yarl-1.22.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = 
"sha256:59c189e3e99a59cf8d83cbb31d4db02d66cda5a1a4374e8a012b51255341abf5", size = 350996, upload-time = "2025-10-06T14:11:03.452Z" }, - { url = "https://files.pythonhosted.org/packages/81/c8/06e1d69295792ba54d556f06686cbd6a7ce39c22307100e3fb4a2c0b0a1d/yarl-1.22.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:5a3bf7f62a289fa90f1990422dc8dff5a458469ea71d1624585ec3a4c8d6960f", size = 356047, upload-time = "2025-10-06T14:11:05.115Z" }, - { url = "https://files.pythonhosted.org/packages/4b/b8/4c0e9e9f597074b208d18cef227d83aac36184bfbc6eab204ea55783dbc5/yarl-1.22.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:de6b9a04c606978fdfe72666fa216ffcf2d1a9f6a381058d4378f8d7b1e5de62", size = 342947, upload-time = "2025-10-06T14:11:08.137Z" }, - { url = "https://files.pythonhosted.org/packages/e0/e5/11f140a58bf4c6ad7aca69a892bff0ee638c31bea4206748fc0df4ebcb3a/yarl-1.22.0-cp313-cp313t-win32.whl", hash = "sha256:1834bb90991cc2999f10f97f5f01317f99b143284766d197e43cd5b45eb18d03", size = 86943, upload-time = "2025-10-06T14:11:10.284Z" }, - { url = "https://files.pythonhosted.org/packages/31/74/8b74bae38ed7fe6793d0c15a0c8207bbb819cf287788459e5ed230996cdd/yarl-1.22.0-cp313-cp313t-win_amd64.whl", hash = "sha256:ff86011bd159a9d2dfc89c34cfd8aff12875980e3bd6a39ff097887520e60249", size = 93715, upload-time = "2025-10-06T14:11:11.739Z" }, - { url = "https://files.pythonhosted.org/packages/69/66/991858aa4b5892d57aef7ee1ba6b4d01ec3b7eb3060795d34090a3ca3278/yarl-1.22.0-cp313-cp313t-win_arm64.whl", hash = "sha256:7861058d0582b847bc4e3a4a4c46828a410bca738673f35a29ba3ca5db0b473b", size = 83857, upload-time = "2025-10-06T14:11:13.586Z" }, - { url = "https://files.pythonhosted.org/packages/46/b3/e20ef504049f1a1c54a814b4b9bed96d1ac0e0610c3b4da178f87209db05/yarl-1.22.0-cp314-cp314-macosx_10_13_universal2.whl", hash = "sha256:34b36c2c57124530884d89d50ed2c1478697ad7473efd59cfd479945c95650e4", size = 140520, upload-time = "2025-10-06T14:11:15.465Z" }, - { url = 
"https://files.pythonhosted.org/packages/e4/04/3532d990fdbab02e5ede063676b5c4260e7f3abea2151099c2aa745acc4c/yarl-1.22.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:0dd9a702591ca2e543631c2a017e4a547e38a5c0f29eece37d9097e04a7ac683", size = 93504, upload-time = "2025-10-06T14:11:17.106Z" }, - { url = "https://files.pythonhosted.org/packages/11/63/ff458113c5c2dac9a9719ac68ee7c947cb621432bcf28c9972b1c0e83938/yarl-1.22.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:594fcab1032e2d2cc3321bb2e51271e7cd2b516c7d9aee780ece81b07ff8244b", size = 94282, upload-time = "2025-10-06T14:11:19.064Z" }, - { url = "https://files.pythonhosted.org/packages/a7/bc/315a56aca762d44a6aaaf7ad253f04d996cb6b27bad34410f82d76ea8038/yarl-1.22.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f3d7a87a78d46a2e3d5b72587ac14b4c16952dd0887dbb051451eceac774411e", size = 372080, upload-time = "2025-10-06T14:11:20.996Z" }, - { url = "https://files.pythonhosted.org/packages/3f/3f/08e9b826ec2e099ea6e7c69a61272f4f6da62cb5b1b63590bb80ca2e4a40/yarl-1.22.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:852863707010316c973162e703bddabec35e8757e67fcb8ad58829de1ebc8590", size = 338696, upload-time = "2025-10-06T14:11:22.847Z" }, - { url = "https://files.pythonhosted.org/packages/e3/9f/90360108e3b32bd76789088e99538febfea24a102380ae73827f62073543/yarl-1.22.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:131a085a53bfe839a477c0845acf21efc77457ba2bcf5899618136d64f3303a2", size = 387121, upload-time = "2025-10-06T14:11:24.889Z" }, - { url = "https://files.pythonhosted.org/packages/98/92/ab8d4657bd5b46a38094cfaea498f18bb70ce6b63508fd7e909bd1f93066/yarl-1.22.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:078a8aefd263f4d4f923a9677b942b445a2be970ca24548a8102689a3a8ab8da", size = 394080, upload-time = 
"2025-10-06T14:11:27.307Z" }, - { url = "https://files.pythonhosted.org/packages/f5/e7/d8c5a7752fef68205296201f8ec2bf718f5c805a7a7e9880576c67600658/yarl-1.22.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bca03b91c323036913993ff5c738d0842fc9c60c4648e5c8d98331526df89784", size = 372661, upload-time = "2025-10-06T14:11:29.387Z" }, - { url = "https://files.pythonhosted.org/packages/b6/2e/f4d26183c8db0bb82d491b072f3127fb8c381a6206a3a56332714b79b751/yarl-1.22.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:68986a61557d37bb90d3051a45b91fa3d5c516d177dfc6dd6f2f436a07ff2b6b", size = 364645, upload-time = "2025-10-06T14:11:31.423Z" }, - { url = "https://files.pythonhosted.org/packages/80/7c/428e5812e6b87cd00ee8e898328a62c95825bf37c7fa87f0b6bb2ad31304/yarl-1.22.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:4792b262d585ff0dff6bcb787f8492e40698443ec982a3568c2096433660c694", size = 355361, upload-time = "2025-10-06T14:11:33.055Z" }, - { url = "https://files.pythonhosted.org/packages/ec/2a/249405fd26776f8b13c067378ef4d7dd49c9098d1b6457cdd152a99e96a9/yarl-1.22.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:ebd4549b108d732dba1d4ace67614b9545b21ece30937a63a65dd34efa19732d", size = 381451, upload-time = "2025-10-06T14:11:35.136Z" }, - { url = "https://files.pythonhosted.org/packages/67/a8/fb6b1adbe98cf1e2dd9fad71003d3a63a1bc22459c6e15f5714eb9323b93/yarl-1.22.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f87ac53513d22240c7d59203f25cc3beac1e574c6cd681bbfd321987b69f95fd", size = 383814, upload-time = "2025-10-06T14:11:37.094Z" }, - { url = "https://files.pythonhosted.org/packages/d9/f9/3aa2c0e480fb73e872ae2814c43bc1e734740bb0d54e8cb2a95925f98131/yarl-1.22.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:22b029f2881599e2f1b06f8f1db2ee63bd309e2293ba2d566e008ba12778b8da", size = 370799, upload-time = "2025-10-06T14:11:38.83Z" }, - { url = 
"https://files.pythonhosted.org/packages/50/3c/af9dba3b8b5eeb302f36f16f92791f3ea62e3f47763406abf6d5a4a3333b/yarl-1.22.0-cp314-cp314-win32.whl", hash = "sha256:6a635ea45ba4ea8238463b4f7d0e721bad669f80878b7bfd1f89266e2ae63da2", size = 82990, upload-time = "2025-10-06T14:11:40.624Z" }, - { url = "https://files.pythonhosted.org/packages/ac/30/ac3a0c5bdc1d6efd1b41fa24d4897a4329b3b1e98de9449679dd327af4f0/yarl-1.22.0-cp314-cp314-win_amd64.whl", hash = "sha256:0d6e6885777af0f110b0e5d7e5dda8b704efed3894da26220b7f3d887b839a79", size = 88292, upload-time = "2025-10-06T14:11:42.578Z" }, - { url = "https://files.pythonhosted.org/packages/df/0a/227ab4ff5b998a1b7410abc7b46c9b7a26b0ca9e86c34ba4b8d8bc7c63d5/yarl-1.22.0-cp314-cp314-win_arm64.whl", hash = "sha256:8218f4e98d3c10d683584cb40f0424f4b9fd6e95610232dd75e13743b070ee33", size = 82888, upload-time = "2025-10-06T14:11:44.863Z" }, - { url = "https://files.pythonhosted.org/packages/06/5e/a15eb13db90abd87dfbefb9760c0f3f257ac42a5cac7e75dbc23bed97a9f/yarl-1.22.0-cp314-cp314t-macosx_10_13_universal2.whl", hash = "sha256:45c2842ff0e0d1b35a6bf1cd6c690939dacb617a70827f715232b2e0494d55d1", size = 146223, upload-time = "2025-10-06T14:11:46.796Z" }, - { url = "https://files.pythonhosted.org/packages/18/82/9665c61910d4d84f41a5bf6837597c89e665fa88aa4941080704645932a9/yarl-1.22.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:d947071e6ebcf2e2bee8fce76e10faca8f7a14808ca36a910263acaacef08eca", size = 95981, upload-time = "2025-10-06T14:11:48.845Z" }, - { url = "https://files.pythonhosted.org/packages/5d/9a/2f65743589809af4d0a6d3aa749343c4b5f4c380cc24a8e94a3c6625a808/yarl-1.22.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:334b8721303e61b00019474cc103bdac3d7b1f65e91f0bfedeec2d56dfe74b53", size = 97303, upload-time = "2025-10-06T14:11:50.897Z" }, - { url = 
"https://files.pythonhosted.org/packages/b0/ab/5b13d3e157505c43c3b43b5a776cbf7b24a02bc4cccc40314771197e3508/yarl-1.22.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1e7ce67c34138a058fd092f67d07a72b8e31ff0c9236e751957465a24b28910c", size = 361820, upload-time = "2025-10-06T14:11:52.549Z" }, - { url = "https://files.pythonhosted.org/packages/fb/76/242a5ef4677615cf95330cfc1b4610e78184400699bdda0acb897ef5e49a/yarl-1.22.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d77e1b2c6d04711478cb1c4ab90db07f1609ccf06a287d5607fcd90dc9863acf", size = 323203, upload-time = "2025-10-06T14:11:54.225Z" }, - { url = "https://files.pythonhosted.org/packages/8c/96/475509110d3f0153b43d06164cf4195c64d16999e0c7e2d8a099adcd6907/yarl-1.22.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c4647674b6150d2cae088fc07de2738a84b8bcedebef29802cf0b0a82ab6face", size = 363173, upload-time = "2025-10-06T14:11:56.069Z" }, - { url = "https://files.pythonhosted.org/packages/c9/66/59db471aecfbd559a1fd48aedd954435558cd98c7d0da8b03cc6c140a32c/yarl-1.22.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:efb07073be061c8f79d03d04139a80ba33cbd390ca8f0297aae9cce6411e4c6b", size = 373562, upload-time = "2025-10-06T14:11:58.783Z" }, - { url = "https://files.pythonhosted.org/packages/03/1f/c5d94abc91557384719da10ff166b916107c1b45e4d0423a88457071dd88/yarl-1.22.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e51ac5435758ba97ad69617e13233da53908beccc6cfcd6c34bbed8dcbede486", size = 339828, upload-time = "2025-10-06T14:12:00.686Z" }, - { url = "https://files.pythonhosted.org/packages/5f/97/aa6a143d3afba17b6465733681c70cf175af89f76ec8d9286e08437a7454/yarl-1.22.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = 
"sha256:33e32a0dd0c8205efa8e83d04fc9f19313772b78522d1bdc7d9aed706bfd6138", size = 347551, upload-time = "2025-10-06T14:12:02.628Z" }, - { url = "https://files.pythonhosted.org/packages/43/3c/45a2b6d80195959239a7b2a8810506d4eea5487dce61c2a3393e7fc3c52e/yarl-1.22.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:bf4a21e58b9cde0e401e683ebd00f6ed30a06d14e93f7c8fd059f8b6e8f87b6a", size = 334512, upload-time = "2025-10-06T14:12:04.871Z" }, - { url = "https://files.pythonhosted.org/packages/86/a0/c2ab48d74599c7c84cb104ebd799c5813de252bea0f360ffc29d270c2caa/yarl-1.22.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:e4b582bab49ac33c8deb97e058cd67c2c50dac0dd134874106d9c774fd272529", size = 352400, upload-time = "2025-10-06T14:12:06.624Z" }, - { url = "https://files.pythonhosted.org/packages/32/75/f8919b2eafc929567d3d8411f72bdb1a2109c01caaab4ebfa5f8ffadc15b/yarl-1.22.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:0b5bcc1a9c4839e7e30b7b30dd47fe5e7e44fb7054ec29b5bb8d526aa1041093", size = 357140, upload-time = "2025-10-06T14:12:08.362Z" }, - { url = "https://files.pythonhosted.org/packages/cf/72/6a85bba382f22cf78add705d8c3731748397d986e197e53ecc7835e76de7/yarl-1.22.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:c0232bce2170103ec23c454e54a57008a9a72b5d1c3105dc2496750da8cfa47c", size = 341473, upload-time = "2025-10-06T14:12:10.994Z" }, - { url = "https://files.pythonhosted.org/packages/35/18/55e6011f7c044dc80b98893060773cefcfdbf60dfefb8cb2f58b9bacbd83/yarl-1.22.0-cp314-cp314t-win32.whl", hash = "sha256:8009b3173bcd637be650922ac455946197d858b3630b6d8787aa9e5c4564533e", size = 89056, upload-time = "2025-10-06T14:12:13.317Z" }, - { url = "https://files.pythonhosted.org/packages/f9/86/0f0dccb6e59a9e7f122c5afd43568b1d31b8ab7dda5f1b01fb5c7025c9a9/yarl-1.22.0-cp314-cp314t-win_amd64.whl", hash = "sha256:9fb17ea16e972c63d25d4a97f016d235c78dd2344820eb35bc034bc32012ee27", size = 96292, upload-time = "2025-10-06T14:12:15.398Z" }, - { url = 
"https://files.pythonhosted.org/packages/48/b7/503c98092fb3b344a179579f55814b613c1fbb1c23b3ec14a7b008a66a6e/yarl-1.22.0-cp314-cp314t-win_arm64.whl", hash = "sha256:9f6d73c1436b934e3f01df1e1b21ff765cd1d28c77dfb9ace207f746d4610ee1", size = 85171, upload-time = "2025-10-06T14:12:16.935Z" }, - { url = "https://files.pythonhosted.org/packages/73/ae/b48f95715333080afb75a4504487cbe142cae1268afc482d06692d605ae6/yarl-1.22.0-py3-none-any.whl", hash = "sha256:1380560bdba02b6b6c90de54133c81c9f2a453dee9912fe58c1dcced1edb7cff", size = 46814, upload-time = "2025-10-06T14:12:53.872Z" }, + { url = "https://files.pythonhosted.org/packages/88/8a/94615bc31022f711add374097ad4144d569e95ff3c38d39215d07ac153a0/yarl-1.23.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:1932b6b8bba8d0160a9d1078aae5838a66039e8832d41d2992daa9a3a08f7860", size = 124737, upload-time = "2026-03-01T22:05:12.897Z" }, + { url = "https://files.pythonhosted.org/packages/e3/6f/c6554045d59d64052698add01226bc867b52fe4a12373415d7991fdca95d/yarl-1.23.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:411225bae281f114067578891bc75534cfb3d92a3b4dfef7a6ca78ba354e6069", size = 87029, upload-time = "2026-03-01T22:05:14.376Z" }, + { url = "https://files.pythonhosted.org/packages/19/2a/725ecc166d53438bc88f76822ed4b1e3b10756e790bafd7b523fe97c322d/yarl-1.23.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:13a563739ae600a631c36ce096615fe307f131344588b0bc0daec108cdb47b25", size = 86310, upload-time = "2026-03-01T22:05:15.71Z" }, + { url = "https://files.pythonhosted.org/packages/99/30/58260ed98e6ff7f90ba84442c1ddd758c9170d70327394a6227b310cd60f/yarl-1.23.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:9cbf44c5cb4a7633d078788e1b56387e3d3cf2b8139a3be38040b22d6c3221c8", size = 97587, upload-time = "2026-03-01T22:05:17.384Z" }, + { url = 
"https://files.pythonhosted.org/packages/76/0a/8b08aac08b50682e65759f7f8dde98ae8168f72487e7357a5d684c581ef9/yarl-1.23.0-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53ad387048f6f09a8969631e4de3f1bf70c50e93545d64af4f751b2498755072", size = 92528, upload-time = "2026-03-01T22:05:18.804Z" }, + { url = "https://files.pythonhosted.org/packages/52/07/0b7179101fe5f8385ec6c6bb5d0cb9f76bd9fb4a769591ab6fb5cdbfc69a/yarl-1.23.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:4a59ba56f340334766f3a4442e0efd0af895fae9e2b204741ef885c446b3a1a8", size = 105339, upload-time = "2026-03-01T22:05:20.235Z" }, + { url = "https://files.pythonhosted.org/packages/d3/8a/36d82869ab5ec829ca8574dfcb92b51286fcfb1e9c7a73659616362dc880/yarl-1.23.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:803a3c3ce4acc62eaf01eaca1208dcf0783025ef27572c3336502b9c232005e7", size = 105061, upload-time = "2026-03-01T22:05:22.268Z" }, + { url = "https://files.pythonhosted.org/packages/66/3e/868e5c3364b6cee19ff3e1a122194fa4ce51def02c61023970442162859e/yarl-1.23.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:a3d2bff8f37f8d0f96c7ec554d16945050d54462d6e95414babaa18bfafc7f51", size = 100132, upload-time = "2026-03-01T22:05:23.638Z" }, + { url = "https://files.pythonhosted.org/packages/cf/26/9c89acf82f08a52cb52d6d39454f8d18af15f9d386a23795389d1d423823/yarl-1.23.0-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c75eb09e8d55bceb4367e83496ff8ef2bc7ea6960efb38e978e8073ea59ecb67", size = 99289, upload-time = "2026-03-01T22:05:25.749Z" }, + { url = "https://files.pythonhosted.org/packages/6f/54/5b0db00d2cb056922356104468019c0a132e89c8d3ab67d8ede9f4483d2a/yarl-1.23.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:877b0738624280e34c55680d6054a307aa94f7d52fa0e3034a9cc6e790871da7", size = 
96950, upload-time = "2026-03-01T22:05:27.318Z" }, + { url = "https://files.pythonhosted.org/packages/f6/40/10fa93811fd439341fad7e0718a86aca0de9548023bbb403668d6555acab/yarl-1.23.0-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:b5405bb8f0e783a988172993cfc627e4d9d00432d6bbac65a923041edacf997d", size = 93960, upload-time = "2026-03-01T22:05:28.738Z" }, + { url = "https://files.pythonhosted.org/packages/bc/d2/8ae2e6cd77d0805f4526e30ec43b6f9a3dfc542d401ac4990d178e4bf0cf/yarl-1.23.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:1c3a3598a832590c5a3ce56ab5576361b5688c12cb1d39429cf5dba30b510760", size = 104703, upload-time = "2026-03-01T22:05:30.438Z" }, + { url = "https://files.pythonhosted.org/packages/2f/0c/b3ceacf82c3fe21183ce35fa2acf5320af003d52bc1fcf5915077681142e/yarl-1.23.0-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:8419ebd326430d1cbb7efb5292330a2cf39114e82df5cc3d83c9a0d5ebeaf2f2", size = 98325, upload-time = "2026-03-01T22:05:31.835Z" }, + { url = "https://files.pythonhosted.org/packages/9d/e0/12900edd28bdab91a69bd2554b85ad7b151f64e8b521fe16f9ad2f56477a/yarl-1.23.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:be61f6fff406ca40e3b1d84716fde398fc08bc63dd96d15f3a14230a0973ed86", size = 105067, upload-time = "2026-03-01T22:05:33.358Z" }, + { url = "https://files.pythonhosted.org/packages/15/61/74bb1182cf79c9bbe4eb6b1f14a57a22d7a0be5e9cedf8e2d5c2086474c3/yarl-1.23.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:3ceb13c5c858d01321b5d9bb65e4cf37a92169ea470b70fec6f236b2c9dd7e34", size = 100285, upload-time = "2026-03-01T22:05:35.4Z" }, + { url = "https://files.pythonhosted.org/packages/69/7f/cd5ef733f2550de6241bd8bd8c3febc78158b9d75f197d9c7baa113436af/yarl-1.23.0-cp312-cp312-win32.whl", hash = "sha256:fffc45637bcd6538de8b85f51e3df3223e4ad89bccbfca0481c08c7fc8b7ed7d", size = 82359, upload-time = "2026-03-01T22:05:36.811Z" }, + { url = 
"https://files.pythonhosted.org/packages/f5/be/25216a49daeeb7af2bec0db22d5e7df08ed1d7c9f65d78b14f3b74fd72fc/yarl-1.23.0-cp312-cp312-win_amd64.whl", hash = "sha256:f69f57305656a4852f2a7203efc661d8c042e6cc67f7acd97d8667fb448a426e", size = 87674, upload-time = "2026-03-01T22:05:38.171Z" }, + { url = "https://files.pythonhosted.org/packages/d2/35/aeab955d6c425b227d5b7247eafb24f2653fedc32f95373a001af5dfeb9e/yarl-1.23.0-cp312-cp312-win_arm64.whl", hash = "sha256:6e87a6e8735b44816e7db0b2fbc9686932df473c826b0d9743148432e10bb9b9", size = 81879, upload-time = "2026-03-01T22:05:40.006Z" }, + { url = "https://files.pythonhosted.org/packages/9a/4b/a0a6e5d0ee8a2f3a373ddef8a4097d74ac901ac363eea1440464ccbe0898/yarl-1.23.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:16c6994ac35c3e74fb0ae93323bf8b9c2a9088d55946109489667c510a7d010e", size = 123796, upload-time = "2026-03-01T22:05:41.412Z" }, + { url = "https://files.pythonhosted.org/packages/67/b6/8925d68af039b835ae876db5838e82e76ec87b9782ecc97e192b809c4831/yarl-1.23.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:4a42e651629dafb64fd5b0286a3580613702b5809ad3f24934ea87595804f2c5", size = 86547, upload-time = "2026-03-01T22:05:42.841Z" }, + { url = "https://files.pythonhosted.org/packages/ae/50/06d511cc4b8e0360d3c94af051a768e84b755c5eb031b12adaaab6dec6e5/yarl-1.23.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:7c6b9461a2a8b47c65eef63bb1c76a4f1c119618ffa99ea79bc5bb1e46c5821b", size = 85854, upload-time = "2026-03-01T22:05:44.85Z" }, + { url = "https://files.pythonhosted.org/packages/c4/f4/4e30b250927ffdab4db70da08b9b8d2194d7c7b400167b8fbeca1e4701ca/yarl-1.23.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:2569b67d616eab450d262ca7cb9f9e19d2f718c70a8b88712859359d0ab17035", size = 98351, upload-time = "2026-03-01T22:05:46.836Z" }, + { url = 
"https://files.pythonhosted.org/packages/86/fc/4118c5671ea948208bdb1492d8b76bdf1453d3e73df051f939f563e7dcc5/yarl-1.23.0-cp313-cp313-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:e9d9a4d06d3481eab79803beb4d9bd6f6a8e781ec078ac70d7ef2dcc29d1bea5", size = 92711, upload-time = "2026-03-01T22:05:48.316Z" }, + { url = "https://files.pythonhosted.org/packages/56/11/1ed91d42bd9e73c13dc9e7eb0dd92298d75e7ac4dd7f046ad0c472e231cd/yarl-1.23.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:f514f6474e04179d3d33175ed3f3e31434d3130d42ec153540d5b157deefd735", size = 106014, upload-time = "2026-03-01T22:05:50.028Z" }, + { url = "https://files.pythonhosted.org/packages/ce/c9/74e44e056a23fbc33aca71779ef450ca648a5bc472bdad7a82339918f818/yarl-1.23.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fda207c815b253e34f7e1909840fd14299567b1c0eb4908f8c2ce01a41265401", size = 105557, upload-time = "2026-03-01T22:05:51.416Z" }, + { url = "https://files.pythonhosted.org/packages/66/fe/b1e10b08d287f518994f1e2ff9b6d26f0adeecd8dd7d533b01bab29a3eda/yarl-1.23.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:34b6cf500e61c90f305094911f9acc9c86da1a05a7a3f5be9f68817043f486e4", size = 101559, upload-time = "2026-03-01T22:05:52.872Z" }, + { url = "https://files.pythonhosted.org/packages/72/59/c5b8d94b14e3d3c2a9c20cb100119fd534ab5a14b93673ab4cc4a4141ea5/yarl-1.23.0-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:d7504f2b476d21653e4d143f44a175f7f751cd41233525312696c76aa3dbb23f", size = 100502, upload-time = "2026-03-01T22:05:54.954Z" }, + { url = "https://files.pythonhosted.org/packages/77/4f/96976cb54cbfc5c9fd73ed4c51804f92f209481d1fb190981c0f8a07a1d7/yarl-1.23.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:578110dd426f0d209d1509244e6d4a3f1a3e9077655d98c5f22583d63252a08a", size = 
98027, upload-time = "2026-03-01T22:05:56.409Z" }, + { url = "https://files.pythonhosted.org/packages/63/6e/904c4f476471afdbad6b7e5b70362fb5810e35cd7466529a97322b6f5556/yarl-1.23.0-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:609d3614d78d74ebe35f54953c5bbd2ac647a7ddb9c30a5d877580f5e86b22f2", size = 95369, upload-time = "2026-03-01T22:05:58.141Z" }, + { url = "https://files.pythonhosted.org/packages/9d/40/acfcdb3b5f9d68ef499e39e04d25e141fe90661f9d54114556cf83be8353/yarl-1.23.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4966242ec68afc74c122f8459abd597afd7d8a60dc93d695c1334c5fd25f762f", size = 105565, upload-time = "2026-03-01T22:06:00.286Z" }, + { url = "https://files.pythonhosted.org/packages/5e/c6/31e28f3a6ba2869c43d124f37ea5260cac9c9281df803c354b31f4dd1f3c/yarl-1.23.0-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:e0fd068364a6759bc794459f0a735ab151d11304346332489c7972bacbe9e72b", size = 99813, upload-time = "2026-03-01T22:06:01.712Z" }, + { url = "https://files.pythonhosted.org/packages/08/1f/6f65f59e72d54aa467119b63fc0b0b1762eff0232db1f4720cd89e2f4a17/yarl-1.23.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:39004f0ad156da43e86aa71f44e033de68a44e5a31fc53507b36dd253970054a", size = 105632, upload-time = "2026-03-01T22:06:03.188Z" }, + { url = "https://files.pythonhosted.org/packages/a3/c4/18b178a69935f9e7a338127d5b77d868fdc0f0e49becd286d51b3a18c61d/yarl-1.23.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:e5723c01a56c5028c807c701aa66722916d2747ad737a046853f6c46f4875543", size = 101895, upload-time = "2026-03-01T22:06:04.651Z" }, + { url = "https://files.pythonhosted.org/packages/8f/54/f5b870b5505663911dba950a8e4776a0dbd51c9c54c0ae88e823e4b874a0/yarl-1.23.0-cp313-cp313-win32.whl", hash = "sha256:1b6b572edd95b4fa8df75de10b04bc81acc87c1c7d16bcdd2035b09d30acc957", size = 82356, upload-time = "2026-03-01T22:06:06.04Z" }, + { url = 
"https://files.pythonhosted.org/packages/7a/84/266e8da36879c6edcd37b02b547e2d9ecdfea776be49598e75696e3316e1/yarl-1.23.0-cp313-cp313-win_amd64.whl", hash = "sha256:baaf55442359053c7d62f6f8413a62adba3205119bcb6f49594894d8be47e5e3", size = 87515, upload-time = "2026-03-01T22:06:08.107Z" }, + { url = "https://files.pythonhosted.org/packages/00/fd/7e1c66efad35e1649114fa13f17485f62881ad58edeeb7f49f8c5e748bf9/yarl-1.23.0-cp313-cp313-win_arm64.whl", hash = "sha256:fb4948814a2a98e3912505f09c9e7493b1506226afb1f881825368d6fb776ee3", size = 81785, upload-time = "2026-03-01T22:06:10.181Z" }, + { url = "https://files.pythonhosted.org/packages/9c/fc/119dd07004f17ea43bb91e3ece6587759edd7519d6b086d16bfbd3319982/yarl-1.23.0-cp313-cp313t-macosx_10_13_universal2.whl", hash = "sha256:aecfed0b41aa72b7881712c65cf764e39ce2ec352324f5e0837c7048d9e6daaa", size = 130719, upload-time = "2026-03-01T22:06:11.708Z" }, + { url = "https://files.pythonhosted.org/packages/e6/0d/9f2348502fbb3af409e8f47730282cd6bc80dec6630c1e06374d882d6eb2/yarl-1.23.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:a41bcf68efd19073376eb8cf948b8d9be0af26256403e512bb18f3966f1f9120", size = 89690, upload-time = "2026-03-01T22:06:13.429Z" }, + { url = "https://files.pythonhosted.org/packages/50/93/e88f3c80971b42cfc83f50a51b9d165a1dbf154b97005f2994a79f212a07/yarl-1.23.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:cde9a2ecd91668bcb7f077c4966d8ceddb60af01b52e6e3e2680e4cf00ad1a59", size = 89851, upload-time = "2026-03-01T22:06:15.53Z" }, + { url = "https://files.pythonhosted.org/packages/1c/07/61c9dd8ba8f86473263b4036f70fb594c09e99c0d9737a799dfd8bc85651/yarl-1.23.0-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:5023346c4ee7992febc0068e7593de5fa2bf611848c08404b35ebbb76b1b0512", size = 95874, upload-time = "2026-03-01T22:06:17.553Z" }, + { url = 
"https://files.pythonhosted.org/packages/9e/e9/f9ff8ceefba599eac6abddcfb0b3bee9b9e636e96dbf54342a8577252379/yarl-1.23.0-cp313-cp313t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:d1009abedb49ae95b136a8904a3f71b342f849ffeced2d3747bf29caeda218c4", size = 88710, upload-time = "2026-03-01T22:06:19.004Z" }, + { url = "https://files.pythonhosted.org/packages/eb/78/0231bfcc5d4c8eec220bc2f9ef82cb4566192ea867a7c5b4148f44f6cbcd/yarl-1.23.0-cp313-cp313t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a8d00f29b42f534cc8aa3931cfe773b13b23e561e10d2b26f27a8d309b0e82a1", size = 101033, upload-time = "2026-03-01T22:06:21.203Z" }, + { url = "https://files.pythonhosted.org/packages/cd/9b/30ea5239a61786f18fd25797151a17fbb3be176977187a48d541b5447dd4/yarl-1.23.0-cp313-cp313t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:95451e6ce06c3e104556d73b559f5da6c34a069b6b62946d3ad66afcd51642ea", size = 100817, upload-time = "2026-03-01T22:06:22.738Z" }, + { url = "https://files.pythonhosted.org/packages/62/e2/a4980481071791bc83bce2b7a1a1f7adcabfa366007518b4b845e92eeee3/yarl-1.23.0-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:531ef597132086b6cf96faa7c6c1dcd0361dd5f1694e5cc30375907b9b7d3ea9", size = 97482, upload-time = "2026-03-01T22:06:24.21Z" }, + { url = "https://files.pythonhosted.org/packages/e5/1e/304a00cf5f6100414c4b5a01fc7ff9ee724b62158a08df2f8170dfc72a2d/yarl-1.23.0-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:88f9fb0116fbfcefcab70f85cf4b74a2b6ce5d199c41345296f49d974ddb4123", size = 95949, upload-time = "2026-03-01T22:06:25.697Z" }, + { url = "https://files.pythonhosted.org/packages/68/03/093f4055ed4cae649ac53bca3d180bd37102e9e11d048588e9ab0c0108d0/yarl-1.23.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:e7b0460976dc75cb87ad9cc1f9899a4b97751e7d4e77ab840fc9b6d377b8fd24", size 
= 95839, upload-time = "2026-03-01T22:06:27.309Z" }, + { url = "https://files.pythonhosted.org/packages/b9/28/4c75ebb108f322aa8f917ae10a8ffa4f07cae10a8a627b64e578617df6a0/yarl-1.23.0-cp313-cp313t-musllinux_1_2_armv7l.whl", hash = "sha256:115136c4a426f9da976187d238e84139ff6b51a20839aa6e3720cd1026d768de", size = 90696, upload-time = "2026-03-01T22:06:29.048Z" }, + { url = "https://files.pythonhosted.org/packages/23/9c/42c2e2dd91c1a570402f51bdf066bfdb1241c2240ba001967bad778e77b7/yarl-1.23.0-cp313-cp313t-musllinux_1_2_ppc64le.whl", hash = "sha256:ead11956716a940c1abc816b7df3fa2b84d06eaed8832ca32f5c5e058c65506b", size = 100865, upload-time = "2026-03-01T22:06:30.525Z" }, + { url = "https://files.pythonhosted.org/packages/74/05/1bcd60a8a0a914d462c305137246b6f9d167628d73568505fce3f1cb2e65/yarl-1.23.0-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:fe8f8f5e70e6dbdfca9882cd9deaac058729bcf323cf7a58660901e55c9c94f6", size = 96234, upload-time = "2026-03-01T22:06:32.692Z" }, + { url = "https://files.pythonhosted.org/packages/90/b2/f52381aac396d6778ce516b7bc149c79e65bfc068b5de2857ab69eeea3b7/yarl-1.23.0-cp313-cp313t-musllinux_1_2_s390x.whl", hash = "sha256:a0e317df055958a0c1e79e5d2aa5a5eaa4a6d05a20d4b0c9c3f48918139c9fc6", size = 100295, upload-time = "2026-03-01T22:06:34.268Z" }, + { url = "https://files.pythonhosted.org/packages/e5/e8/638bae5bbf1113a659b2435d8895474598afe38b4a837103764f603aba56/yarl-1.23.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:6f0fd84de0c957b2d280143522c4f91a73aada1923caee763e24a2b3fda9f8a5", size = 97784, upload-time = "2026-03-01T22:06:35.864Z" }, + { url = "https://files.pythonhosted.org/packages/80/25/a3892b46182c586c202629fc2159aa13975d3741d52ebd7347fd501d48d5/yarl-1.23.0-cp313-cp313t-win32.whl", hash = "sha256:93a784271881035ab4406a172edb0faecb6e7d00f4b53dc2f55919d6c9688595", size = 88313, upload-time = "2026-03-01T22:06:37.39Z" }, + { url = 
"https://files.pythonhosted.org/packages/43/68/8c5b36aa5178900b37387937bc2c2fe0e9505537f713495472dcf6f6fccc/yarl-1.23.0-cp313-cp313t-win_amd64.whl", hash = "sha256:dd00607bffbf30250fe108065f07453ec124dbf223420f57f5e749b04295e090", size = 94932, upload-time = "2026-03-01T22:06:39.579Z" }, + { url = "https://files.pythonhosted.org/packages/c6/cc/d79ba8292f51f81f4dc533a8ccfb9fc6992cabf0998ed3245de7589dc07c/yarl-1.23.0-cp313-cp313t-win_arm64.whl", hash = "sha256:ac09d42f48f80c9ee1635b2fcaa819496a44502737660d3c0f2ade7526d29144", size = 84786, upload-time = "2026-03-01T22:06:41.988Z" }, + { url = "https://files.pythonhosted.org/packages/90/98/b85a038d65d1b92c3903ab89444f48d3cee490a883477b716d7a24b1a78c/yarl-1.23.0-cp314-cp314-macosx_10_15_universal2.whl", hash = "sha256:21d1b7305a71a15b4794b5ff22e8eef96ff4a6d7f9657155e5aa419444b28912", size = 124455, upload-time = "2026-03-01T22:06:43.615Z" }, + { url = "https://files.pythonhosted.org/packages/39/54/bc2b45559f86543d163b6e294417a107bb87557609007c007ad889afec18/yarl-1.23.0-cp314-cp314-macosx_10_15_x86_64.whl", hash = "sha256:85610b4f27f69984932a7abbe52703688de3724d9f72bceb1cca667deff27474", size = 86752, upload-time = "2026-03-01T22:06:45.425Z" }, + { url = "https://files.pythonhosted.org/packages/24/f9/e8242b68362bffe6fb536c8db5076861466fc780f0f1b479fc4ffbebb128/yarl-1.23.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:23f371bd662cf44a7630d4d113101eafc0cfa7518a2760d20760b26021454719", size = 86291, upload-time = "2026-03-01T22:06:46.974Z" }, + { url = "https://files.pythonhosted.org/packages/ea/d8/d1cb2378c81dd729e98c716582b1ccb08357e8488e4c24714658cc6630e8/yarl-1.23.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:c4a80f77dc1acaaa61f0934176fccca7096d9b1ff08c8ba9cddf5ae034a24319", size = 99026, upload-time = "2026-03-01T22:06:48.459Z" }, + { url = 
"https://files.pythonhosted.org/packages/0a/ff/7196790538f31debe3341283b5b0707e7feb947620fc5e8236ef28d44f72/yarl-1.23.0-cp314-cp314-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:bd654fad46d8d9e823afbb4f87c79160b5a374ed1ff5bde24e542e6ba8f41434", size = 92355, upload-time = "2026-03-01T22:06:50.306Z" }, + { url = "https://files.pythonhosted.org/packages/c1/56/25d58c3eddde825890a5fe6aa1866228377354a3c39262235234ab5f616b/yarl-1.23.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:682bae25f0a0dd23a056739f23a134db9f52a63e2afd6bfb37ddc76292bbd723", size = 106417, upload-time = "2026-03-01T22:06:52.1Z" }, + { url = "https://files.pythonhosted.org/packages/51/8a/882c0e7bc8277eb895b31bce0138f51a1ba551fc2e1ec6753ffc1e7c1377/yarl-1.23.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:a82836cab5f197a0514235aaf7ffccdc886ccdaa2324bc0aafdd4ae898103039", size = 106422, upload-time = "2026-03-01T22:06:54.424Z" }, + { url = "https://files.pythonhosted.org/packages/42/2b/fef67d616931055bf3d6764885990a3ac647d68734a2d6a9e1d13de437a2/yarl-1.23.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:1c57676bdedc94cd3bc37724cf6f8cd2779f02f6aba48de45feca073e714fe52", size = 101915, upload-time = "2026-03-01T22:06:55.895Z" }, + { url = "https://files.pythonhosted.org/packages/18/6a/530e16aebce27c5937920f3431c628a29a4b6b430fab3fd1c117b26ff3f6/yarl-1.23.0-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c7f8dc16c498ff06497c015642333219871effba93e4a2e8604a06264aca5c5c", size = 100690, upload-time = "2026-03-01T22:06:58.21Z" }, + { url = "https://files.pythonhosted.org/packages/88/08/93749219179a45e27b036e03260fda05190b911de8e18225c294ac95bbc9/yarl-1.23.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:5ee586fb17ff8f90c91cf73c6108a434b02d69925f44f5f8e0d7f2f260607eae", size = 
98750, upload-time = "2026-03-01T22:06:59.794Z" }, + { url = "https://files.pythonhosted.org/packages/d9/cf/ea424a004969f5d81a362110a6ac1496d79efdc6d50c2c4b2e3ea0fc2519/yarl-1.23.0-cp314-cp314-musllinux_1_2_armv7l.whl", hash = "sha256:17235362f580149742739cc3828b80e24029d08cbb9c4bda0242c7b5bc610a8e", size = 94685, upload-time = "2026-03-01T22:07:01.375Z" }, + { url = "https://files.pythonhosted.org/packages/e2/b7/14341481fe568e2b0408bcf1484c652accafe06a0ade9387b5d3fd9df446/yarl-1.23.0-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:0793e2bd0cf14234983bbb371591e6bea9e876ddf6896cdcc93450996b0b5c85", size = 106009, upload-time = "2026-03-01T22:07:03.151Z" }, + { url = "https://files.pythonhosted.org/packages/0a/e6/5c744a9b54f4e8007ad35bce96fbc9218338e84812d36f3390cea616881a/yarl-1.23.0-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:3650dc2480f94f7116c364096bc84b1d602f44224ef7d5c7208425915c0475dd", size = 100033, upload-time = "2026-03-01T22:07:04.701Z" }, + { url = "https://files.pythonhosted.org/packages/0c/23/e3bfc188d0b400f025bc49d99793d02c9abe15752138dcc27e4eaf0c4a9e/yarl-1.23.0-cp314-cp314-musllinux_1_2_s390x.whl", hash = "sha256:f40e782d49630ad384db66d4d8b73ff4f1b8955dc12e26b09a3e3af064b3b9d6", size = 106483, upload-time = "2026-03-01T22:07:06.231Z" }, + { url = "https://files.pythonhosted.org/packages/72/42/f0505f949a90b3f8b7a363d6cbdf398f6e6c58946d85c6d3a3bc70595b26/yarl-1.23.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:94f8575fbdf81749008d980c17796097e645574a3b8c28ee313931068dad14fe", size = 102175, upload-time = "2026-03-01T22:07:08.4Z" }, + { url = "https://files.pythonhosted.org/packages/aa/65/b39290f1d892a9dd671d1c722014ca062a9c35d60885d57e5375db0404b5/yarl-1.23.0-cp314-cp314-win32.whl", hash = "sha256:c8aa34a5c864db1087d911a0b902d60d203ea3607d91f615acd3f3108ac32169", size = 83871, upload-time = "2026-03-01T22:07:09.968Z" }, + { url = 
"https://files.pythonhosted.org/packages/a9/5b/9b92f54c784c26e2a422e55a8d2607ab15b7ea3349e28359282f84f01d43/yarl-1.23.0-cp314-cp314-win_amd64.whl", hash = "sha256:63e92247f383c85ab00dd0091e8c3fa331a96e865459f5ee80353c70a4a42d70", size = 89093, upload-time = "2026-03-01T22:07:11.501Z" }, + { url = "https://files.pythonhosted.org/packages/e0/7d/8a84dc9381fd4412d5e7ff04926f9865f6372b4c2fd91e10092e65d29eb8/yarl-1.23.0-cp314-cp314-win_arm64.whl", hash = "sha256:70efd20be968c76ece7baa8dafe04c5be06abc57f754d6f36f3741f7aa7a208e", size = 83384, upload-time = "2026-03-01T22:07:13.069Z" }, + { url = "https://files.pythonhosted.org/packages/dd/8d/d2fad34b1c08aa161b74394183daa7d800141aaaee207317e82c790b418d/yarl-1.23.0-cp314-cp314t-macosx_10_15_universal2.whl", hash = "sha256:9a18d6f9359e45722c064c97464ec883eb0e0366d33eda61cb19a244bf222679", size = 131019, upload-time = "2026-03-01T22:07:14.903Z" }, + { url = "https://files.pythonhosted.org/packages/19/ff/33009a39d3ccf4b94d7d7880dfe17fb5816c5a4fe0096d9b56abceea9ac7/yarl-1.23.0-cp314-cp314t-macosx_10_15_x86_64.whl", hash = "sha256:2803ed8b21ca47a43da80a6fd1ed3019d30061f7061daa35ac54f63933409412", size = 89894, upload-time = "2026-03-01T22:07:17.372Z" }, + { url = "https://files.pythonhosted.org/packages/0c/f1/dab7ac5e7306fb79c0190766a3c00b4cb8d09a1f390ded68c85a5934faf5/yarl-1.23.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:394906945aa8b19fc14a61cf69743a868bb8c465efe85eee687109cc540b98f4", size = 89979, upload-time = "2026-03-01T22:07:19.361Z" }, + { url = "https://files.pythonhosted.org/packages/aa/b1/08e95f3caee1fad6e65017b9f26c1d79877b502622d60e517de01e72f95d/yarl-1.23.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:71d006bee8397a4a89f469b8deb22469fe7508132d3c17fa6ed871e79832691c", size = 95943, upload-time = "2026-03-01T22:07:21.266Z" }, + { url = 
"https://files.pythonhosted.org/packages/c0/cc/6409f9018864a6aa186c61175b977131f373f1988e198e031236916e87e4/yarl-1.23.0-cp314-cp314t-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:62694e275c93d54f7ccedcfef57d42761b2aad5234b6be1f3e3026cae4001cd4", size = 88786, upload-time = "2026-03-01T22:07:23.129Z" }, + { url = "https://files.pythonhosted.org/packages/76/40/cc22d1d7714b717fde2006fad2ced5efe5580606cb059ae42117542122f3/yarl-1.23.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:a31de1613658308efdb21ada98cbc86a97c181aa050ba22a808120bb5be3ab94", size = 101307, upload-time = "2026-03-01T22:07:24.689Z" }, + { url = "https://files.pythonhosted.org/packages/8f/0d/476c38e85ddb4c6ec6b20b815bdd779aa386a013f3d8b85516feee55c8dc/yarl-1.23.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:fb1e8b8d66c278b21d13b0a7ca22c41dd757a7c209c6b12c313e445c31dd3b28", size = 100904, upload-time = "2026-03-01T22:07:26.287Z" }, + { url = "https://files.pythonhosted.org/packages/72/32/0abe4a76d59adf2081dcb0397168553ece4616ada1c54d1c49d8936c74f8/yarl-1.23.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:50f9d8d531dfb767c565f348f33dd5139a6c43f5cbdf3f67da40d54241df93f6", size = 97728, upload-time = "2026-03-01T22:07:27.906Z" }, + { url = "https://files.pythonhosted.org/packages/b7/35/7b30f4810fba112f60f5a43237545867504e15b1c7647a785fbaf588fac2/yarl-1.23.0-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:575aa4405a656e61a540f4a80eaa5260f2a38fff7bfdc4b5f611840d76e9e277", size = 95964, upload-time = "2026-03-01T22:07:30.198Z" }, + { url = "https://files.pythonhosted.org/packages/2d/86/ed7a73ab85ef00e8bb70b0cb5421d8a2a625b81a333941a469a6f4022828/yarl-1.23.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:041b1a4cefacf65840b4e295c6985f334ba83c30607441ae3cf206a0eed1a2e4", 
size = 95882, upload-time = "2026-03-01T22:07:32.132Z" }, + { url = "https://files.pythonhosted.org/packages/19/90/d56967f61a29d8498efb7afb651e0b2b422a1e9b47b0ab5f4e40a19b699b/yarl-1.23.0-cp314-cp314t-musllinux_1_2_armv7l.whl", hash = "sha256:d38c1e8231722c4ce40d7593f28d92b5fc72f3e9774fe73d7e800ec32299f63a", size = 90797, upload-time = "2026-03-01T22:07:34.404Z" }, + { url = "https://files.pythonhosted.org/packages/72/00/8b8f76909259f56647adb1011d7ed8b321bcf97e464515c65016a47ecdf0/yarl-1.23.0-cp314-cp314t-musllinux_1_2_ppc64le.whl", hash = "sha256:d53834e23c015ee83a99377db6e5e37d8484f333edb03bd15b4bc312cc7254fb", size = 101023, upload-time = "2026-03-01T22:07:35.953Z" }, + { url = "https://files.pythonhosted.org/packages/ac/e2/cab11b126fb7d440281b7df8e9ddbe4851e70a4dde47a202b6642586b8d9/yarl-1.23.0-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:2e27c8841126e017dd2a054a95771569e6070b9ee1b133366d8b31beb5018a41", size = 96227, upload-time = "2026-03-01T22:07:37.594Z" }, + { url = "https://files.pythonhosted.org/packages/c2/9b/2c893e16bfc50e6b2edf76c1a9eb6cb0c744346197e74c65e99ad8d634d0/yarl-1.23.0-cp314-cp314t-musllinux_1_2_s390x.whl", hash = "sha256:76855800ac56f878847a09ce6dba727c93ca2d89c9e9d63002d26b916810b0a2", size = 100302, upload-time = "2026-03-01T22:07:39.334Z" }, + { url = "https://files.pythonhosted.org/packages/28/ec/5498c4e3a6d5f1003beb23405671c2eb9cdbf3067d1c80f15eeafe301010/yarl-1.23.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e09fd068c2e169a7070d83d3bde728a4d48de0549f975290be3c108c02e499b4", size = 98202, upload-time = "2026-03-01T22:07:41.717Z" }, + { url = "https://files.pythonhosted.org/packages/fe/c3/cd737e2d45e70717907f83e146f6949f20cc23cd4bf7b2688727763aa458/yarl-1.23.0-cp314-cp314t-win32.whl", hash = "sha256:73309162a6a571d4cbd3b6a1dcc703c7311843ae0d1578df6f09be4e98df38d4", size = 90558, upload-time = "2026-03-01T22:07:43.433Z" }, + { url = 
"https://files.pythonhosted.org/packages/e1/19/3774d162f6732d1cfb0b47b4140a942a35ca82bb19b6db1f80e9e7bdc8f8/yarl-1.23.0-cp314-cp314t-win_amd64.whl", hash = "sha256:4503053d296bc6e4cbd1fad61cf3b6e33b939886c4f249ba7c78b602214fabe2", size = 97610, upload-time = "2026-03-01T22:07:45.773Z" }, + { url = "https://files.pythonhosted.org/packages/51/47/3fa2286c3cb162c71cdb34c4224d5745a1ceceb391b2bd9b19b668a8d724/yarl-1.23.0-cp314-cp314t-win_arm64.whl", hash = "sha256:44bb7bef4ea409384e3f8bc36c063d77ea1b8d4a5b2706956c0d6695f07dcc25", size = 86041, upload-time = "2026-03-01T22:07:49.026Z" }, + { url = "https://files.pythonhosted.org/packages/69/68/c8739671f5699c7dc470580a4f821ef37c32c4cb0b047ce223a7f115757f/yarl-1.23.0-py3-none-any.whl", hash = "sha256:a2df6afe50dea8ae15fa34c9f824a3ee958d785fd5d089063d960bae1daa0a3f", size = 48288, upload-time = "2026-03-01T22:07:51.388Z" }, ] diff --git a/examples/medasr-transcribe/Dockerfile b/examples/medasr-transcribe/Dockerfile index 837f56e..8c87c3e 100644 --- a/examples/medasr-transcribe/Dockerfile +++ b/examples/medasr-transcribe/Dockerfile @@ -1,14 +1,15 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Installs required system libs (libsndfile1) for soundfile. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -24,10 +25,9 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -42,7 +42,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/medasr-transcribe/endpoint.toml b/examples/medasr-transcribe/endpoint.toml index dd7ebeb..6362d69 100644 --- a/examples/medasr-transcribe/endpoint.toml +++ b/examples/medasr-transcribe/endpoint.toml @@ -7,7 +7,7 @@ main = "medasr_transcribe.main" medasr = { ref = "google/medasr", dtypes = ["fp16", "bf16"] } [host.requirements] -cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 8 diff --git a/examples/medasr-transcribe/pyproject.toml b/examples/medasr-transcribe/pyproject.toml index f2d7c0d..44e0889 100644 --- a/examples/medasr-transcribe/pyproject.toml +++ b/examples/medasr-transcribe/pyproject.toml @@ 
-4,7 +4,7 @@ version = "0.1.0" description = "MedASR transcription worker function example" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "numpy", "soundfile", "soxr", diff --git a/examples/medasr-transcribe/uv.lock b/examples/medasr-transcribe/uv.lock index 2214c33..75ae15d 100644 --- a/examples/medasr-transcribe/uv.lock +++ b/examples/medasr-transcribe/uv.lock @@ -841,7 +841,7 @@ dev = [ [package.metadata] requires-dist = [ - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "numpy" }, { name = "soundfile" }, { name = "soxr" }, diff --git a/examples/multi-sdxl-checkpoints/Dockerfile b/examples/multi-sdxl-checkpoints/Dockerfile index 4f87469..f4c4f1b 100644 --- a/examples/multi-sdxl-checkpoints/Dockerfile +++ b/examples/multi-sdxl-checkpoints/Dockerfile @@ -1,13 +1,14 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -23,11 +24,10 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -35,7 +35,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/multi-sdxl-checkpoints/endpoint.toml b/examples/multi-sdxl-checkpoints/endpoint.toml index 1fd8208..0a8f312 100644 --- a/examples/multi-sdxl-checkpoints/endpoint.toml +++ b/examples/multi-sdxl-checkpoints/endpoint.toml @@ -4,7 +4,7 @@ name = "multi-sdxl-checkpoints" main = "multi_sdxl_checkpoints.main" [host.requirements] -cuda = "12.6" +cuda = "12.8" [models.generate] sdxl-base = "stabilityai/stable-diffusion-xl-base-1.0" diff --git a/examples/multi-sdxl-checkpoints/pyproject.toml b/examples/multi-sdxl-checkpoints/pyproject.toml index f3ddf56..a4a586e 100644 --- 
a/examples/multi-sdxl-checkpoints/pyproject.toml +++ b/examples/multi-sdxl-checkpoints/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Multi SDXL checkpoints example with payload-based model selection" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers", "transformers", "accelerate", diff --git a/examples/multi-sdxl-checkpoints/uv.lock b/examples/multi-sdxl-checkpoints/uv.lock index 07fa718..872c7f7 100644 --- a/examples/multi-sdxl-checkpoints/uv.lock +++ b/examples/multi-sdxl-checkpoints/uv.lock @@ -971,7 +971,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "transformers" }, ] diff --git a/examples/openai-codex/Dockerfile b/examples/openai-codex/Dockerfile index b0b3f67..73fb8fd 100644 --- a/examples/openai-codex/Dockerfile +++ b/examples/openai-codex/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example: OpenAI Codex CLI worker (Ubuntu 24.04). # # Contract: -# - Installs gen-worker. +# - Installs dependencies from pyproject.toml/uv.lock. # - Runs discovery at build time, baking /app/.tensorhub/endpoint.lock into the image. # - Uses gen-worker as the image ENTRYPOINT. # @@ -53,18 +53,12 @@ RUN set -eu; \ chmod +x /usr/local/bin/codex; \ rm -rf /tmp/codex.tar.gz -# Stable runtime layer. -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 - # Copy lock metadata first (cache-friendly). 
COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package gen-worker \ - -o /tmp/requirements.all.txt \ - && grep -Ev '^(gen-worker)' /tmp/requirements.all.txt > /tmp/requirements.txt \ + -o /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt # Copy app code late. diff --git a/examples/openai-codex/pyproject.toml b/examples/openai-codex/pyproject.toml index 52f20ed..8f7394f 100644 --- a/examples/openai-codex/pyproject.toml +++ b/examples/openai-codex/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.0" description = "Example Cozy worker that shells out to OpenAI Codex CLI and streams JSONL events." requires-python = ">=3.12" dependencies = [ - "gen-worker==0.3.0", + "gen-worker", "msgspec", ] diff --git a/examples/openai-codex/uv.lock b/examples/openai-codex/uv.lock index a3aabb1..2d01c9e 100644 --- a/examples/openai-codex/uv.lock +++ b/examples/openai-codex/uv.lock @@ -852,7 +852,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "gen-worker", specifier = "==0.3.0" }, + { name = "gen-worker" }, { name = "msgspec" }, ] diff --git a/examples/qwen-image-2512/Dockerfile b/examples/qwen-image-2512/Dockerfile index 6a531e4..4fae936 100644 --- a/examples/qwen-image-2512/Dockerfile +++ b/examples/qwen-image-2512/Dockerfile @@ -1,13 +1,14 @@ # Tenant-supplied Dockerfile example. # # - Installs torch in a stable cacheable layer. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -23,10 +24,9 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/qwen-image-2512/endpoint.toml b/examples/qwen-image-2512/endpoint.toml index d0e3bc4..e56bb6b 100644 --- a/examples/qwen-image-2512/endpoint.toml +++ b/examples/qwen-image-2512/endpoint.toml @@ -7,7 +7,7 @@ main = "qwen_image_2512.main" qwen_image = { ref = "qwen/qwen-image-2512", dtypes = ["bf16", "fp16"] } [host.requirements] -cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 24 diff --git a/examples/qwen-image-2512/pyproject.toml b/examples/qwen-image-2512/pyproject.toml index 14a3b92..342c9e7 100644 --- a/examples/qwen-image-2512/pyproject.toml +++ b/examples/qwen-image-2512/pyproject.toml @@ -4,7 
+4,7 @@ version = "0.1.0" description = "Qwen Image 2512 generation function" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers @ git+https://github.com/huggingface/diffusers", "transformers>=4.51.3", "accelerate", diff --git a/examples/qwen-image-2512/uv.lock b/examples/qwen-image-2512/uv.lock index 77af1b4..b96fbac 100644 --- a/examples/qwen-image-2512/uv.lock +++ b/examples/qwen-image-2512/uv.lock @@ -1598,7 +1598,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", git = "https://github.com/huggingface/diffusers" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "transformers", specifier = ">=4.51.3" }, ] diff --git a/examples/sd15/Dockerfile b/examples/sd15/Dockerfile index 99bb5a2..c52a5dc 100644 --- a/examples/sd15/Dockerfile +++ b/examples/sd15/Dockerfile @@ -1,7 +1,7 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. # # Local build (GPU): @@ -10,7 +10,8 @@ ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -27,20 +28,19 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ # Stable runtime layers (avoid re-downloading torch/cu libs on every tenant build). 
RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker FROM cozy_base AS cozy_final # Copy lock metadata first so dependency layers are cacheable across source edits. COPY pyproject.toml uv.lock /app/ -# Install tenant dependencies into the global environment without replacing torch/gen-worker. +# Install tenant dependencies into the global environment without replacing torch. RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/sd15/Dockerfile.local b/examples/sd15/Dockerfile.local deleted file mode 100644 index 959a94c..0000000 --- a/examples/sd15/Dockerfile.local +++ /dev/null @@ -1,72 +0,0 @@ -# Local-dev Dockerfile for the sd15 example. -# -# Difference vs ./Dockerfile: -# - Installs gen-worker from the local checkout (this repo) so you can test -# unreleased changes. -# -# Build from repo root: -# docker build -f examples/sd15/Dockerfile.local -t cozy-example-sd15-local:dev . 
-# -ARG PYTHON_VERSION=3.12 -FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base - -ARG UV_TORCH_BACKEND=cu126 -ARG TORCH_SPEC="~=2.10.0" - -WORKDIR /app - -ENV UV_CACHE_DIR=/var/cache/uv -ENV UV_LINK_MODE=copy - -RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,id=cozy-apt-lists,target=/var/lib/apt/lists,sharing=locked \ - apt-get update && apt-get install -y --no-install-recommends \ - git \ - && apt-get clean - -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ - "torch${TORCH_SPEC}" - -# Install gen-worker from the local checkout. -COPY pyproject.toml uv.lock README.md /gen-worker/ -COPY src/ /gen-worker/src/ -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages /gen-worker - -FROM cozy_base AS cozy_final - -# Copy lock metadata first so dependency layers are cacheable across source edits. -COPY examples/sd15/pyproject.toml examples/sd15/uv.lock /app/ - -# Install tenant dependencies into the global environment without replacing torch/gen-worker. -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ - -o /tmp/requirements.all.txt \ - && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ - && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt - -# Copy app code late so app edits only invalidate the final layers. -COPY examples/sd15/ /app/ - -# Install the project itself without altering dependency layers. 
-RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --no-deps --no-sources /app - -# Bake discovered functions into the image so Cozy Hub can read them without executing tenant code. -RUN mkdir -p /app/.tensorhub \ - && python -m gen_worker.discover > /app/.tensorhub/endpoint.lock - -# Run as non-root at runtime. -RUN groupadd --system --gid 10001 cozy \ - && useradd --system --uid 10001 --gid cozy --create-home --home-dir /home/cozy --shell /usr/sbin/nologin cozy \ - && chown -R cozy:cozy /app /home/cozy - -ENV HOME=/home/cozy -ENV XDG_CACHE_HOME=/home/cozy/.cache -ENV HF_HOME=/home/cozy/.cache/huggingface - -USER cozy:cozy - -ENTRYPOINT ["python", "-m", "gen_worker.entrypoint"] diff --git a/examples/sd15/endpoint.toml b/examples/sd15/endpoint.toml index 98d3a69..4248a9f 100644 --- a/examples/sd15/endpoint.toml +++ b/examples/sd15/endpoint.toml @@ -12,7 +12,7 @@ sd15_int4 = { ref = "stable-diffusion-v1-5/stable-diffusion-v1-5", dtypes = ["in [host.requirements] # If cuda is set, the platform treats this worker as requiring an NVIDIA GPU. 
-cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 8 diff --git a/examples/sd15/pyproject.toml b/examples/sd15/pyproject.toml index c8fb76b..9c83eac 100644 --- a/examples/sd15/pyproject.toml +++ b/examples/sd15/pyproject.toml @@ -4,7 +4,7 @@ version = "0.1.1" description = "Stable Diffusion 1.5 example (inference-only; Cozy manifest via endpoint.toml)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers", "transformers<5", "accelerate", diff --git a/examples/sd15/uv.lock b/examples/sd15/uv.lock index 8574804..aded35c 100644 --- a/examples/sd15/uv.lock +++ b/examples/sd15/uv.lock @@ -1733,7 +1733,7 @@ requires-dist = [ { name = "accelerate" }, { name = "diffusers" }, { name = "ftfy" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "pillow" }, { name = "torchao" }, { name = "transformers", specifier = "<5" }, diff --git a/examples/smoke-test/Dockerfile b/examples/smoke-test/Dockerfile deleted file mode 100644 index 4fab034..0000000 --- a/examples/smoke-test/Dockerfile +++ /dev/null @@ -1,53 +0,0 @@ -# Tenant-supplied Dockerfile example (CPU). -# -# - Installs gen-worker in a stable cacheable layer. -# - Installs torch (CPU) so Cozy Hub can record a stable backend version for tagging. -# - Installs tenant deps from uv.lock without reinstalling gen-worker. -# - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
-# -ARG PYTHON_VERSION=3.12 -FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim - -WORKDIR /app - -ENV UV_CACHE_DIR=/var/cache/uv -ENV UV_LINK_MODE=copy - -RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ - --mount=type=cache,id=cozy-apt-lists,target=/var/lib/apt/lists,sharing=locked \ - apt-get update && apt-get install -y --no-install-recommends \ - git \ - && apt-get clean - -ARG UV_TORCH_BACKEND=cpu -ARG TORCH_SPEC="~=2.10.0" - -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ - "torch${TORCH_SPEC}" - -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 - -COPY pyproject.toml uv.lock /app/ - -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package gen-worker --no-emit-package torch \ - -o /tmp/requirements.txt \ - && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt - -COPY . 
/app - -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --no-deps --no-sources /app - -RUN mkdir -p /app/.tensorhub \ - && python -m gen_worker.discover > /app/.tensorhub/endpoint.lock - - -ENV HOME=/root -ENV XDG_CACHE_HOME=/root/.cache - - -ENTRYPOINT ["python", "-m", "gen_worker.entrypoint"] diff --git a/examples/smoke-test/src/smoke_test/example.py b/examples/smoke-test/src/smoke_test/example.py deleted file mode 100644 index fdf003d..0000000 --- a/examples/smoke-test/src/smoke_test/example.py +++ /dev/null @@ -1,148 +0,0 @@ -import time -import base64 -from typing import Iterator, List - -import msgspec - -from gen_worker import RequestContext, ResourceRequirements, worker_function -from gen_worker.types import Asset - - -class ImageGenInput(msgspec.Struct): - """Input parameters for image generation.""" - - prompt: str = "a tiny test image" - width: int = 1 - height: int = 1 - num_images: int = 1 - - -class ImageGenOutput(msgspec.Struct): - """Output from image generation (real file output).""" - - images: List[Asset] - - -# 1x1 PNG (valid image bytes). This is deterministic and avoids pulling in Pillow. -_PNG_1X1 = base64.b64decode( - "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMBAAEnnX8AAAAASUVORK5CYII=" -) - - -@worker_function(ResourceRequirements()) -def image_gen_action(ctx: RequestContext, data: ImageGenInput) -> ImageGenOutput: - """Example image generation function that returns real output Assets. - - This is a smoke test: it does not run ML inference. 
It validates: - - request/response msgspec serialization - - output file creation + upload path wiring (ctx.save_bytes) - """ - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - - out: List[Asset] = [] - n = max(1, int(data.num_images)) - for i in range(n): - ref = f"runs/{ctx.request_id}/outputs/image-{i+1}.png" - out.append(ctx.save_bytes(ref, _PNG_1X1)) - return ImageGenOutput(images=out) - - -class AddInput(msgspec.Struct): - a: int = 0 - b: int = 0 - - -class AddOutput(msgspec.Struct): - result: int - - -@worker_function(ResourceRequirements()) -def add_numbers(ctx: RequestContext, data: AddInput) -> AddOutput: - """Example function that adds two numbers.""" - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - time.sleep(0.1) - return AddOutput(result=data.a + data.b) - - -class MultiplyInput(msgspec.Struct): - a: int = 0 - b: int = 0 - - -class MultiplyOutput(msgspec.Struct): - result: int - - -@worker_function(ResourceRequirements()) -def multiply_numbers(ctx: RequestContext, data: MultiplyInput) -> MultiplyOutput: - """Example function that multiplies two numbers.""" - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - time.sleep(0.1) - return MultiplyOutput(result=data.a * data.b) - - -class TokenDelta(msgspec.Struct): - delta: str - - -class StreamInput(msgspec.Struct): - text: str - delay_ms: int = 25 - - -@worker_function(ResourceRequirements()) -def token_stream(ctx: RequestContext, data: StreamInput) -> Iterator[TokenDelta]: - """Example incremental-output function (LLM-style token deltas).""" - for ch in data.text: - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - yield TokenDelta(delta=ch) - if data.delay_ms > 0: - time.sleep(data.delay_ms / 1000.0) - - -class PromptBatchInput(msgspec.Struct): - """Example multi-item request payload (partition axis: input.prompts[]).""" - - prompts: List[str] - style: str = "neutral" - - -class PromptCaption(msgspec.Struct): - index: int - prompt: 
str - caption: str - - -class PromptBatchOutput(msgspec.Struct): - items: List[PromptCaption] - - -@worker_function(ResourceRequirements()) -def caption_prompts(ctx: RequestContext, data: PromptBatchInput) -> PromptBatchOutput: - """Example multi-item request handler for input.prompts[]. - - This function is intentionally simple and deterministic so it can be used in - e2e tests for partitioning and output correlation. - """ - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - - out: List[PromptCaption] = [] - total = max(1, len(data.prompts)) - for i, prompt in enumerate(data.prompts): - if ctx.is_canceled(): - raise InterruptedError("Task cancelled") - text = str(prompt or "").strip() - out.append( - PromptCaption( - index=i, - prompt=text, - caption=f"[{data.style}] {text}" if text else f"[{data.style}]", - ) - ) - ctx.progress((i + 1) / total, stage="multi_item_request") - return PromptBatchOutput(items=out) diff --git a/examples/z-image-lora/Dockerfile b/examples/z-image-lora/Dockerfile index cd4c800..1203d3c 100644 --- a/examples/z-image-lora/Dockerfile +++ b/examples/z-image-lora/Dockerfile @@ -1,13 +1,14 @@ # Tenant-supplied Dockerfile example. # -# - Installs torch + gen-worker in stable cacheable layers. -# - Installs tenant deps from uv.lock without replacing torch/gen-worker. +# - Installs torch in a stable cacheable layer. +# - Installs tenant deps from uv.lock without replacing torch. # - Bakes discovered functions into /app/.tensorhub/endpoint.lock at build time. 
# ARG PYTHON_VERSION=3.12 FROM ghcr.io/astral-sh/uv:python${PYTHON_VERSION}-bookworm-slim AS cozy_base -ARG UV_TORCH_BACKEND=cu126 +ARG CUDA_VERSION=12.8 +ARG UV_TORCH_BACKEND= ARG TORCH_SPEC="~=2.10.0" WORKDIR /app @@ -23,10 +24,9 @@ RUN --mount=type=cache,id=cozy-apt-cache,target=/var/cache/apt,sharing=locked \ && apt-get clean RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages --torch-backend ${UV_TORCH_BACKEND} \ + backend="${UV_TORCH_BACKEND:-cu$(printf '%s' "${CUDA_VERSION}" | tr -d '.')}" \ + && uv pip install --system --break-system-packages --torch-backend "$backend" \ "torch${TORCH_SPEC}" -RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ - uv pip install --system --break-system-packages gen-worker==0.3.0 FROM cozy_base AS cozy_final @@ -34,7 +34,7 @@ COPY pyproject.toml uv.lock /app/ RUN --mount=type=cache,id=cozy-uv-cache,target=/var/cache/uv,sharing=locked \ uv export --no-dev --no-hashes --no-sources --no-emit-project --no-emit-local \ - --no-emit-package torch --no-emit-package gen-worker \ + --no-emit-package torch \ -o /tmp/requirements.all.txt \ && grep -Ev '^(torch|triton|nvidia-|cuda-)' /tmp/requirements.all.txt > /tmp/requirements.txt \ && uv pip install --system --break-system-packages --no-deps -r /tmp/requirements.txt diff --git a/examples/z-image-lora/endpoint.toml b/examples/z-image-lora/endpoint.toml index 080fb51..08f5fcf 100644 --- a/examples/z-image-lora/endpoint.toml +++ b/examples/z-image-lora/endpoint.toml @@ -7,7 +7,7 @@ main = "z_image_lora.main" z-image-turbo = { ref = "tongyi-mai/z-image-turbo", dtypes = ["bf16", "fp16"] } [host.requirements] -cuda = "12.6" +cuda = "12.8" [resources] vram_gb = 16 diff --git a/examples/z-image-lora/pyproject.toml b/examples/z-image-lora/pyproject.toml index e13f00f..77dc2f0 100644 --- a/examples/z-image-lora/pyproject.toml +++ b/examples/z-image-lora/pyproject.toml @@ -4,7 +4,7 @@ version = 
"0.1.0" description = "Image generation with dynamic LoRA loading (z-image pattern)" requires-python = ">=3.12" dependencies = [ - "gen-worker[torch]==0.3.0", + "gen-worker[torch]", "diffusers>=0.36.0", "transformers", "accelerate", diff --git a/examples/z-image-lora/uv.lock b/examples/z-image-lora/uv.lock index aed4a1d..cf8fe8f 100644 --- a/examples/z-image-lora/uv.lock +++ b/examples/z-image-lora/uv.lock @@ -2088,7 +2088,7 @@ dev = [ requires-dist = [ { name = "accelerate" }, { name = "diffusers", specifier = ">=0.36.0" }, - { name = "gen-worker", extras = ["torch"], specifier = "==0.3.0" }, + { name = "gen-worker", extras = ["torch"] }, { name = "peft" }, { name = "pillow" }, { name = "transformers" },