From 5d50c73554391abdb012ccb294951fb6e49f713f Mon Sep 17 00:00:00 2001
From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
Date: Tue, 28 Apr 2026 00:42:50 +0530
Subject: [PATCH 01/24] Add release-cherry-pick Claude Code skill (#1352)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Adds `.claude/skills/release-cherry-pick/SKILL.md` — a Claude Code skill for cherry-picking labeled PRs to a release branch. Invoke with `/release-cherry-pick <version>`.

See this PR created with the skill: https://github.com/NVIDIA/Model-Optimizer/pull/1350

## Summary by CodeRabbit

* **New Features**
  * Added automated release cherry-pick workflow to streamline selecting and applying multiple PRs into release branches.

---------

Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
---
 .claude/skills/release-cherry-pick/SKILL.md | 89 +++++++++++++++++++++
 1 file changed, 89 insertions(+)
 create mode 100644 .claude/skills/release-cherry-pick/SKILL.md

diff --git a/.claude/skills/release-cherry-pick/SKILL.md b/.claude/skills/release-cherry-pick/SKILL.md
new file mode 100644
index 00000000000..c0fe608b169
--- /dev/null
+++ b/.claude/skills/release-cherry-pick/SKILL.md
@@ -0,0 +1,89 @@
+---
+name: release-cherry-pick
+description: Cherry-pick merged PRs labeled for a release branch into that branch, then open a PR and apply the cherry-pick-done label. Use when asked to "cherry-pick PRs for release/X.Y.Z", "pick PRs to release branch", or "cherry-pick labeled PRs".
+---
+
+# Cherry-pick PRs to a Release Branch
+
+Cherry-pick all merged `main` PRs labeled `cherry-pick-<version>` (but not `cherry-pick-done`) into the corresponding `release/<version>` branch, one by one in merge order.
+
+## Step 1 — Identify the target version
+
+Ask the user for the release version (e.g. `0.44.0`) if not already provided.
+
+Set `VERSION=<version>` for use in subsequent steps.
+
+## Step 2 — Fetch pending PRs
+
+Use the GitHub search API to list PRs that have the cherry-pick label but not cherry-pick-done, sorted by merge date ascending:
+
+```bash
+gh api "search/issues?q=repo:NVIDIA/Model-Optimizer+is:pr+is:merged+base:main+label:cherry-pick-<version>+-label:cherry-pick-done&sort=updated&order=asc&per_page=50" \
+  --jq '.items[] | [.number, .title, .pull_request.merged_at] | @tsv' \
+  | sort -t$'\t' -k3
+```
+
+Present the list to the user before proceeding.
+
+## Step 3 — Set up the release branch
+
+Check out `release/<version>`, creating it from the remote if it doesn't exist locally:
+
+```bash
+git fetch origin release/<version>
+git checkout release/<version>
+```
+
+## Step 4 — Get merge commit SHAs
+
+All PRs are squash-merged, so each has a single-parent commit. Retrieve the SHA for each PR:
+
+```bash
+gh pr view <pr-number> --repo NVIDIA/Model-Optimizer --json mergeCommit --jq '.mergeCommit.oid'
+```
+
+## Step 5 — Cherry-pick in merge order
+
+Cherry-pick each commit with `-s` (DCO sign-off). GPG signing is handled automatically by the repo's git config.
+
+```bash
+git cherry-pick -s <sha>
+```
+
+**On conflict:** Tell the user which PR caused the conflict and ask them to fix it, then continue:
+
+```bash
+git cherry-pick --continue
+```
+
+## Step 6 — Create a PR to the release branch
+
+Push the cherry-picks to a new branch and open a PR targeting `release/<version>`. The PR title lists every cherry-picked PR number. The body uses `## Cherry-picked PRs` as the only heading with one `- #<pr-number>` bullet per PR — no titles, no links, no extra text.
+
+```bash
+git checkout -B cherry-picks/release-<version>
+git push -u origin cherry-picks/release-<version>
+
+gh pr create \
+  --title "[Cherry-pick] PRs #<pr1> #<pr2> ..." \
+  --base release/<version> \
+  --head cherry-picks/release-<version> \
+  --body "$(cat <<'EOF'
+## Cherry-picked PRs
+
+- #<pr1>
+- #<pr2>
+...
+EOF
+)"
+```
+
+## Step 7 — Apply cherry-pick-done label
+
+Add the `cherry-pick-done` label to every PR that was successfully cherry-picked:
+
+```bash
+for pr in ...; do
+  gh pr edit $pr --repo NVIDIA/Model-Optimizer --add-label "cherry-pick-done"
+done
+```

From 57eb6b7bef595188fd693bbda805ec09d2666822 Mon Sep 17 00:00:00 2001
From: Ajinkya Rasane <131806219+ajrasane@users.noreply.github.com>
Date: Mon, 27 Apr 2026 10:39:43 -0400
Subject: [PATCH 02/24] chore: Move FP8 MHA quantization entry from 0.45 to 0.44 in CHANGELOG (#1351)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

PR #1289 (FP8 MHA quantization for ViT) was merged to `main` after `0.44.0rc1` was tagged, so the rc1 wheel ships without the `nn.LayerNorm` registration that the example's `_FP8_MHA_OVERRIDE` now references — surfaced as nvbug 6114983 (`ValueError: parent_class 'nn.LayerNorm' not found in QuantModuleRegistry` when running `torch_quant_to_onnx.py --quantize_mode=fp8`).

PR #1289 is labeled `cherry-pick-0.44.0` and will be cherry-picked to `release/0.44.0` for the next rc, so the feature ships in 0.44 — this PR moves the corresponding release-notes bullet from the `0.45 (Future)` section to `0.44 (2026-05-xx)` to match.

Signed-off-by: ajrasane <131806219+ajrasane@users.noreply.github.com>
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 CHANGELOG.rst | 1 +
 1 file changed, 1 insertion(+)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 2b595515f58..2e7cd6bfdf1 100755
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -17,6 +17,7 @@ Changelog
 - [Early Testing] Add Claude Code PTQ skill (``.claude/skills/ptq/``) for agent-assisted post-training quantization. The skill guides the agent through environment detection, model support checking, format selection, and execution via the launcher or manual SLURM/Docker/bare GPU paths. Includes handling for unlisted models with custom module patching. This feature is in early testing — use with caution.
 - Add performant layerwise calibration for large models that don't fit on GPU (e.g. DeepSeek-R1, Kimi-K2). See `modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml `_ for usage. Layerwise calibration also supports PTQ with intermediate progress saving — useful when long PTQ runs get hit with Slurm timeouts. See `modelopt_recipes/general/ptq/nvfp4_default-none_kv_gptq.yaml `_ for usage.
 - Add implicit GEMM CUDA kernel for Conv3D with fused NVFP4 fake quantization (``modelopt.torch.quantization.src.conv``). When NVFP4 quantization is applied to an ``nn.Conv3d`` layer via ModelOpt PTQ, the implicit GEMM path is used automatically instead of cuDNN. Uses BF16 WMMA tensor cores (SM80+) with FP32 accumulation and in-kernel FP4 (E2M1) activation quantization. Grouped convolution (``groups > 1``) falls back to the default cuDNN path. Inference only — training mode falls back to cuDNN with a warning.
+- Add FP8 MHA quantization support for vision transformers.
Adds an attention-aware ONNX post-processing pass (scale Mul / K-transpose move before Q, Q→DQ insertion on softmax output) in :class:`FP8QuantExporter `, per-instance nested-attention-wrapper skipping in the HF plugin, and ``nn.LayerNorm`` registration in ``QuantModuleRegistry`` so BMM input quantizers and LayerNorm output quantizers defined in FP8_DEFAULT_CFG are honored end-to-end. See `examples/torch_onnx/torch_quant_to_onnx.py `_ for the general timm-model quantize→ONNX workflow. **Backward Breaking Changes** From 816da0fca1dfa1084665381b02abf97af91b1845 Mon Sep 17 00:00:00 2001 From: "Grzegorz K. Karch" Date: Mon, 27 Apr 2026 20:01:27 +0200 Subject: [PATCH 03/24] fix incomplete mapping of safetensors in generated puzzletron checkpoint (#1330) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: ? Bug fix Fixes `https://github.com/NVIDIA/Model-Optimizer/blob/main/examples/puzzletron/main.py` where multi-GPU run caused only part of the file `model.safetensors.index.json` to be written to disk. ### Usage does not apply ### Testing Follow [instructions, step 3](https://github.com/NVIDIA/Model-Optimizer/tree/main/examples/puzzletron#compress-the-model) - run with `--nproc_per_node 2` ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). - Is this change backward compatible?: ✅ - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A - Did you write any new necessary tests?: N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: N/A ### Additional Information ## Summary by CodeRabbit * **New Features** * Added a public checkpoint-saving entry that consolidates distributed sharded model shards into a single filesystem checkpoint; retains direct saving for single-process runs. * **Refactor** * Validation/evaluation tooling now uses the consolidated checkpoint-saving flow when persisting realized model checkpoints during runs. --------- Signed-off-by: Grzegorz Karch Signed-off-by: Grzegorz K. 
Karch Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com> Co-authored-by: CodeRabbit Co-authored-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .../model_descriptor_factory.py | 2 +- .../build_replacement_library.py | 19 ++- .../puzzletron/tools/checkpoint_utils_hf.py | 49 +++++++ ...validate_puzzle_with_multi_replacements.py | 4 +- tests/gpu/torch/puzzletron/test_puzzletron.py | 5 - .../tools/test_save_ckpt_from_shards.py | 132 ++++++++++++++++++ 6 files changed, 196 insertions(+), 15 deletions(-) create mode 100644 tests/gpu/torch/puzzletron/tools/test_save_ckpt_from_shards.py diff --git a/modelopt/torch/puzzletron/anymodel/model_descriptor/model_descriptor_factory.py b/modelopt/torch/puzzletron/anymodel/model_descriptor/model_descriptor_factory.py index 74aaf311bf4..cff972a51ec 100644 --- a/modelopt/torch/puzzletron/anymodel/model_descriptor/model_descriptor_factory.py +++ b/modelopt/torch/puzzletron/anymodel/model_descriptor/model_descriptor_factory.py @@ -33,7 +33,7 @@ "qwen3": "qwen3", "nemotron_h": "nemotron_h", "nemotron_h_v2": "nemotron_h_v2", - "gpt_oss_20b": "gpt_oss_20b", + "gpt_oss": "gpt_oss", } diff --git a/modelopt/torch/puzzletron/replacement_library/build_replacement_library.py b/modelopt/torch/puzzletron/replacement_library/build_replacement_library.py index b5d0c754f16..999ec6c690a 100644 --- a/modelopt/torch/puzzletron/replacement_library/build_replacement_library.py +++ b/modelopt/torch/puzzletron/replacement_library/build_replacement_library.py @@ -509,13 +509,18 @@ def _build_layer_replacements_from_block_library(block_library_df: pd.DataFrame) weight_paths = [] for subblock_name in ["attention", "ffn"]: checkpoint_dir = row[f"{subblock_name}_checkpoint_dir"] - if checkpoint_dir is not None: - subblock_path = ( - Path(checkpoint_dir) - / SAFETENSORS_SUBBLOCKS_DIR_NAME - / f"block_{block_idx}_{subblock_name}.safetensors" - ) - weight_paths.append(subblock_path) + # pandas represents missing cells as float NaN (e.g. for no-op subblocks), + # so check for both None and NaN before constructing a Path. 
+ if checkpoint_dir is None or ( + isinstance(checkpoint_dir, float) and pd.isna(checkpoint_dir) + ): + continue + subblock_path = ( + Path(checkpoint_dir) + / SAFETENSORS_SUBBLOCKS_DIR_NAME + / f"block_{block_idx}_{subblock_name}.safetensors" + ) + weight_paths.append(subblock_path) weight_paths = sorted(set(weight_paths)) layer_replacement = { "parent_layer_indices": [block_idx], diff --git a/modelopt/torch/puzzletron/tools/checkpoint_utils_hf.py b/modelopt/torch/puzzletron/tools/checkpoint_utils_hf.py index 69b8e5e29d9..1240d1c9b65 100644 --- a/modelopt/torch/puzzletron/tools/checkpoint_utils_hf.py +++ b/modelopt/torch/puzzletron/tools/checkpoint_utils_hf.py @@ -29,12 +29,14 @@ from typing import TYPE_CHECKING, Any, BinaryIO import torch +import torch.distributed as tdist import transformers from safetensors.torch import save_file as safe_save_file from transformers import AutoConfig, AutoModelForCausalLM, PretrainedConfig, PreTrainedModel from transformers.dynamic_module_utils import get_class_from_dynamic_module from transformers.utils import SAFE_WEIGHTS_INDEX_NAME +import modelopt.torch.utils.distributed as dist_utils from modelopt.torch.utils import json_dumps from ..block_config import maybe_cast_block_configs @@ -51,6 +53,7 @@ "load_model_config", "init_model_from_config", "save_checkpoint", + "save_checkpoint_from_shards", "save_subblocks", "save_model_config", ] @@ -200,6 +203,52 @@ def save_checkpoint( _save_checkpoint(model.config, model.state_dict(), checkpoint_dir, descriptor) +def save_checkpoint_from_shards( + model: PreTrainedModel, checkpoint_dir: Path | str, descriptor: "ModelDescriptor" +) -> None: + """ + Save a checkpoint when the model's weights are sharded across distributed ranks. + + Gathers each rank's partial state dictionary onto rank 0 and writes a complete checkpoint + (including the safetensors index and subblocks) from the merged weights. On a single-process + run, saves directly from the local state dict. Only rank 0 performs the filesystem write; + non-master ranks only participate in the gather. + + Parameters: + model (PreTrainedModel): The model instance whose local state_dict contains this rank's + shard of weights. + checkpoint_dir (Path | str): Destination directory for the checkpoint files. + descriptor (ModelDescriptor): Descriptor used to partition weights into subblocks and build + the safetensors index. 
+ """ + + local_sd = {k: v.cpu() for k, v in model.state_dict().items()} + if dist_utils.size() > 1: + save_err: str | None = None + if dist_utils.is_master(): + gathered: list[dict] = [None] * dist_utils.size() + tdist.gather_object(local_sd, gathered, dst=0) + full_sd: dict[str, torch.Tensor] = {} + for shard_sd in gathered: + if shard_sd is None: + continue + full_sd.update(shard_sd) + try: + _save_checkpoint(model.config, full_sd, checkpoint_dir, descriptor) + except Exception as e: + save_err = repr(e) + else: + tdist.gather_object(local_sd, dst=0) + err_box = [save_err] + tdist.broadcast_object_list(err_box, src=0) + # Barrier ensures all ranks wait until file I/O completes before continuing + dist_utils.barrier() + if err_box[0] is not None: + raise RuntimeError(f"Checkpoint save failed on rank 0: {err_box[0]}") + else: + _save_checkpoint(model.config, local_sd, checkpoint_dir, descriptor) + + def _save_checkpoint( model_config: PretrainedConfig, state_dict: dict[str, torch.Tensor], diff --git a/modelopt/torch/puzzletron/tools/validate_puzzle_with_multi_replacements.py b/modelopt/torch/puzzletron/tools/validate_puzzle_with_multi_replacements.py index d8471aee236..a46fba52d09 100644 --- a/modelopt/torch/puzzletron/tools/validate_puzzle_with_multi_replacements.py +++ b/modelopt/torch/puzzletron/tools/validate_puzzle_with_multi_replacements.py @@ -41,7 +41,7 @@ from ..utils.validate_runtime_pipeline import perform_pipeline_stitches from . import validate_model from .checkpoint_utils import copy_tokenizer -from .checkpoint_utils_hf import save_checkpoint +from .checkpoint_utils_hf import save_checkpoint_from_shards from .common import resolve_torch_dtype from .sharded_checkpoint_utils import load_and_shard_model from .validation_utils import ( @@ -189,7 +189,7 @@ def validate_puzzle_solutions(args: DictConfig) -> None: # TODO: Loo into internal Puzzleron code to see how to save as symlinks # save_checkpoint_as_symlinks is currently not supported pass - save_checkpoint(model, checkpoint_dir, descriptor) + save_checkpoint_from_shards(model, checkpoint_dir, descriptor) copy_tokenizer( args.tokenizer_name, diff --git a/tests/gpu/torch/puzzletron/test_puzzletron.py b/tests/gpu/torch/puzzletron/test_puzzletron.py index a393e1e086a..d44cbc71e9c 100644 --- a/tests/gpu/torch/puzzletron/test_puzzletron.py +++ b/tests/gpu/torch/puzzletron/test_puzzletron.py @@ -25,11 +25,6 @@ from _test_utils.torch.puzzletron.utils import setup_test_model_and_data from packaging.version import Version -# The puzzletron pipeline imports mip unconditionally at module level. In NeMo containers -# the [puzzletron] extras are not pre-installed, so importing the test file fails with a -# deep ModuleNotFoundError. Skip early with an actionable message instead. -pytest.importorskip("mip", reason="pip install -e '.[puzzletron]' to install MIP solver") - import modelopt.torch.puzzletron as mtpz import modelopt.torch.utils.distributed as dist diff --git a/tests/gpu/torch/puzzletron/tools/test_save_ckpt_from_shards.py b/tests/gpu/torch/puzzletron/tools/test_save_ckpt_from_shards.py new file mode 100644 index 00000000000..a31c687cc1e --- /dev/null +++ b/tests/gpu/torch/puzzletron/tools/test_save_ckpt_from_shards.py @@ -0,0 +1,132 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for save_checkpoint_from_shards in checkpoint_utils_hf.""" + +import json +from functools import partial + +import pytest +import torch +from _test_utils.torch.distributed.utils import spawn_multiprocess_job +from _test_utils.torch.transformers_models import get_tiny_llama +from safetensors.torch import load_file as safe_load_file + +from modelopt.torch.puzzletron.anymodel.models.llama.llama_model_descriptor import ( + LlamaModelDescriptor, +) +from modelopt.torch.puzzletron.tools.checkpoint_utils_hf import ( + SAFE_WEIGHTS_INDEX_NAME, + SAFETENSORS_SUBBLOCKS_DIR_NAME, + save_checkpoint_from_shards, +) + + +class TestSaveCheckpointFromShardsSingleProcess: + """Tests that run without torch.distributed (world_size=1 path).""" + + def test_creates_config_index_and_subblocks(self, tmp_path): + model = get_tiny_llama() + expected_keys = set(model.state_dict().keys()) + save_checkpoint_from_shards(model, tmp_path, LlamaModelDescriptor) + + # test safetensors index file exists and contains weight map + index_path = tmp_path / SAFE_WEIGHTS_INDEX_NAME + assert index_path.exists(), "safetensors index file was not written" + index = json.loads(index_path.read_text()) + assert "weight_map" in index + assert set(index["weight_map"].keys()) == expected_keys + + # test subblocks directory exists and contains shard files + subblocks_dir = tmp_path / SAFETENSORS_SUBBLOCKS_DIR_NAME + assert subblocks_dir.is_dir(), "subblocks directory was not created" + assert len(list(subblocks_dir.glob("*.safetensors"))) > 0, ( + "no safetensors shard files were saved" + ) + + # test config.json saved + config_path = tmp_path / "config.json" + assert config_path.exists(), "config.json was not saved" + cfg = json.loads(config_path.read_text()) + assert cfg["num_hidden_layers"] == get_tiny_llama().config.num_hidden_layers + + # test subblock filenames follow descriptor groups + filenames = set(index["weight_map"].values()) + expected_substrings = {"embeddings", "lm_head", "block_0_ffn", "block_0_attention"} + for substr in expected_substrings: + assert any(substr in f for f in filenames), f"no shard filename contains '{substr}'" + + def test_tie_word_embeddings_excluded(self, tmp_path): + model = get_tiny_llama(tie_word_embeddings=True) + save_checkpoint_from_shards(model, tmp_path, LlamaModelDescriptor) + + index = json.loads((tmp_path / SAFE_WEIGHTS_INDEX_NAME).read_text()) + assert "lm_head.weight" not in index["weight_map"] + + reloaded_sd = {} + for shard in (tmp_path / SAFETENSORS_SUBBLOCKS_DIR_NAME).glob("*.safetensors"): + reloaded_sd.update(safe_load_file(str(shard))) + assert "lm_head.weight" not in reloaded_sd + + def test_saved_weights_match_original(self, tmp_path): + model = get_tiny_llama() + original_sd = {k: v.clone().cpu() for k, v in model.state_dict().items()} + save_checkpoint_from_shards(model, tmp_path, LlamaModelDescriptor) + + reloaded_sd = {} + for shard in (tmp_path / SAFETENSORS_SUBBLOCKS_DIR_NAME).glob("*.safetensors"): + reloaded_sd.update(safe_load_file(str(shard))) + + assert set(reloaded_sd.keys()) == set(original_sd.keys()) + for key in original_sd: + 
torch.testing.assert_close(reloaded_sd[key], original_sd[key]) + + +def _distributed_save_worker(rank, world_size, checkpoint_dir): + """Worker that shards a model's state dict across ranks and saves.""" + model = get_tiny_llama() + full_sd = model.state_dict() + keys = sorted(full_sd.keys()) + per_rank = len(keys) // world_size + start = rank * per_rank + end = start + per_rank if rank < world_size - 1 else len(keys) + shard_keys = keys[start:end] + + # Zero out keys not owned by this rank so gather reconstructs the full dict. + for k in keys: + if k not in shard_keys: + full_sd[k] = torch.zeros_like(full_sd[k]) + + model.load_state_dict(full_sd) + save_checkpoint_from_shards(model, checkpoint_dir, LlamaModelDescriptor) + + +@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="need >=2 GPUs for multi-rank test") +class TestSaveCheckpointFromShardsMultiProcess: + """Tests that exercise the distributed gather path (world_size > 1).""" + + def test_distributed_save_creates_valid_checkpoint(self, tmp_path): + spawn_multiprocess_job(2, partial(_distributed_save_worker, checkpoint_dir=tmp_path)) + + index_path = tmp_path / SAFE_WEIGHTS_INDEX_NAME + assert index_path.exists() + index = json.loads(index_path.read_text()) + + model = get_tiny_llama() + expected_keys = set(model.state_dict().keys()) + assert set(index["weight_map"].keys()) == expected_keys + + shard_files = list((tmp_path / SAFETENSORS_SUBBLOCKS_DIR_NAME).glob("*.safetensors")) + assert len(shard_files) > 0 From 8abe394fe2d3c6cbac2a23c0633b0d3c39ef859a Mon Sep 17 00:00:00 2001 From: Chenjie Luo <108829653+cjluo-nv@users.noreply.github.com> Date: Mon, 27 Apr 2026 12:40:14 -0700 Subject: [PATCH 04/24] [NVBUG: 6103846] Fix nvfp4_awq export for uncalibrated MoE experts (#1354) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary - NVBug: [6103846](https://nvbugspro.nvidia.com/bug/6103846) — `Qwen3-30B-A3B nvfp4_awq` quantization fails at export with `AssertionError: Modules have different quantization formats`. - Root cause: in `model_calib.awq_lite`, MoE experts that end up disabled (NaN in act/weight scales, or no search-pass tokens) get `max_calibrate`-d but no `pre_quant_scale`. `get_quantization_format` then returns `nvfp4` for those experts while siblings stay `nvfp4_awq`. `unified_export_hf.requantize_resmooth_fused_llm_layers` groups all 128 experts of each linear name (gate_proj/down_proj/up_proj) and calls `preprocess_linear_fusion(..., resmooth_only=True)`, which asserts uniform format → fires for any single mismatched expert. - Fix: unify the disabled-expert paths in the awq_lite postprocess loop so any expert with `is_enabled == False` (no cache hits, NaN scales, or no search-pass tokens) receives `max_calibrate` + a neutral all-ones `pre_quant_scale`, matching the existing behavior for `num_cache_steps == 0`. Emit a warning so users notice that calibration coverage is incomplete and accuracy may degrade. ## Test plan - [x] `pytest tests/unit/torch/quantization/test_calib.py -k 'awq'` → 5 passed - [x] End-to-end on `Qwen/Qwen3-30B-A3B` with `NVFP4_AWQ_LITE_CFG` and a small calib set that leaves many experts uncalibrated: - All 6144 gate_proj/up_proj/down_proj expert linears report `nvfp4_awq` (no mismatch) - `export_hf_checkpoint` succeeds with no `AssertionError` - The new "Forcing pre_quant_scale=1 ... 
may degrade accuracy" warning fires for each affected expert - [x] Re-run via `examples/llm_ptq/hf_ptq.py` with the bug-report CLI (cnn_dailymail, batch_size=8, calib_size=64 — scaled down from 512 to fit budget) on B200: - 36 "the second time did not forward data through ..experts.X.{gate,up,down}_proj" warnings — i.e. the exact bug-triggering condition from the original NVBug log naturally reproduces - 2058 "Forcing pre_quant_scale=1" warnings — fix path activates for uncalibrated/disabled experts - 0 `AssertionError`s — export completes - `Quantized model exported to: /tmp/test_plan_qwen3-30b-a3b-nvfp4_awq` and post-PTQ generation works --------- Signed-off-by: Chenjie Luo Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- CHANGELOG.rst | 1 + modelopt/torch/quantization/model_calib.py | 34 +++++++++++++++------- 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 2e7cd6bfdf1..ae08ee3a04a 100755 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -30,6 +30,7 @@ Changelog - Fix Minitron pruning (``mcore_minitron``) for MoE models. Importance estimation hooks were incorrectly registered for MoE modules and NAS step was hanging before this. - Fix TRT support for remote autotuning in ONNX Autotune from 10.16+ to 10.15+ and fix TRT versioning check to the ``trtexec`` version instead of the TRT Python API when using ``trtexec`` backend. - Exclude MatMul/Gemm nodes with K or N < 16 from ONNX INT8 and FP8 quantization. Such small-dimension GEMMs cannot efficiently use INT8/FP8 Tensor Cores and the added Q/DQ layers cause perf regressions in TensorRT. Honors Gemm ``transB`` when deriving K. +- Fix ``nvfp4_awq`` export ``AssertionError: Modules have different quantization formats`` for MoE models (e.g. Qwen3-30B-A3B) when some experts are not exercised by the calibration data. ``awq_lite`` now applies a neutral all-ones ``pre_quant_scale`` to any expert that ends up disabled (no cache-pass tokens, NaN scales, or no search-pass tokens) so its format remains ``nvfp4_awq``, consistent with the rest of the MoE block. A warning is emitted whenever this fallback fires. **Misc** diff --git a/modelopt/torch/quantization/model_calib.py b/modelopt/torch/quantization/model_calib.py index 9b1cc5bc0c6..0aec4411e0e 100644 --- a/modelopt/torch/quantization/model_calib.py +++ b/modelopt/torch/quantization/model_calib.py @@ -1270,11 +1270,30 @@ def postprocess(module, name): for name, module in model.named_modules(): if hasattr(module, "awq_lite"): - if module.awq_lite.num_cache_steps == 0: - # Uncalibrated expert: max calibrate weights and apply neutral - # (all-ones) pre_quant_scale for export consistency. - # NOTE: ones_scale must be registered OUTSIDE enable_weight_access_and_writeback + # Flag modules whose search pass missed them despite cache hits, so + # they fall through to the neutral-scale path below. + if module.awq_lite.num_cache_steps > 0 and module.awq_lite.num_search_steps == 0: + module.awq_lite.is_enabled = False + warnings.warn( + "awq_lite: Calling `forward_loop(model)` the second time did not forward" + f" data through the {name}. Please provide a valid `forward_loop` function" + " that can be used to forward data through the model many times." + ) + + if not module.awq_lite.is_enabled: + # Expert is disabled — uncalibrated (no cache-pass tokens, set + # at the pre-search pass above), had NaN in act/weight scales, + # or saw no search-pass tokens. 
Max-calibrate weights and apply
+                # a neutral (all-ones) pre_quant_scale so the exporter sees a
+                # consistent nvfp4_awq format across all expert linears in an
+                # MoE group.
+                # NOTE: ones-scale must be registered OUTSIDE enable_weight_access_and_writeback
                 # because HF accelerate post_forward drops newly-registered submodule buffers.
+                warnings.warn(
+                    f"awq_lite: Forcing pre_quant_scale=1 for {name} because the expert "
+                    "was not properly exercised during calibration. This may degrade accuracy; "
+                    "consider increasing calibration size or using a more diverse dataset."
+                )
                 with enable_weight_access_and_writeback(module, model, name_to_module):
                     max_calibrate(module, lambda module: module.weight_quantizer(module.weight))
                 w_shape, w_dtype, w_device = (
@@ -1289,13 +1308,6 @@ def postprocess(module, name):
                         device=w_device,
                     )
             else:
-                if module.awq_lite.num_search_steps == 0:
-                    module.awq_lite.is_enabled = False
-                    warnings.warn(
-                        "awq_lite: Calling `forward_loop(model)` the second time did not forward"
-                        f" data through the {name}. Please provide a valid `forward_loop` function"
-                        " that can be used to forward data through the model many times."
-                    )
                 with enable_weight_access_and_writeback(module, model, name_to_module):
                     postprocess(module, name)

From 3383a20abb1d2a883b9346beaa9784ffc6db44ad Mon Sep 17 00:00:00 2001
From: Asha Anoosheh
Date: Mon, 27 Apr 2026 22:42:45 +0200
Subject: [PATCH 05/24] Fix regex capture for Megatron KD PP layer renaming (#1355)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What does this PR do?

Type of change: Bug fix

Previously the regex required a dot after the integer layer number, but no dot is present when the layer index is the last component of the submodule name (see the sketch after the summary below).

### Usage

```python
# Add a code snippet demonstrating how to use this
```

### Testing

### Before your PR is "*Ready for review*"

Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.).

- Is this change backward compatible?: ✅ / ❌ / N/A
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A
- Did you write any new necessary tests?: ✅ / ❌ / N/A
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A

### Additional Information

## Summary by CodeRabbit

* **Bug Fixes**
  * Improved detection and handling of pipeline-parallel layer indices in model names to correctly support layer identifiers positioned at the end of submodule names, enhancing compatibility with various naming conventions in distillation workflows.
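To make the fix concrete, here is a minimal standalone sketch of the old vs. new behavior (the module names are hypothetical; the two regexes and the dot-anchored replacement come from the diff below):

```python
import re

OLD = r"(?<=\.)\d+(?=\.)"    # layer index must be followed by a dot
NEW = r"(?<=\.)\d+(?=\.|$)"  # ...or may sit at the very end of the name

# Index at the end of the submodule name: the old pattern misses it entirely.
name = "decoder.layers.31"
assert re.search(OLD, name) is None
assert re.search(NEW, name).group(0) == "31"

# Anchoring the replacement on the leading dot also keeps a bare str.replace
# (which substitutes every occurrence) from rewriting unrelated digits:
name = "decoder.layers.1.mlp.fc1"
m = re.search(NEW, name)
assert name.replace(m.group(0), "7") == "decoder.layers.7.mlp.fc7"         # bare: wrong
assert name.replace(f".{m.group(0)}", ".7") == "decoder.layers.7.mlp.fc1"  # anchored: intended
```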
---------

Signed-off-by: Asha Anoosheh
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 modelopt/torch/distill/plugins/megatron.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/modelopt/torch/distill/plugins/megatron.py b/modelopt/torch/distill/plugins/megatron.py
index dbfad6fb6bb..9a98eee9c77 100644
--- a/modelopt/torch/distill/plugins/megatron.py
+++ b/modelopt/torch/distill/plugins/megatron.py
@@ -163,7 +163,7 @@ def setup_distillation_config(
 
 def _adjust_layer_index_for_pp(submodule_name, model_cfg):
     """Adjust any sequence-based layer indices found in a submodule name for Pipeline Parallelism."""
-    match = re.search(r"(?<=\.)\d+(?=\.)", submodule_name)
+    match = re.search(r"(?<=\.)\d+(?=\.|$)", submodule_name)
     if not match:
         return submodule_name
 
@@ -172,7 +172,7 @@ def _adjust_layer_index_for_pp(submodule_name, model_cfg):
     if new_layer_idx < 0:
         raise ValueError(f"Layer {submodule_name} does not fall on final PP rank.")
 
-    new_submodule_name = submodule_name.replace(match.group(0), str(new_layer_idx))
+    new_submodule_name = submodule_name.replace(f".{match.group(0)}", f".{new_layer_idx}")
     if parallel_state.get_tensor_and_context_parallel_rank() == 0:
         logger.info(
             f'Distillation: Renamed layer "{submodule_name}" on final PP rank to "{new_submodule_name}"'

From 34d554ff8ee608f264e29101826e1f681d2523be Mon Sep 17 00:00:00 2001
From: "Grzegorz K. Karch"
Date: Tue, 28 Apr 2026 15:04:51 +0200
Subject: [PATCH 06/24] Add required keys to attention pruning config (#1360)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What does this PR do?

Type of change: Bug fix

The config `examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml` was missing keys required to use attention pruning in the example `examples/puzzletron/main.py`.

### Usage

### Testing

In `examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/Llama-3_1-8B.yaml` change `ffn_pruning` to `attn_pruning` (see the sketch after the summary below).

### Before your PR is "*Ready for review*"

Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.).

- Is this change backward compatible?: ✅
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A
- Did you write any new necessary tests?: N/A
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: N/A

### Additional Information

## Summary by CodeRabbit

* **Chores**
  * Updated pruning configuration for improved KV-head pruning support, including enhanced importance hook settings and attention output handling for memory optimization.
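For reference, a minimal sketch of the testing step above; the surrounding contents of `Llama-3_1-8B.yaml` are assumed here, and only the `ffn_pruning` to `attn_pruning` swap comes from this PR:

```yaml
# Hypothetical excerpt of
# examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/Llama-3_1-8B.yaml
defaults:
  - pruning: attn_pruning # was: ffn_pruning; exercises the keys added by this PR
```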
Signed-off-by: Grzegorz Karch Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .../llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml b/examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml index 01886607e4d..53d7e4bd9c6 100644 --- a/examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml +++ b/examples/puzzletron/configs/llama-3_1-8B_pruneffn_memory/pruning/attn_pruning.yaml @@ -1,8 +1,15 @@ defaults: - pruning_defaults +hook_class: ${get_object:modelopt.torch.prune.importance_hooks.base_hooks.IndependentKvHeadContributionHook} + activations_log_dir: ${puzzle_dir}/pruning/pruning_scores/attn_${pruning.activation_hooks_kwargs.method}/${pruning.experiment_id} +pruning_mixin: + _target_: modelopt.torch.puzzletron.pruning.kv_heads_pruning_mixin.KVHeadsPruningMixIn + layer_descriptor: + _target_: modelopt.torch.puzzletron.anymodel.models.llama.llama_model_descriptor.LlamaKVHeadsLayerDescriptor + activation_hooks_kwargs: method: independent_kv_head_contribution optimize_for: memory # IndependentKvHeadContributionHook implementation that consumes less memory From 484acbf961c9a09ffa7a817fbb6b9b788f7e86a7 Mon Sep 17 00:00:00 2001 From: Jenny Chen Date: Tue, 28 Apr 2026 09:39:41 -0400 Subject: [PATCH 07/24] Support EP mcore import for TE Spec and Fix mamba moe config (#1342) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: Bug fix - Enable EP (expert parallelism) import for HF to MCore when using TE Spec - Fix bug in mamba moe config which doesn't skip attention layers properly in MCore (Mcore uses different naming for attention layers than HF) - Add getter for Quant Config (used in MLM modelopt examples to get quant cfg fields) ### Usage ```python # In Megatron-LM/examples/post_training/modelopt MLM_EXTRA_ARGS="--export-default-te-spec --trust-remote-code --moe-router-dtype fp32" EP=4 HF_MODEL_CKPT= MLM_MODEL_SAVE= ./convert.sh nvidia/NVIDIA-Nemotron-3-Nano-30B-A3B-BF16 ``` ### Testing ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). - Is this change backward compatible?: ✅ / ❌ / N/A - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A - Did you write any new necessary tests?: ✅ / ❌ / N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A ### Additional Information ## Summary by CodeRabbit * **Bug Fixes** * Corrected expert-slice assignment so each expert-parallel rank loads the proper expert slice. * Improved detection of pipeline-parallel layer indices in submodule names. * **Improvements** * Relaxed constraints between local and global expert counts for grouped-local-expert imports. * Added typed helpers for managing quantization configuration entries and expanded quantizer disable patterns. 
* Exporter now accepts an additional hybrid model type when available. --------- Signed-off-by: Jennifer Chen Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .../torch/export/plugins/megatron_importer.py | 20 +++++++++++++------ .../torch/export/unified_export_megatron.py | 7 ++++++- modelopt/torch/quantization/config.py | 16 +++++++++++---- 3 files changed, 32 insertions(+), 11 deletions(-) diff --git a/modelopt/torch/export/plugins/megatron_importer.py b/modelopt/torch/export/plugins/megatron_importer.py index b1d37c1ad90..e485731b3d8 100644 --- a/modelopt/torch/export/plugins/megatron_importer.py +++ b/modelopt/torch/export/plugins/megatron_importer.py @@ -39,6 +39,7 @@ has_mcore = False with import_plugin("megatron"): from megatron.core.parallel_state import ( + get_expert_model_parallel_rank, get_expert_tensor_parallel_world_size, get_tensor_model_parallel_world_size, ) @@ -294,9 +295,13 @@ def _grouped_mlp_merging( assert module.num_gemms == num_local_experts, ( "num_gemms must be equal to num_local_experts in TEGroupedMLP" ) - for expert_id in range(init_expert_id, init_expert_id + num_local_experts): - tensor = self._get_safetensor(prefix.format(expert_id) + ".weight") - state_dict[f"weight{expert_id}"] = tensor + # init_expert_id is the global index of this rank's first local expert. + # TEGroupedMLP stores weights as weight0..weight{num_local-1} locally, so we + # map global expert_id -> local slot (expert_id - init_expert_id). + for local_id in range(num_local_experts): + global_expert_id = init_expert_id + local_id + tensor = self._get_safetensor(prefix.format(global_expert_id) + ".weight") + state_dict[f"weight{local_id}"] = tensor # TODO handle weight_scale module.load_state_dict(state_dict) @@ -653,10 +658,13 @@ def _import_transformer_layer(self, layer, layer_id, layer_pbar, is_mtp: bool = layer_pbar.set_description("Importing MoE grouped local experts") num_local_experts = experts.num_local_experts num_global_experts = experts.config.num_moe_experts - assert num_local_experts == num_global_experts, ( - "num_local_experts must be equal to num_global_experts during MoE import" + assert num_global_experts % num_local_experts == 0, ( + "num_global_experts must be divisible by num_local_experts " + "during MoE import" ) - init_index = 0 + # Each EP rank owns a contiguous slice of global experts: + # [ep_rank * num_local_experts, (ep_rank + 1) * num_local_experts). 
+ init_index = get_expert_model_parallel_rank() * num_local_experts self.rules["experts.linear_fc1"]( experts.linear_fc1, diff --git a/modelopt/torch/export/unified_export_megatron.py b/modelopt/torch/export/unified_export_megatron.py index 62053e549c8..24983a24891 100644 --- a/modelopt/torch/export/unified_export_megatron.py +++ b/modelopt/torch/export/unified_export_megatron.py @@ -72,6 +72,11 @@ with import_plugin("megatron"): from megatron.core.models.gpt import GPTModel from megatron.core.models.mamba import MambaModel + + try: + from megatron.core.models.hybrid.hybrid_model import HybridModel + except ImportError: + HybridModel = MambaModel from megatron.core.models.multimodal.llava_model import LLaVAModel from megatron.core.parallel_state import ( get_pipeline_model_parallel_rank, @@ -121,7 +126,7 @@ def __init__( moe_router_dtype: str | None = None, ): """Create a GPTModel exporter instance.""" - if not isinstance(model, (GPTModel, MambaModel, LLaVAModel)): + if not isinstance(model, (GPTModel, MambaModel, HybridModel, LLaVAModel)): raise ValueError("Input to GPTModelExport must be a megatron.core.models.GPTModel!") self._state_dict = OrderedDict() diff --git a/modelopt/torch/quantization/config.py b/modelopt/torch/quantization/config.py index 3f24ac09a41..794a669337e 100644 --- a/modelopt/torch/quantization/config.py +++ b/modelopt/torch/quantization/config.py @@ -236,10 +236,18 @@ def find_quant_cfg_entry_by_path( _mamba_moe_disabled_quantizer_cfg: list[QuantizerCfgEntry] = [ {"quantizer_name": "*fc1_latent_proj*", "enable": False}, # Skip Latent MOE {"quantizer_name": "*fc2_latent_proj*", "enable": False}, # Skip Latent MOE - {"quantizer_name": "*q_proj*", "enable": False}, # Skip QKV Linear - {"quantizer_name": "*k_proj*", "enable": False}, # Skip QKV Linear - {"quantizer_name": "*v_proj*", "enable": False}, # Skip QKV Linear - {"quantizer_name": "*o_proj*", "enable": False}, # Skip QKV Output Projection + {"quantizer_name": "*q_proj*", "enable": False}, # Skip QKV Linear (HF naming) + {"quantizer_name": "*k_proj*", "enable": False}, # Skip QKV Linear (HF naming) + {"quantizer_name": "*v_proj*", "enable": False}, # Skip QKV Linear (HF naming) + {"quantizer_name": "*o_proj*", "enable": False}, # Skip QKV Output Projection (HF naming) + { + "quantizer_name": "*self_attention.linear_qkv*", + "enable": False, + }, # Skip QKV Linear (Mcore naming) + { + "quantizer_name": "*self_attention.linear_proj*", + "enable": False, + }, # Skip QKV Output Projection (Mcore naming) ] INT8_DEFAULT_CFG = { From e906fb6befafac55d7179a4afd44e2ab9c3e5367 Mon Sep 17 00:00:00 2001 From: Wei-Ming Chen <17592131+meenchen@users.noreply.github.com> Date: Tue, 28 Apr 2026 15:22:50 -0700 Subject: [PATCH 08/24] [NVBug 6102977] Add _disable_use_cache context manager to fix PTQ AttributeError on custom configs (#1324) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: Bug fix - Summary: Running hf_ptq.py on stepfun-ai/Step-3.5-Flash (and any model whose custom HF config doesn't assign use_cache) crashed in get_max_batch_size() with AttributeError: 'Step3p5Config' object has no attribute 'use_cache' before calibration could start. - Extract the existing "disable KV cache during calibration" logic into a _disable_use_cache(model) context manager, apply it to both get_max_batch_size and _forward_loop. 
The CM sets config.use_cache = False unconditionally (not only when the attribute exists) and restores the prior value on exit if one was set. - Behavior unchanged for normal configs; the NemotronH hybrid-cache correctness guarantee from #1251 is preserved. ### Usage ```python # Add a code snippet demonstrating how to use this ``` ### Testing Step-3.5-Flash PTQ now passes get_max_batch_size ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). - Is this change backward compatible?: ✅ / ❌ / N/A - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A - Did you write any new necessary tests?: ✅ / ❌ / N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A ### Additional Information ## Summary by CodeRabbit * **Refactor** * Improved memory handling during model evaluation and calibration by consistently disabling KV cache for both single-batch probes and full dataloader runs, simplifying and stabilizing inference flow and ensuring cache state is managed reliably. * **Tests** * Added unit tests verifying cache-state handling across models with and without cache settings, including correct restoration behavior even when errors occur. --------- Signed-off-by: weimingc <17592131+meenchen@users.noreply.github.com> Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/torch/utils/dataset_utils.py | 128 +++++++++++-------- tests/unit/torch/utils/test_dataset_utils.py | 88 ++++++++++++- 2 files changed, 161 insertions(+), 55 deletions(-) diff --git a/modelopt/torch/utils/dataset_utils.py b/modelopt/torch/utils/dataset_utils.py index 01cb3abe88f..73cb917f37f 100644 --- a/modelopt/torch/utils/dataset_utils.py +++ b/modelopt/torch/utils/dataset_utils.py @@ -18,7 +18,8 @@ import copy import json import os -from collections.abc import Callable +from collections.abc import Callable, Iterator +from contextlib import contextmanager, suppress from pathlib import Path from typing import TYPE_CHECKING, Any from warnings import warn @@ -437,6 +438,36 @@ def get_supported_datasets() -> list[str]: return list(SUPPORTED_DATASET_CONFIG.keys()) +@contextmanager +def _disable_use_cache(model: torch.nn.Module) -> Iterator[None]: + """Set ``model.config.use_cache = False`` for the duration of the block. + + KV caching is unwanted during calibration / memory-probe forward passes: + it wastes memory, and for hybrid Mamba/attention models (e.g., NemotronH) + the cache state is mutated in-place and breaks correctness. Setting + ``use_cache`` unconditionally (rather than only when it was already + present) also sidesteps configs that never assign the attribute at all + — e.g., ``Step3p5Config`` from stepfun-ai/Step-3.5-Flash — where forward + code that reads ``self.config.use_cache`` would otherwise raise + ``AttributeError``. The prior value is restored on exit if one existed. 
+ """ + config = getattr(model, "config", None) + if config is None: + yield + return + had_attr = hasattr(config, "use_cache") + prev = config.use_cache if had_attr else None + config.use_cache = False + try: + yield + finally: + if had_attr: + config.use_cache = prev + else: + with suppress(AttributeError): + delattr(config, "use_cache") + + def get_max_batch_size( model: torch.nn.Module, max_sample_length: int = 512, @@ -467,42 +498,43 @@ def _get_free_gpu_mem(): torch.ones([1, max_sample_length], dtype=torch.int32, device=model.device) * 100 ) - # Calculate single batch inference with dummy input. - with torch.set_grad_enabled(enable_grad): - infer_method(sample_input_single_batch) - free_mem_after, max_allocated_after = _get_free_gpu_mem() + with _disable_use_cache(model): + # Calculate single batch inference with dummy input. + with torch.set_grad_enabled(enable_grad): + infer_method(sample_input_single_batch) + free_mem_after, max_allocated_after = _get_free_gpu_mem() - mem_diff_per_data_batch = ( - max( - (free_mem_before - free_mem_after), - (max_allocated_after - max_allocated_before), + mem_diff_per_data_batch = ( + max( + (free_mem_before - free_mem_after), + (max_allocated_after - max_allocated_before), + ) + * sample_memory_usage_ratio ) - * sample_memory_usage_ratio - ) - if mem_diff_per_data_batch <= 0: - print( - "Warning: No measurable memory usage found for a single batch. " - "Falling back to batch_size=1." + if mem_diff_per_data_batch <= 0: # pragma: no cover - GPU memory probe edge case + print( # pragma: no cover + "Warning: No measurable memory usage found for a single batch. " + "Falling back to batch_size=1." + ) + target_data_batch = 1 # pragma: no cover + else: + target_data_batch = max(int(free_mem_before / mem_diff_per_data_batch), 1) + target_input = sample_input_single_batch.expand( + [ + target_data_batch if index == 0 else dim + for index, dim in enumerate(sample_input_single_batch.shape) + ] ) - target_data_batch = 1 - else: - target_data_batch = max(int(free_mem_before / mem_diff_per_data_batch), 1) - target_input = sample_input_single_batch.expand( - [ - target_data_batch if index == 0 else dim - for index, dim in enumerate(sample_input_single_batch.shape) - ] - ) - # For some models on multi GPU, we observe the memory per batch is not a constant. - # So we just test the target batch size and make sure we do not go OOM. - while target_data_batch > 1: - with torch.set_grad_enabled(enable_grad): - try: - infer_method(target_input) - break - except torch.cuda.OutOfMemoryError: - target_data_batch = target_data_batch // 2 + # For some models on multi GPU, we observe the memory per batch is not a constant. + # So we just test the target batch size and make sure we do not go OOM. + while target_data_batch > 1: + with torch.set_grad_enabled(enable_grad): + try: + infer_method(target_input) + break + except torch.cuda.OutOfMemoryError: # pragma: no cover - GPU OOM retry path + target_data_batch = target_data_batch // 2 # pragma: no cover # Regulate the data batch target to be 1, 2, 4, 8, 12, ..., capped at 64 if target_data_batch < 2: @@ -601,28 +633,16 @@ def _forward_loop( dataloader: DataLoader containing the batched input data allowed_non_tensor_keys: Set of key names whose values may be non-tensor types """ - # Disable KV caching during calibration — it is unnecessary overhead and causes - # correctness issues with hybrid Mamba/attention models whose cache state is mutated - # in-place (e.g., NemotronH). 
- config = getattr(model, "config", None) - prev_use_cache = getattr(config, "use_cache", None) - if config is not None and prev_use_cache is not None: - config.use_cache = False + with _disable_use_cache(model), torch.no_grad(): + is_enc_dec = model_type_is_enc_dec(model) + infer_method = model.generate if is_enc_dec else model.forward + max_working_batch_size = None # Initialize max working batch size as None - try: - with torch.no_grad(): - is_enc_dec = model_type_is_enc_dec(model) - infer_method = model.generate if is_enc_dec else model.forward - max_working_batch_size = None # Initialize max working batch size as None - - for _, data in enumerate(tqdm(dataloader)): - # Process batch and update max working batch size - max_working_batch_size = _process_batch( - data, infer_method, max_working_batch_size, allowed_non_tensor_keys - ) - finally: - if config is not None and prev_use_cache is not None: - config.use_cache = prev_use_cache + for _, data in enumerate(tqdm(dataloader)): + # Process batch and update max working batch size + max_working_batch_size = _process_batch( + data, infer_method, max_working_batch_size, allowed_non_tensor_keys + ) def create_forward_loop( diff --git a/tests/unit/torch/utils/test_dataset_utils.py b/tests/unit/torch/utils/test_dataset_utils.py index 9a89d53672e..94a2a5a6aab 100644 --- a/tests/unit/torch/utils/test_dataset_utils.py +++ b/tests/unit/torch/utils/test_dataset_utils.py @@ -17,8 +17,14 @@ import pytest import torch +from torch.utils.data import DataLoader -from modelopt.torch.utils.dataset_utils import _process_batch, get_dataset_samples +from modelopt.torch.utils.dataset_utils import ( + _disable_use_cache, + _forward_loop, + _process_batch, + get_dataset_samples, +) def setup_test_data(): @@ -145,6 +151,86 @@ def mock_infer(**kwargs): _process_batch(batch_data, mock_infer, allowed_non_tensor_keys={"base_model_outputs"}) +class _Config: + """Minimal config stand-in; instances start with no `use_cache` attribute.""" + + +def test_disable_use_cache_no_config_attr(): + """Model without a `config` attribute: CM is a no-op and does not raise.""" + model = torch.nn.Linear(4, 4) + assert not hasattr(model, "config") + + with _disable_use_cache(model): + assert not hasattr(model, "config") + + assert not hasattr(model, "config") + + +@pytest.mark.parametrize("prev_value", [True, False]) +def test_disable_use_cache_with_existing_attr(prev_value): + """Config that already has `use_cache`: forced to False inside, restored on exit.""" + model = torch.nn.Linear(4, 4) + model.config = _Config() + model.config.use_cache = prev_value + + with _disable_use_cache(model): + assert model.config.use_cache is False + + assert model.config.use_cache is prev_value + + +def test_disable_use_cache_without_existing_attr(): + """Config that lacks `use_cache`: set to False inside, attribute removed on exit (no leak).""" + model = torch.nn.Linear(4, 4) + model.config = _Config() + assert not hasattr(model.config, "use_cache") + + with _disable_use_cache(model): + assert model.config.use_cache is False + + assert not hasattr(model.config, "use_cache") + + +def test_forward_loop_runs_under_disabled_use_cache(): + """`_forward_loop` runs forward on every batch and restores `use_cache` on exit.""" + seen_use_cache: list[bool] = [] + + class _Model(torch.nn.Module): + def __init__(self): + super().__init__() + self.config = _Config() + self.config.use_cache = True + + def forward(self, **kwargs): + seen_use_cache.append(self.config.use_cache) + + model = _Model() + + def 
_collate(samples): + return {"input_ids": torch.stack([s["input_ids"] for s in samples])} + + data = [{"input_ids": torch.zeros(8, dtype=torch.long)} for _ in range(3)] + loader = DataLoader(data, batch_size=1, collate_fn=_collate) + + _forward_loop(model, loader) + + assert seen_use_cache == [False, False, False] + assert model.config.use_cache is True + + +def test_disable_use_cache_restores_on_exception(): + """Restore must run even if the with-block raises.""" + model = torch.nn.Linear(4, 4) + model.config = _Config() + model.config.use_cache = True + + with pytest.raises(RuntimeError, match="boom"), _disable_use_cache(model): + assert model.config.use_cache is False + raise RuntimeError("boom") + + assert model.config.use_cache is True + + @pytest.mark.parametrize("test_local_path", [True, False]) def test_get_dataset_samples_with_unsupported_minipile_dataset(tmp_path, test_local_path): pytest.importorskip("datasets") From 82a856dcf05488f0bf9ce0db7c2b128f16cd907b Mon Sep 17 00:00:00 2001 From: Wei-Ming Chen <17592131+meenchen@users.noreply.github.com> Date: Wed, 29 Apr 2026 00:01:01 -0700 Subject: [PATCH 09/24] [NVBug 6108145] Fix PTQ calibration and export for fused-experts MoE (Qwen3.5-MoE VLM) (#1340) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: Bug fix Fixes a 4-bug cascade that caused silent PTQ failure on Qwen3.5-MoE VLMs (Qwen3.6-35B-A3B): calibration appeared to succeed but produced token-salad at inference. Root cause: HF's @use_experts_implementation dispatches expert forward to torch._grouped_mm / torch.bmm, bypassing the F.linear hook that captures activations — so gate_up_proj_input_quantizer / down_proj_input_quantizer never calibrated and no input_scale tensors were emitted. Changes: - examples/llm_ptq/hf_ptq.py — force config._experts_implementation = "eager" (recursing into text_config / vision_config / …) so per-expert F.linear calls are visible to the calibration hook. - modelopt/torch/quantization/conversion.py — normalize plural ModuleList quantizer names (weight_quantizers.N → weight_quantizer) before fnmatch, so wildcards like *mlp.experts*weight_quantizer match fused-expert quantizers. - modelopt/torch/export/unified_export_hf.py — hoist the _QuantFusedExperts export branch above the get_quantization_format() gate so _export_fused_experts() runs even when the top-level format query returns QUANTIZATION_NONE (happens for experts-only recipes). - modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml — layerwise: false (VLM nested layer structure breaks the layerwise walker). ### Usage ```python python examples/llm_ptq/hf_ptq.py \ --pyt_ckpt_path Qwen/Qwen3.6-35B-A3B \ --qformat nvfp4 \ --kv_cache_qformat fp8 \ --calib_size 512 \ --export_path Qwen3.6-35B-A3B-NVFP4 ``` ### Testing Testing End-to-end PTQ → vLLM deploy → NEL eval on Qwen3.6-35B-A3B (256 experts × 40 layers, 35B params): Hook-call diagnostic: 0 → 6720 per-expert F.linear calls during calibration after the fix; 0 → 30720 input_scale tensors emitted in the exported checkpoint. FP8 fused-MoE path still produces gibberish — separate follow-up (vLLM per-expert weight_scale handling). * vLLM full-FP8: the FlashInfer TRTLLM Fp8MoE loader doesn't stack the 256 per-expert scalar weight_scale tensors into a [num_experts] per-expert vector — it ends up applying one expert's scale across all 256, so every routed expert dequants with the wrong amplitude → coherent token stream collapses into multilingual gibberish. 
* SGLang full-FP8: qwen3_5.py::_make_packed_weight_loader rejects with AssertionError: Unexpected scalar for tuple shard load: loaded_shard_id=(0,1,2), split_sizes=[1,1,1] — its packed-loader has no path for "N independent per-tensor source scalars combining into one fused-shard parameter," so the fused QKV (or in_proj_qkvz) load is structurally refused and the model never finishes loading. ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). - Is this change backward compatible?: ✅ / ❌ / N/A - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A - Did you write any new necessary tests?: ✅ / ❌ / N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A ### Additional Information ## Summary by CodeRabbit * **New Features** * Better fused-expert export flow, a plugin to force eager expert execution during calibration/export, and a representative quantizer discovery utility. * **Bug Fixes** * Reliable matching/discovery of per-expert indexed quantizers enabling correct calibration and mixed-precision export; fixes for calibration in nested decoder layouts. * **Documentation** * Clarified PTQ config guidance on layerwise calibration. * **Tests** * Added fused-experts calibration, export, and name-normalization tests. --------- Signed-off-by: weimingc <17592131+meenchen@users.noreply.github.com> Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .../torch/export/plugins/vllm_fakequant_hf.py | 63 +++- modelopt/torch/export/quant_utils.py | 9 +- modelopt/torch/export/unified_export_hf.py | 19 +- modelopt/torch/quantization/conversion.py | 34 +- .../torch/quantization/plugins/huggingface.py | 60 ++++ modelopt/torch/quantization/utils/__init__.py | 1 + .../torch/quantization/utils/core_utils.py | 58 +++- .../ptq/nvfp4_experts_only-fp8_kv.yaml | 4 +- .../plugins/test_fused_experts.py | 315 ++++++++++++++++++ 9 files changed, 533 insertions(+), 30 deletions(-) diff --git a/modelopt/torch/export/plugins/vllm_fakequant_hf.py b/modelopt/torch/export/plugins/vllm_fakequant_hf.py index 42baad912b8..ad0b88f2f78 100644 --- a/modelopt/torch/export/plugins/vllm_fakequant_hf.py +++ b/modelopt/torch/export/plugins/vllm_fakequant_hf.py @@ -47,14 +47,18 @@ "merge_amax_tensors_for_group", ] -# Matches ``…weight_quantizer``, ``…weight_quantizer.0``, ``…w13_weight_quantizer.0``, etc. -_WEIGHT_QUANTIZER_STATE_KEY = re.compile(r"(?:^|\.)(?:\w+_)?weight_quantizer(?:\.\d+)*$") +# Matches ``…weight_quantizer``, ``…weight_quantizer.0``, ``…w13_weight_quantizer.0``, +# and the plural fused-experts form ``…weight_quantizers.0`` (per-expert ModuleList). +_WEIGHT_QUANTIZER_STATE_KEY = re.compile(r"(?:^|\.)(?:\w+_)?weight_quantizers?(?:\.\d+)*$") def is_weight_quantizer_state_key(key: str) -> bool: - """Return True for weight-quantizer state keys, including SequentialQuantizer entries. + """Return True for weight-quantizer state keys. - Matches ``weight_quantizer``, ``w13_weight_quantizer``, ``weight_quantizer.0``, etc. 
+ Includes ``SequentialQuantizer`` entries and fused-experts ``ModuleList`` + entries (``*_weight_quantizers.``). Matches ``weight_quantizer``, + ``w13_weight_quantizer``, ``weight_quantizer.0``, + ``gate_up_proj_weight_quantizers.0``, etc. """ return bool(_WEIGHT_QUANTIZER_STATE_KEY.search(key)) @@ -142,6 +146,56 @@ def disable_rotate(quantizer: TensorQuantizer): return False +def _fakequant_fused_experts_weights( + module: nn.Module, + module_name: str, + state_dict: dict | None, + fakequant_weights: set, + inplace: bool, +): + """Apply per-expert fake-quant to a ``_QuantFusedExperts`` module's 3-D weights. + + The base loop in :func:`_fakequant_module_weights` only handles singular + ``*_weight_quantizer`` attrs (one TensorQuantizer per weight). Fused-experts + modules expose ``*_weight_quantizers`` (``nn.ModuleList`` with one entry per + expert) that the base loop skips, leaving the fused 3-D weight unquantized + in the export and breaking weight-fold round-trips. + """ + for w_attr, q_attr in ( + ("gate_up_proj", "gate_up_proj_weight_quantizers"), + ("down_proj", "down_proj_weight_quantizers"), + ): + quantizers = getattr(module, q_attr, None) + if not isinstance(quantizers, nn.ModuleList): + continue + if not any( + isinstance(q, TensorQuantizer) and q.fake_quant and q.is_enabled for q in quantizers + ): + continue + sd_key = f"{module_name}.{w_attr}" if module_name else w_attr + if sd_key in fakequant_weights: + raise RuntimeError(f"Weight {sd_key} has already been fakequantized") + + if inplace: + w = getattr(module, w_attr) + for idx, q in enumerate(quantizers): + if not (isinstance(q, TensorQuantizer) and q.fake_quant and q.is_enabled): + continue + slice_ = w.data[idx] + slice_.copy_(q(slice_.float()).to(w.dtype)) + else: + if state_dict is None or sd_key not in state_dict: + continue + w_3d = state_dict[sd_key].clone() + for idx, q in enumerate(quantizers): + if not (isinstance(q, TensorQuantizer) and q.fake_quant and q.is_enabled): + continue + slice_ = w_3d[idx] + w_3d[idx] = q(slice_.float()).to(slice_.dtype) + state_dict[sd_key] = w_3d.cpu() + fakequant_weights.add(sd_key) + + def _fakequant_module_weights( module: nn.Module, module_name: str, @@ -159,6 +213,7 @@ def _fakequant_module_weights( """ if not isinstance(module, QuantModule): return + _fakequant_fused_experts_weights(module, module_name, state_dict, fakequant_weights, inplace) for attr_name, quantizer in module.named_children(): if not ( attr_name.endswith("weight_quantizer") diff --git a/modelopt/torch/export/quant_utils.py b/modelopt/torch/export/quant_utils.py index 4ceb51cd2c0..76f304a478a 100755 --- a/modelopt/torch/export/quant_utils.py +++ b/modelopt/torch/export/quant_utils.py @@ -42,6 +42,7 @@ QuantizerAttrNames, quantizer_attr_names, reduce_block_amax, + representative_weight_quantizer, weight_attr_names, ) from modelopt.torch.utils import clear_cuda_cache @@ -546,7 +547,7 @@ def _compute_kv_cache_dtype( def get_weight_block_size(module: nn.Module, weight_name: str = "weight") -> int: """Returns the weight block size.""" - weight_quantizer = getattr(module, quantizer_attr_names(weight_name).weight_quantizer, None) + weight_quantizer = representative_weight_quantizer(module, weight_name) if weight_quantizer is None: return 0 @@ -572,7 +573,11 @@ def get_quantization_format(module) -> str | None: """ def _get_quantization_from_layer(layer, quantizer_attr_names: QuantizerAttrNames): - weight_quantizer = getattr(layer, quantizer_attr_names.weight_quantizer, None) + # Singular form first, plural ModuleList 
fallback (fused-experts). + # Strip the "_weight_quantizer" suffix to recover the weight attr name. + weight_attr = quantizer_attr_names.weight_quantizer + weight_name = weight_attr[: -len("_weight_quantizer")].rstrip("_") or "weight" + weight_quantizer = representative_weight_quantizer(layer, weight_name) input_quantizer = getattr(layer, quantizer_attr_names.input_quantizer, None) if weight_quantizer is None or not weight_quantizer.is_enabled: diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index af936a3002a..a76783ac172 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -88,6 +88,7 @@ QUANTIZATION_W4A8_NVFP4_FP8, ) from .model_utils import get_language_model_from_vl, is_multimodal_model +from .moe_utils import _export_fused_experts from .plugins import SpeculativeDecodingExporter, has_spec_opt from .quant_utils import ( fuse_prequant_layernorm, @@ -642,11 +643,20 @@ def _process_quantized_modules( if is_modelopt_qlora and (hasattr(sub_module, "base_layer")): continue + # Preprocessing: restore unpacked weight so the export path can read + # the live quantizer state. Falls through to the export branches below. if hasattr(sub_module, "weight_packed") or ( "QuantFP8Linear" in type(sub_module).__name__ and sub_module.weight.element_size() <= 1 ): sub_module.unpack_weight() - if get_quantization_format(sub_module) != QUANTIZATION_NONE: + + if hasattr(sub_module, "gate_up_proj_weight_quantizers"): + # _QuantFusedExperts uses plural `gate_up_proj_weight_quantizers` (ModuleList), + # which get_quantization_format's singular-weight_quantizer check misses. Handle + # it explicitly before the format gate so fused-experts get split + quantized. + with fsdp2_aware_weight_update(model, sub_module, reshard=False): + _export_fused_experts(sub_module, dtype) + elif get_quantization_format(sub_module) != QUANTIZATION_NONE: # Skip QuantMoELinear - it's handled separately in _reconstruct_fused_moe_linear if type(sub_module).__name__ == "QuantMoELinear": continue @@ -677,13 +687,6 @@ def _process_quantized_modules( with fsdp2_aware_weight_update(model, sub_module, reshard=False): for weight_name in ["gate_up_proj", "down_proj"]: _export_quantized_weight(sub_module, dtype, weight_name) - elif hasattr(sub_module, "gate_up_proj_weight_quantizers"): - # Generic fused MoE experts (_QuantFusedExperts) with per-expert - # quantizer ModuleLists. Split into per-expert modules and export. 
- from modelopt.torch.export.moe_utils import _export_fused_experts - - with fsdp2_aware_weight_update(model, sub_module, reshard=False): - _export_fused_experts(sub_module, dtype) def _export_transformers_checkpoint( diff --git a/modelopt/torch/quantization/conversion.py b/modelopt/torch/quantization/conversion.py index 55f7fdf6fc3..3f97f8380be 100644 --- a/modelopt/torch/quantization/conversion.py +++ b/modelopt/torch/quantization/conversion.py @@ -16,6 +16,7 @@ """Quantization conversion/restore utilities.""" import fnmatch +import re import warnings from collections.abc import Callable from contextlib import contextmanager @@ -286,6 +287,33 @@ def set_quantizer_by_cfg(quant_model: nn.Module, quant_cfg: QuantizeQuantCfgType set_quantizer_attributes_full(quant_model, quantizer_name, attributes, parent_class) +_FUSED_EXPERTS_QUANTIZER_LIST_RE = re.compile( + r"(weight_quantizers?|input_quantizers?)\.\d+(?=$|\.)" +) + + +def _normalize_fused_experts_quantizer_name(name: str) -> str: + """Strip the per-expert index from per-expert quantizer ModuleList names. + + Fused-experts modules register per-expert weight/input quantizers in a + ``nn.ModuleList``; its children surface as dotted names like + ``...gate_up_proj_weight_quantizers.0`` (plural) or — if a variant uses + singular naming — ``...gate_up_proj_weight_quantizer.0``. Neither matches + the singular-suffix wildcards (``*weight_quantizer``) used in the stock + configs, so the experts stay at their defaults. + + Return a normalized name where either ``weight_quantizer[s]?.N`` or + ``input_quantizer[s]?.N`` collapses to the singular form without the index + so the standard wildcards match. + """ + + def _repl(m: re.Match) -> str: + base = m.group(1) + return base.removesuffix("s") + + return _FUSED_EXPERTS_QUANTIZER_LIST_RE.sub(_repl, name) + + def _match_quantizer( wildcard_or_filter_func: str | Callable, name: str, @@ -296,7 +324,11 @@ def _match_quantizer( if not isinstance(module, (TensorQuantizer, SequentialQuantizer)): return False if isinstance(wildcard_or_filter_func, str): - if not fnmatch.fnmatch(name, wildcard_or_filter_func): + normalized = _normalize_fused_experts_quantizer_name(name) + if not ( + fnmatch.fnmatch(name, wildcard_or_filter_func) + or (normalized != name and fnmatch.fnmatch(normalized, wildcard_or_filter_func)) + ): return False elif callable(wildcard_or_filter_func): if not wildcard_or_filter_func(name): diff --git a/modelopt/torch/quantization/plugins/huggingface.py b/modelopt/torch/quantization/plugins/huggingface.py index 990d0c0348d..48fba1e145c 100644 --- a/modelopt/torch/quantization/plugins/huggingface.py +++ b/modelopt/torch/quantization/plugins/huggingface.py @@ -900,6 +900,33 @@ def forward(self, *args, **kwargs): self._down_proj_linear = False return super().forward(*args, **kwargs) + def fold_weight(self, keep_attrs: bool = False): + """Fold per-expert weight quantizers into the fused 3-D weights. + + The base ``fold_weight`` only handles singular ``*_weight_quantizer`` + attributes. Fused experts use ``nn.ModuleList`` of per-expert quantizers + (``gate_up_proj_weight_quantizers``, ``down_proj_weight_quantizers``), + which would otherwise be skipped, leaving ``_amax`` on every quantizer. 
+ """ + for weight_name, quantizers_name in ( + ("gate_up_proj", "gate_up_proj_weight_quantizers"), + ("down_proj", "down_proj_weight_quantizers"), + ): + weight = getattr(self, weight_name, None) + quantizers = getattr(self, quantizers_name, None) + if weight is None or quantizers is None: + continue + for idx, q in enumerate(quantizers): + if not (isinstance(q, TensorQuantizer) and q.fake_quant): + continue + slice_ = weight.data[idx] + slice_.copy_(q(slice_.float()).to(weight.dtype)) + q.disable() + if not keep_attrs: + for attr_name in ("_pre_quant_scale", "_amax"): + if hasattr(q, attr_name): + delattr(q, attr_name) + class _QuantDbrxFFN(_QuantSparseSequentialMoe): @property @@ -1438,6 +1465,38 @@ def register_fused_experts_on_the_fly(model): QuantModuleRegistry.register({mod_type: f"hf.{mod_type.__name__}"})(_QuantFusedExperts) +def force_eager_experts_impl_on_the_fly(model): + """Force HF fused-experts modules onto the eager ``F.linear``-based forward. + + HF transformers 5.0+ decorates fused-experts forwards with + ``@use_experts_implementation``, which may dispatch to ``torch._grouped_mm`` + or ``torch.bmm`` backends. Those backends bypass ``F.linear`` and so bypass + ``_QuantFusedExperts``'s input/weight quantizer hooks — calibration silently + does nothing, no ``input_scale`` / ``amax`` is collected, and the exported + checkpoint produces garbage at inference. + + Sets ``config._experts_implementation = "eager"`` on the model config (and + recursively on ``text_config`` / ``vision_config`` / ``audio_config`` / + ``speech_config``) whenever a fused-experts module is present. + """ + if not any(_is_fused_experts_module(m) for m in model.modules()): + return + + nested_cfg_attrs = ("text_config", "vision_config", "audio_config", "speech_config") + + def _force(cfg): + if cfg is None: + return + if hasattr(cfg, "_experts_implementation"): + cfg._experts_implementation = "eager" + for sub in nested_cfg_attrs: + if hasattr(cfg, sub): + _force(getattr(cfg, sub)) + + if hasattr(model, "config"): + _force(model.config) + + def _is_supported_hf_model(model): """Check if the model a valid model for transformers quantization specific support.""" supported_models = [transformers.PreTrainedModel] @@ -1665,6 +1724,7 @@ def _reconstruct_fused_moe_linear(model: nn.Module) -> None: register_dbrx_moe_on_the_fly, register_step3p5_moe_on_the_fly, register_fused_experts_on_the_fly, + force_eager_experts_impl_on_the_fly, register_sparse_moe_on_the_fly, register_hf_attentions_on_the_fly, convert_hf_parallel_linears_on_the_fly, diff --git a/modelopt/torch/quantization/utils/__init__.py b/modelopt/torch/quantization/utils/__init__.py index dfc23c42eee..dc6daa00842 100644 --- a/modelopt/torch/quantization/utils/__init__.py +++ b/modelopt/torch/quantization/utils/__init__.py @@ -30,6 +30,7 @@ "reduce_amax", "reduce_sum", "replace_function", + "representative_weight_quantizer", "update_quant_cfg_with_kv_cache_quant", "weight_attr_names", ] diff --git a/modelopt/torch/quantization/utils/core_utils.py b/modelopt/torch/quantization/utils/core_utils.py index 29661e18f52..1a177e04dc8 100644 --- a/modelopt/torch/quantization/utils/core_utils.py +++ b/modelopt/torch/quantization/utils/core_utils.py @@ -202,27 +202,57 @@ def reduce_sum(input, axis=None, keepdims=True): return output -def weight_attr_names(module: nn.Module) -> "Generator[str, None, None]": - """Get the weight param attribute names in a converted module, non-recursive. 
+def representative_weight_quantizer(module: nn.Module, weight_name: str = "weight"): + """Return the representative weight quantizer for ``weight_name`` on ``module``. + + Handles two layouts: + + - singular ``_weight_quantizer`` — standard ``nn.Linear`` / ``_QuantLinear``. + - plural ``_weight_quantizers`` (``nn.ModuleList``) — fused-experts modules + (``_QuantFusedExperts``) hold one ``TensorQuantizer`` per expert. Per-expert + formats are identical, so the first element is representative. - We consider the following two cases for each weight param attribute: - - The standard weight attribute (e.g. nn.Linear). - - The custom `weight_attr_name`. (e.g. Llama4TextExperts has weight attributes `gate_up_proj` and `down_proj`) + Returns ``None`` if no matching quantizer is found. """ from ..nn import SequentialQuantizer, TensorQuantizer - # the standard weight and quantizer case - weight = getattr(module, "weight", None) - weight_quantizer = getattr(module, "weight_quantizer", None) - if weight is not None and isinstance(weight_quantizer, (TensorQuantizer, SequentialQuantizer)): - yield "weight" + singular = quantizer_attr_names(weight_name).weight_quantizer + q = getattr(module, singular, None) + if isinstance(q, (TensorQuantizer, SequentialQuantizer)): + return q - # other weight and quantizer case + plural = getattr(module, singular + "s", None) + if isinstance(plural, nn.ModuleList) and len(plural) > 0: + first = plural[0] + if isinstance(first, (TensorQuantizer, SequentialQuantizer)): + return first + return None + + +def weight_attr_names(module: nn.Module) -> "Generator[str, None, None]": + """Get the weight param attribute names in a converted module, non-recursive. + + Covers three layouts: + + - standard ``nn.Linear``: ``weight`` + ``weight_quantizer``. + - custom per-weight quantizer (e.g. ``Llama4TextExperts`` with ``gate_up_proj`` + + ``gate_up_proj_weight_quantizer``). + - fused-experts ``nn.ModuleList`` quantizers (``_QuantFusedExperts`` with + ``gate_up_proj`` + ``gate_up_proj_weight_quantizers`` plural list). + """ + # standard: "weight" + "weight_quantizer" (singular) or "weight_quantizers" (plural) + if getattr(module, "weight", None) is not None: + if representative_weight_quantizer(module, "weight") is not None: + yield "weight" + + # per-parameter custom attr names for name, _ in module.named_parameters(recurse=False): + if name == "weight": + continue weight = getattr(module, name, None) - weight_quantizer = getattr(module, f"{name}_weight_quantizer", None) - if isinstance(weight, nn.Parameter) and isinstance( - weight_quantizer, (TensorQuantizer, SequentialQuantizer) + if ( + isinstance(weight, nn.Parameter) + and representative_weight_quantizer(module, name) is not None ): yield name diff --git a/modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml b/modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml index 220d0622327..7c557039631 100644 --- a/modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml +++ b/modelopt_recipes/general/ptq/nvfp4_experts_only-fp8_kv.yaml @@ -20,7 +20,9 @@ quantize: algorithm: method: max # Max calibration is fast and does not typically need checkpointing. - layerwise: true + # layerwise=false required for VLMs where the decoder layers are nested under + # `model.language_model.layers` (layerwise_calibrate can't find them otherwise). 
+ layerwise: false quant_cfg: - quantizer_name: '*' enable: false diff --git a/tests/unit/torch/quantization/plugins/test_fused_experts.py b/tests/unit/torch/quantization/plugins/test_fused_experts.py index 7e77bf1151c..29435827748 100644 --- a/tests/unit/torch/quantization/plugins/test_fused_experts.py +++ b/tests/unit/torch/quantization/plugins/test_fused_experts.py @@ -22,11 +22,13 @@ pytest.importorskip("transformers") +from modelopt.torch.quantization.conversion import _normalize_fused_experts_quantizer_name from modelopt.torch.quantization.nn import QuantModuleRegistry from modelopt.torch.quantization.plugins.huggingface import ( _is_fused_experts_module, _is_sparse_sequaential_moe_block, _QuantFusedExperts, + force_eager_experts_impl_on_the_fly, register_fused_experts_on_the_fly, register_sparse_moe_on_the_fly, ) @@ -297,3 +299,316 @@ def test_export_creates_per_expert_submodules(self): if QuantModuleRegistry.get(expert_type) is not None: QuantModuleRegistry.unregister(expert_type) + + +# --------------------------------------------------------------------------- +# Tests for force_eager_experts_impl_on_the_fly +# --------------------------------------------------------------------------- +class _StubConfig: + """Minimal stand-in for HF PretrainedConfig with optional nested sub-configs.""" + + def __init__(self, impl=None, **nested): + if impl is not None: + self._experts_implementation = impl + for key, value in nested.items(): + setattr(self, key, value) + + +class _TinyMoEModelWithConfig(_TinyMoEModel): + def __init__(self, config): + super().__init__() + self.config = config + + +class _NonMoEModelWithConfig(nn.Module): + def __init__(self, config): + super().__init__() + self.linear = nn.Linear(HIDDEN_DIM, HIDDEN_DIM) + self.config = config + + +class TestForceEagerExpertsImpl: + def test_sets_eager_on_moe_model(self): + """Non-eager backend on an MoE model gets flipped to eager.""" + cfg = _StubConfig(impl="kernels") + model = _TinyMoEModelWithConfig(cfg) + force_eager_experts_impl_on_the_fly(model) + assert cfg._experts_implementation == "eager" + + def test_recurses_into_nested_configs(self): + """VLM-style nested text_config / vision_config are also flipped.""" + text_cfg = _StubConfig(impl="grouped_mm") + vision_cfg = _StubConfig(impl="bmm") + root_cfg = _StubConfig(text_config=text_cfg, vision_config=vision_cfg) + model = _TinyMoEModelWithConfig(root_cfg) + force_eager_experts_impl_on_the_fly(model) + assert text_cfg._experts_implementation == "eager" + assert vision_cfg._experts_implementation == "eager" + + def test_skips_model_without_fused_experts(self): + """Non-MoE models must not have their config silently mutated.""" + cfg = _StubConfig(impl="kernels") + model = _NonMoEModelWithConfig(cfg) + force_eager_experts_impl_on_the_fly(model) + assert cfg._experts_implementation == "kernels" + + def test_no_crash_when_config_missing(self): + """Model without a ``config`` attribute must not raise.""" + force_eager_experts_impl_on_the_fly(_TinyMoEModel()) # no-op, no error + + def test_no_crash_when_impl_attr_missing(self): + """Config without ``_experts_implementation`` must not raise.""" + cfg = _StubConfig() # no impl attr + model = _TinyMoEModelWithConfig(cfg) + force_eager_experts_impl_on_the_fly(model) + assert not hasattr(cfg, "_experts_implementation") + + def test_leaves_eager_value_unchanged(self): + cfg = _StubConfig(impl="eager") + model = _TinyMoEModelWithConfig(cfg) + force_eager_experts_impl_on_the_fly(model) + assert cfg._experts_implementation == "eager" + + 
+# --------------------------------------------------------------------------- +# End-to-end PTQ calibration test — guards the full fused-experts path: +# register_fused_experts_on_the_fly → _QuantFusedExperts.{_setup, forward} → +# plural ModuleList name normalization in conversion._match_quantizer → +# TensorQuantizer amax collection via the F.linear hook. +# If any link breaks, quantizer `amax` stays None and this test fails. +# --------------------------------------------------------------------------- +class TestFusedExpertsCalibration: + @staticmethod + def _cleanup_registry(mod_type): + if QuantModuleRegistry.get(mod_type) is not None: + QuantModuleRegistry.unregister(mod_type) + + def test_calibration_populates_all_expert_quantizers(self): + """After PTQ, every input/weight quantizer on the fused-experts module has amax set.""" + import modelopt.torch.quantization as mtq + + model = _TinyMoEModel() + expert_type = type(model.moe.experts) + self._cleanup_registry(expert_type) + + quant_cfg = { + "quant_cfg": [ + {"quantizer_name": "*", "enable": False}, + { + "quantizer_name": "*gate_up_proj_input_quantizer", + "cfg": {"num_bits": 8, "axis": None}, + }, + { + "quantizer_name": "*down_proj_input_quantizer", + "cfg": {"num_bits": 8, "axis": None}, + }, + { + "quantizer_name": "*gate_up_proj_weight_quantizer", + "cfg": {"num_bits": 8, "axis": 0}, + }, + { + "quantizer_name": "*down_proj_weight_quantizer", + "cfg": {"num_bits": 8, "axis": 0}, + }, + ], + "algorithm": "max", + } + + def forward_loop(m): + torch.manual_seed(0) + for _ in range(2): + x = torch.randn(1, 4, HIDDEN_DIM) + m(x) + + mtq.quantize(model, quant_cfg, forward_loop=forward_loop) + + experts = model.moe.experts + assert experts.gate_up_proj_input_quantizer.amax is not None, ( + "Shared gate_up_proj input quantizer was not calibrated — " + "F.linear hook likely bypassed by non-eager experts_implementation." + ) + assert experts.down_proj_input_quantizer.amax is not None, ( + "Shared down_proj input quantizer was not calibrated." + ) + for idx in range(NUM_EXPERTS): + assert experts.gate_up_proj_weight_quantizers[idx].amax is not None, ( + f"gate_up_proj_weight_quantizers[{idx}].amax is None — " + "plural ModuleList name normalization in _match_quantizer likely broken." + ) + assert experts.down_proj_weight_quantizers[idx].amax is not None, ( + f"down_proj_weight_quantizers[{idx}].amax is None." + ) + + self._cleanup_registry(expert_type) + + +# --------------------------------------------------------------------------- +# Tests for export enumeration — guards the bug where fused-experts were +# silently skipped by get_quant_config because their weight quantizers live +# on a plural nn.ModuleList instead of the singular *_weight_quantizer attr. +# Missed enumeration → experts don't appear in quantized_layers → +# quantization_formats has only 1 entry from the non-expert modules → +# quant_algo lands on that format instead of "MIXED_PRECISION". 
+# --------------------------------------------------------------------------- +class _MixedPrecisionModel(nn.Module): + """A model with both a fused-experts block AND a standard Linear, so a + mixed-precision recipe should produce two distinct format groups.""" + + def __init__(self): + super().__init__() + self.moe = _SyntheticSparseMoeBlock() + self.dense = nn.Linear(HIDDEN_DIM, HIDDEN_DIM) + + def forward(self, x): + return self.dense(self.moe(x)) + + +class TestMixedPrecisionExport: + @staticmethod + def _cleanup_registry(mod_type): + if QuantModuleRegistry.get(mod_type) is not None: + QuantModuleRegistry.unregister(mod_type) + + def test_weight_attr_names_yields_fused_expert_params(self): + """weight_attr_names must yield gate_up_proj / down_proj on fused experts + even though their quantizers are a plural ModuleList, not singular.""" + from modelopt.torch.quantization.utils.core_utils import weight_attr_names + + model = _TinyMoEModel() + expert_type = type(model.moe.experts) + self._cleanup_registry(expert_type) + + register_fused_experts_on_the_fly(model) + converted = QuantModuleRegistry.convert(model.moe.experts) + + yielded = list(weight_attr_names(converted)) + assert set(yielded) == {"gate_up_proj", "down_proj"}, ( + f"Expected both fused weight attrs, got {yielded}. " + "Likely regression in representative_weight_quantizer plural fallback." + ) + + self._cleanup_registry(expert_type) + + def test_mixed_precision_config_export(self): + """Mixed-precision recipe (experts FP8 + dense Linear FP8 per-channel) should + show both modules in quantized_layers. Using two distinct formats would + trigger MIXED_PRECISION; using same-format still exercises enumeration.""" + import modelopt.torch.quantization as mtq + from modelopt.torch.export.quant_utils import get_quant_config + + model = _MixedPrecisionModel() + expert_type = type(model.moe.experts) + self._cleanup_registry(expert_type) + + # FP8 per-tensor for experts; FP8 per-channel for dense — two distinct + # format strings in quantization_formats, so quant_algo must become + # MIXED_PRECISION. + quant_cfg = { + "quant_cfg": [ + {"quantizer_name": "*", "enable": False}, + { + "quantizer_name": "*gate_up_proj_input_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + }, + { + "quantizer_name": "*down_proj_input_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + }, + { + "quantizer_name": "*gate_up_proj_weight_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + }, + { + "quantizer_name": "*down_proj_weight_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + }, + { + "quantizer_name": "*dense.input_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + }, + { + "quantizer_name": "*dense.weight_quantizer", + "cfg": {"num_bits": (4, 3), "axis": 0}, # per-channel → FP8_PC_PT + }, + ], + "algorithm": "max", + } + + def forward_loop(m): + torch.manual_seed(0) + for _ in range(2): + x = torch.randn(1, 4, HIDDEN_DIM) + m(x) + + mtq.quantize(model, quant_cfg, forward_loop=forward_loop) + + cfg = get_quant_config(model) + q = cfg["quantization"] + + # The fused-experts module MUST appear in quantized_layers. This is the + # central guard: regressions of weight_attr_names plural fallback would + # make experts disappear here. + layer_names = set(q.get("quantized_layers", {}).keys()) + assert any("moe.experts" in n for n in layer_names), ( + f"Fused-experts module missing from quantized_layers: {layer_names}. " + "weight_attr_names likely not yielding plural-ModuleList weight attrs." 
+ ) + assert any(n.endswith("dense") for n in layer_names), ( + f"Dense Linear missing from quantized_layers: {layer_names}." + ) + + # Two distinct formats → MIXED_PRECISION at top level. + assert q["quant_algo"] == "MIXED_PRECISION", ( + f"Expected MIXED_PRECISION (fused-experts FP8 per-tensor + dense " + f"FP8 per-channel), got quant_algo={q['quant_algo']}. " + f"quantized_layers={q.get('quantized_layers')}" + ) + + self._cleanup_registry(expert_type) + + +# --------------------------------------------------------------------------- +# Tests for the fused-experts quantizer-name normalizer used by +# conversion._match_quantizer. Covers both plural (actual _QuantFusedExperts +# layout) and singular (defensive: future variants may name the ModuleList +# without the trailing `s`) forms. +# --------------------------------------------------------------------------- +class TestNormalizeFusedExpertsQuantizerName: + def test_plural_weight_quantizers_stripped(self): + assert ( + _normalize_fused_experts_quantizer_name("moe.experts.gate_up_proj_weight_quantizers.7") + == "moe.experts.gate_up_proj_weight_quantizer" + ) + + def test_plural_input_quantizers_stripped(self): + assert ( + _normalize_fused_experts_quantizer_name("moe.experts.down_proj_input_quantizers.3") + == "moe.experts.down_proj_input_quantizer" + ) + + def test_singular_weight_quantizer_with_index_stripped(self): + """Defensive: handle variants that name the ModuleList singular.""" + assert ( + _normalize_fused_experts_quantizer_name("moe.experts.gate_up_proj_weight_quantizer.2") + == "moe.experts.gate_up_proj_weight_quantizer" + ) + + def test_singular_input_quantizer_with_index_stripped(self): + assert ( + _normalize_fused_experts_quantizer_name("moe.experts.down_proj_input_quantizer.0") + == "moe.experts.down_proj_input_quantizer" + ) + + def test_non_indexed_name_unchanged(self): + """Plain singular names (no index) must be passed through untouched.""" + assert ( + _normalize_fused_experts_quantizer_name("moe.experts.gate_up_proj_weight_quantizer") + == "moe.experts.gate_up_proj_weight_quantizer" + ) + + def test_unrelated_dotted_number_unchanged(self): + """Dotted numbers that aren't inside a quantizer-list context are left alone.""" + assert ( + _normalize_fused_experts_quantizer_name("moe.layers.3.gate.weight") + == "moe.layers.3.gate.weight" + ) From f06190b512b829366daa66a7fc00c2a4ddc2cdc2 Mon Sep 17 00:00:00 2001 From: ynankani Date: Wed, 29 Apr 2026 10:45:40 +0000 Subject: [PATCH 10/24] [BUG6108338] Update windows documentation for onnxruntime quantization with Cuda13.x (#1368) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: ? documentation Update windows documentation for onnxruntime quantization with Cuda13.x ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). 
- Is this change backward compatible?: N/A
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A
- Did you write any new necessary tests?: N/A
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: N/A

## Summary by CodeRabbit

* **Documentation**
  * Updated Windows installation guide with CUDA 13.x-specific setup instructions for GPU-accelerated dependencies, including CuPy and ONNX Runtime configuration with nightly builds.

Signed-off-by: ynankani
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 .../windows/_installation_standalone.rst | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

diff --git a/docs/source/getting_started/windows/_installation_standalone.rst b/docs/source/getting_started/windows/_installation_standalone.rst
index 500b480e12e..1fd1c3fca55 100644
--- a/docs/source/getting_started/windows/_installation_standalone.rst
+++ b/docs/source/getting_started/windows/_installation_standalone.rst
@@ -64,6 +64,22 @@ If you need to use any other EP for calibration, you can uninstall the existing

 By default, ModelOpt-Windows utilizes the `cupy-cuda12x `_ tool for GPU acceleration during the INT4 ONNX quantization process. This is compatible with CUDA 12.x.

+If you are using CUDA 13.x, update the CUDA-dependent packages manually:
+
+For official ONNX Runtime guidance, see `Nightly builds for CUDA 13.x `_.
+
+1. Uninstall ``cupy-cuda12x`` and install ``cupy-cuda13x``.
+2. Uninstall ``onnxruntime-genai-cuda`` and ``onnxruntime-gpu``.
+3. Install the ONNX Runtime CUDA 13 nightly and the pre-release ``onnxruntime-genai-cuda`` package.
+
+.. code-block:: bash
+
+    pip uninstall -y cupy-cuda12x onnxruntime-genai-cuda onnxruntime-gpu
+    pip install cupy-cuda13x
+    pip install coloredlogs flatbuffers numpy packaging protobuf sympy
+    pip install --pre --index-url https://aiinfra.pkgs.visualstudio.com/PublicPackages/_packaging/ort-cuda-13-nightly/pypi/simple/ onnxruntime-gpu
+    pip install --pre onnxruntime-genai-cuda
+
 **6. Verify Installation**

 Ensure the following steps are verified:

From 4879789ddef12f59d9319c07353df4f820736b45 Mon Sep 17 00:00:00 2001
From: h-guo18 <67671475+h-guo18@users.noreply.github.com>
Date: Wed, 29 Apr 2026 13:11:13 -0700
Subject: [PATCH 11/24] [Fix]: Relax DFlash Regression Test Threshold for 2 GPUs (#1373)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Type of change: Bug fix

The offline DFlash regression test can run on 1 or 2 GPUs. With 2 GPUs, the total number of training steps is half that of the 1-GPU run, so the loss has less room to converge. This PR relaxes the failure threshold for the 2-GPU runs.

Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.).
- Is this change backward compatible?: ✅ / ❌ / N/A - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A - Did you write any new necessary tests?: ✅ / ❌ / N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A Signed-off-by: h-guo18 <67671475+h-guo18@users.noreply.github.com> Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .../torch/speculative/test_dflash_offline.py | 162 ++++++++++++++++++ 1 file changed, 162 insertions(+) create mode 100644 tests/regression/torch/speculative/test_dflash_offline.py diff --git a/tests/regression/torch/speculative/test_dflash_offline.py b/tests/regression/torch/speculative/test_dflash_offline.py new file mode 100644 index 00000000000..da951fdcda6 --- /dev/null +++ b/tests/regression/torch/speculative/test_dflash_offline.py @@ -0,0 +1,162 @@ +# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""DFlash offline E2E regression tests. + +Mirrors test_dflash.py but exercises the offline pipeline: + 1. Dump base-model hidden states from a slice of synthetic_conversations_1k.jsonl + via examples/speculative_decoding/collect_hidden_states/compute_hidden_states_hf.py. + 2. Train DFlash with data.offline_data_path set (triggers _derive_dflash_offline, + which deletes base-model layers post-convert to save memory). + 3. Verify loss decreases on the offline path. + +Aux-layer ids 1,25 match build_target_layer_ids(num_orig_hidden_layers=28, +num_draft_layers=2) for Qwen3-0.6B (28 hidden layers); changing the base model +or draft layer count requires updating --aux-layers accordingly so the dumped +aux_hidden_states dim matches the draft module input. +""" + +import json +import os + +import pytest +from _test_utils.examples.run_command import MODELOPT_ROOT, run_example_command + +DFLASH_YAML = str( + MODELOPT_ROOT / "modelopt_recipes" / "general" / "speculative_decoding" / "dflash.yaml" +) + +CHAT_TEMPLATE = str( + MODELOPT_ROOT + / "tools" + / "launcher" + / "examples" + / "Qwen" + / "Qwen3-0.6B" + / "chat_template_train.jinja" +) + +SYNTH_DATA = str(MODELOPT_ROOT / "examples" / "dataset" / "synthetic_conversations_1k.jsonl") + +# Match _DFLASH_OVERRIDES in test_dflash.py so the offline run is comparable to online. +_DFLASH_OVERRIDES = [ + f"data.chat_template={CHAT_TEMPLATE}", + "training.training_seq_len=512", + "training.per_device_train_batch_size=2", + "training.logging_steps=50", + "training.answer_only_loss=true", + "dflash.dflash_block_size=8", + "dflash.dflash_mask_token_id=151669", + "dflash.dflash_use_torch_compile=False", + "dflash.dflash_architecture_config.num_hidden_layers=2", +] + +# Number of conversations to dump. Smaller than the full 1K to keep dump time +# bounded; large enough that loss-decrease becomes visible across logging_steps. 
+_DUMP_NUM_CONVERSATIONS = 200 + + +@pytest.fixture(scope="session") +def qwen3_model_name(): + """Qwen3-0.6B model name (downloaded from HF on first use).""" + return "Qwen/Qwen3-0.6B" + + +@pytest.fixture(scope="session") +def dflash_offline_output_dir(tmp_path_factory): + return tmp_path_factory.mktemp("dflash_offline_output") + + +@pytest.fixture(scope="session") +def tagged_synth_data_path(dflash_offline_output_dir): + """Tag each row of synthetic_conversations_1k.jsonl with a stable conversation_id. + + compute_hidden_states_hf.py uses conversation_id as the dump filename and + resume/skip key, asserting it is non-null. The shared synthetic dataset + only ships a `messages` field, so we materialize a tagged copy here. + """ + tagged_path = dflash_offline_output_dir / "tagged_synth.jsonl" + with open(SYNTH_DATA) as src, open(tagged_path, "w") as dst: + for i, line in enumerate(src): + entry = json.loads(line) + entry.setdefault("conversation_id", f"{i:04d}") + dst.write(json.dumps(entry) + "\n") + return tagged_path + + +@pytest.fixture(scope="session") +def offline_hidden_states_dir(qwen3_model_name, dflash_offline_output_dir, tagged_synth_data_path): + """Dump base-model hidden states once for the whole test module.""" + dump_dir = dflash_offline_output_dir / "hidden_states" + run_example_command( + [ + "python", + "collect_hidden_states/compute_hidden_states_hf.py", + "--model", + qwen3_model_name, + "--input-data", + str(tagged_synth_data_path), + "--output-dir", + str(dump_dir), + "--debug-max-num-conversations", + str(_DUMP_NUM_CONVERSATIONS), + # Two draft layers — matches build_target_layer_ids(28, 2) for Qwen3-0.6B. + "--aux-layers", + "1,25", + "--answer-only-loss", + "--chat-template", + CHAT_TEMPLATE, + ], + "speculative_decoding", + ) + pt_files = list(dump_dir.rglob("*.pt")) + assert pt_files, f"No .pt files dumped under {dump_dir}" + return dump_dir + + +def test_dflash_offline_training( + qwen3_model_name, dflash_offline_output_dir, offline_hidden_states_dir +): + """Train DFlash from dumped hidden states and validate loss decreases.""" + output_dir = str(dflash_offline_output_dir / "dflash-qwen3-0.6b-offline") + overrides = [ + f"model.model_name_or_path={qwen3_model_name}", + f"data.offline_data_path={offline_hidden_states_dir}", + f"training.output_dir={output_dir}", + # Two epochs over the dumped slice gives enough steps for two log entries + # at logging_steps=50 with batch=2 (200/2 * 2 = 200 steps → 4 entries). + "training.num_train_epochs=2", + "training.save_steps=500", + *_DFLASH_OVERRIDES, + ] + + run_example_command( + ["./launch_train.sh", "--config", DFLASH_YAML, *overrides], + "speculative_decoding", + ) + + trainer_state = os.path.join(output_dir, "trainer_state.json") + assert os.path.exists(trainer_state), "trainer_state.json not found" + with open(trainer_state) as f: + state = json.load(f) + logs = [h for h in state.get("log_history", []) if "loss" in h] + assert len(logs) >= 2, f"Expected at least 2 log entries, got {len(logs)}" + + first_loss = float(logs[0]["loss"]) + final_loss = float(logs[-1]["loss"]) + assert final_loss < first_loss, f"Loss did not decrease: {first_loss:.3f} -> {final_loss:.3f}" + # Sanity ceiling — same threshold as the online regression. Offline trains + # on fewer samples so we don't tighten it further here. 
+    assert final_loss < 5.0, f"Final loss {final_loss:.3f} too high (expected < 5.0)"

From 1aa57755c5f73ba6fb43c7efa50e10ecba30c78b Mon Sep 17 00:00:00 2001
From: vishalpandya1990
Date: Thu, 30 Apr 2026 03:52:36 +0000
Subject: [PATCH 12/24] Ensure removal of temp files on error in ONNX INT4 quantization (#1359)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What does this PR do?

Type of change: Minor bug fix

- Wrapped the quantization steps in try-finally so the temporary files are removed even when ONNX INT4 quantization fails partway.
- To avoid duplicating cleanup logic between the awq_lite() and awq_clip() methods, added a _remove_augmented_onnx() utility that performs exception-safe removal of the augmented ONNX file and its external-data file.

### Testing

- Locally ran ONNX INT4 awq-lite and awq-clip quantization with a Llama 1B model.

### Before your PR is "*Ready for review*"

Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.).

- Is this change backward compatible?: ✅ / ❌ / N/A
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A
- Did you write any new necessary tests?: ✅ / ❌ / N/A
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A

### Additional Information

## Summary by CodeRabbit

* **Refactor**
  * Improved reliability of the quantization pipeline by ensuring temporary conversion artifacts are always removed, making cleanup more robust.
  * Consolidated handling of external-data companions and added safer deletion behavior that logs failures instead of raising errors.
  * Ensured consistent session teardown and forced memory collection to reduce resource leakage and intermittent errors during model conversion.
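For readers skimming the patch, the shape of the change is the standard try/finally-around-mkstemp cleanup pattern. Below is a minimal, condensed sketch of that pattern only — the names `_remove_temp_onnx` and `quantize_model` are illustrative, not the actual ModelOpt API; the real implementation in `modelopt/onnx/quantization/int4.py` additionally tears down the ORT session inside the `finally` block, as shown in the diff that follows.

```python
import gc
import logging
import os
import tempfile

logger = logging.getLogger(__name__)


def _remove_temp_onnx(onnx_path: str, use_external_data_format: bool) -> None:
    """Best-effort removal of a temp ONNX file and its external-data companion."""
    paths = [onnx_path]
    if use_external_data_format:
        paths.append(onnx_path + "_data")
    for path in paths:
        try:
            os.remove(path)
        except FileNotFoundError:
            pass  # nothing to clean up
        except OSError as e:
            # Log instead of raising so cleanup never masks the original error.
            logger.warning("Failed to remove %s: %s", path, e)


def quantize_model(use_external_data_format: bool = False):
    # mkstemp returns an open fd; close it immediately so the path can be
    # re-opened by the ONNX saver / inference session (required on Windows).
    fd, tmp_path = tempfile.mkstemp(suffix=".onnx")
    os.close(fd)
    try:
        # ... save the augmented model to tmp_path, create the inference
        # session, run calibration, and quantize the weights ...
        pass
    finally:
        # Runs on success *and* on any exception raised above.
        gc.collect()  # release inference-session memory before deleting files
        _remove_temp_onnx(tmp_path, use_external_data_format)
```

The key design choice, visible in the diff below, is that `FileNotFoundError` is swallowed while other `OSError`s are logged rather than re-raised, so a failed deletion can never shadow the quantization error the user actually needs to see.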
--------- Signed-off-by: vipandya Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/onnx/quantization/int4.py | 845 +++++++++++++++-------------- 1 file changed, 437 insertions(+), 408 deletions(-) diff --git a/modelopt/onnx/quantization/int4.py b/modelopt/onnx/quantization/int4.py index b17431fb9b4..d680b47cfcb 100644 --- a/modelopt/onnx/quantization/int4.py +++ b/modelopt/onnx/quantization/int4.py @@ -480,6 +480,23 @@ def _augment_graph( augmented_outputs.add(act_tensor.name) +def _remove_augmented_onnx(onnx_path: str, use_external_data_format: bool) -> None: + """Remove the augmented ONNX temp file and its external data companion (if any).""" + try: + os.remove(onnx_path) + except FileNotFoundError: + pass + except OSError as e: + logger.warning("Failed to remove augmented ONNX file: %s", e) + if use_external_data_format: + try: + os.remove(onnx_path + "_data") + except FileNotFoundError: + pass + except OSError as e: + logger.warning("Failed to remove augmented ONNX data file: %s", e) + + def _change_input_type( graph: onnx.GraphProto, input_name: str, gemm_io_type: onnx.TensorProto.DataType ): @@ -533,147 +550,152 @@ def _quantize_awq_clip( augmented_onnx_file, augmented_onnx_path = tempfile.mkstemp(suffix=".onnx") os.close(augmented_onnx_file) - save_onnx(augmented_model, augmented_onnx_path, use_external_data_format) - logger.info(f"Saving the model took {time.time() - t} seconds") - - # Creating inference session and preparing inputs for calibration - session = create_inference_session(augmented_onnx_path, calibration_eps, input_shapes_profile) - inputs = [] - for inp_d in data_reader: - inputs.append(inp_d) - assert isinstance(inp_d, dict) - layer_info = get_layer_info(onnx_model, nodes_to_exclude, block_size, **kwargs) - # Apply AWQ clip on selected weights - t = time.time() - alphas = {} - for i in tqdm(range(len(wa_pack)), desc="Running clip search..."): - act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] - - # First capture all the activation values after calibration data sweep - output_dicts = {} - for inp_d in inputs: - np_inp_d = {name: numpy.asarray(tensor) for name, tensor in inp_d.items()} - output = session.run([act_tensor.name], np_inp_d) - out = np.asarray(output[0]) - output_dicts.setdefault(act_tensor.name, []).append(out) - - # Concatenating the activation tensors over all calib data - x = np.concatenate(output_dicts[act_tensor.name], axis=0) # n_token, ci - w = numpy_helper.to_array( - weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) - ).copy() - if do_transpose: - w = w.T - w = np.asarray(w) - num_bits = get_num_bits(layer_info, weight_tensor.name) - # Updating the block size as for 8bit quantization, per-channel quantization is used. 
- block_size_updated = update_block_size(block_size, layer_info, weight_tensor.name, w=w) - awq_clip = AWQClipHelper(w, block_size_updated, **kwargs) - _clip_search(x, w, awq_clip, num_bits=num_bits, **kwargs) - alphas[weight_tensor.name] = awq_clip.best_alpha - - logger.info(f"Clip search for all weights took {time.time() - t} seconds") + session = None + try: + save_onnx(augmented_model, augmented_onnx_path, use_external_data_format) + logger.info(f"Saving the model took {time.time() - t} seconds") - del session + # Creating inference session and preparing inputs for calibration + session = create_inference_session( + augmented_onnx_path, calibration_eps, input_shapes_profile + ) + inputs = [] + for inp_d in data_reader: + inputs.append(inp_d) + assert isinstance(inp_d, dict) + layer_info = get_layer_info(onnx_model, nodes_to_exclude, block_size, **kwargs) + # Apply AWQ clip on selected weights + t = time.time() + alphas = {} + for i in tqdm(range(len(wa_pack)), desc="Running clip search..."): + act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] - # Compute quantized weights and scales which are needed for DQ nodes - t = time.time() - for i in tqdm(range(len(wa_pack)), desc="Quantizing the weights..."): - act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] - gemm_io_type = cast("onnx.TensorProto.DataType", gemm_io_type) + # First capture all the activation values after calibration data sweep + output_dicts = {} + for inp_d in inputs: + np_inp_d = {name: numpy.asarray(tensor) for name, tensor in inp_d.items()} + output = session.run([act_tensor.name], np_inp_d) + out = np.asarray(output[0]) + output_dicts.setdefault(act_tensor.name, []).append(out) - if force_fp16: - gemm_io_type = onnx.TensorProto.FLOAT16 + # Concatenating the activation tensors over all calib data + x = np.concatenate(output_dicts[act_tensor.name], axis=0) # n_token, ci + w = numpy_helper.to_array( + weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) + ).copy() + if do_transpose: + w = w.T + w = np.asarray(w) + num_bits = get_num_bits(layer_info, weight_tensor.name) + # Updating the block size as for 8bit quantization, per-channel quantization is used. + block_size_updated = update_block_size(block_size, layer_info, weight_tensor.name, w=w) + awq_clip = AWQClipHelper(w, block_size_updated, **kwargs) + _clip_search(x, w, awq_clip, num_bits=num_bits, **kwargs) + alphas[weight_tensor.name] = awq_clip.best_alpha - w = numpy_helper.to_array( - weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) - ).copy() - if do_transpose: - w = w.T - w = np.asarray(w) + logger.info(f"Clip search for all weights took {time.time() - t} seconds") - alpha = alphas.get(weight_tensor.name, 1) - num_bits = get_num_bits(layer_info, weight_tensor.name) - # Updating the block size as for 8bit quantization, per-channel quantization is used. 
- block_size_updated = update_block_size(block_size, layer_info, weight_tensor.name, w=w) - qw, scale, _ = quant_tensor(w, block_size_updated, alpha=alpha, num_bits=num_bits) - if has_cupy: - qw = np.asnumpy(qw) - scale = np.asnumpy(scale) - if do_transpose: - qw = qw.T - scale = scale.T - scales[weight_tensor.name] = scale.astype( - onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type) - ) - gemm_weights_quantized[weight_tensor.name] = numpy.asarray(qw).astype(numpy.int8) + session = None - # Change the input activation type to the expected type, fp16 by default - # TODO: cast input C for Gemm - _change_input_type(onnx_model.graph, act_tensor.name, gemm_io_type) + # Compute quantized weights and scales which are needed for DQ nodes + t = time.time() + for i in tqdm(range(len(wa_pack)), desc="Quantizing the weights..."): + act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] + gemm_io_type = cast("onnx.TensorProto.DataType", gemm_io_type) - logger.info(f"Quantizing actual weights took {time.time() - t} seconds") + if force_fp16: + gemm_io_type = onnx.TensorProto.FLOAT16 - graph_gs = gs.import_onnx(onnx_model) + w = numpy_helper.to_array( + weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) + ).copy() + if do_transpose: + w = w.T + w = np.asarray(w) - gather_block_size = kwargs.get("gather_block_size", DEFAULT_GATHER_BLOCK_SIZE) - gather_quantize_axis = kwargs.get("gather_quantize_axis", DEFAULT_GATHER_QUANTIZE_AXIS) - gather_w_map = None - gather_s_map = None - if gather_quantize_axis is not None: - gather_w_map, gather_s_map, _ = _quantize_gather_nodes( - graph_gs, - nodes_to_exclude, - use_zero_point=False, - dq_only=True, - layer_info=layer_info, - ) + alpha = alphas.get(weight_tensor.name, 1) + num_bits = get_num_bits(layer_info, weight_tensor.name) + # Updating the block size as for 8bit quantization, per-channel quantization is used. 
+ block_size_updated = update_block_size(block_size, layer_info, weight_tensor.name, w=w) + qw, scale, _ = quant_tensor(w, block_size_updated, alpha=alpha, num_bits=num_bits) + if has_cupy: + qw = np.asnumpy(qw) + scale = np.asnumpy(scale) + if do_transpose: + qw = qw.T + scale = scale.T + scales[weight_tensor.name] = scale.astype( + onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type) + ) + gemm_weights_quantized[weight_tensor.name] = numpy.asarray(qw).astype(numpy.int8) + + # Change the input activation type to the expected type, fp16 by default + # TODO: cast input C for Gemm + _change_input_type(onnx_model.graph, act_tensor.name, gemm_io_type) + + logger.info(f"Quantizing actual weights took {time.time() - t} seconds") + + graph_gs = gs.import_onnx(onnx_model) + + gather_block_size = kwargs.get("gather_block_size", DEFAULT_GATHER_BLOCK_SIZE) + gather_quantize_axis = kwargs.get("gather_quantize_axis", DEFAULT_GATHER_QUANTIZE_AXIS) + gather_w_map = None + gather_s_map = None + if gather_quantize_axis is not None: + gather_w_map, gather_s_map, _ = _quantize_gather_nodes( + graph_gs, + nodes_to_exclude, + use_zero_point=False, + dq_only=True, + layer_info=layer_info, + ) - t = time.time() - # Apply column-major optimization if flag is set - # Transposes the weights and scales in-place - use_column_major = kwargs.get("use_column_major", False) - if use_column_major: - qdq.apply_column_major_transformation(gemm_weights_quantized, scales) - dq_node_attributes = {"axis": 1, "block_size": block_size} - else: - dq_node_attributes = {"axis": 0, "block_size": block_size} - scales = reshape_scales_for_per_channel_nodes(scales, block_size, layer_info) - qdq.insert_dq_nodes( - graph_gs, - scales, - quantized_weights=gemm_weights_quantized, - attributes=dq_node_attributes, - layer_info=layer_info, - ) - # Add transpose nodes for column-major if needed - if use_column_major: - qdq.insert_transpose_nodes_for_column_major(graph_gs) - if gather_w_map is not None: - assert gather_s_map is not None, "scale-map not found for quantizable gather nodes" - gather_dq_node_attributes = {"axis": gather_quantize_axis, "block_size": gather_block_size} + t = time.time() + # Apply column-major optimization if flag is set + # Transposes the weights and scales in-place + use_column_major = kwargs.get("use_column_major", False) + if use_column_major: + qdq.apply_column_major_transformation(gemm_weights_quantized, scales) + dq_node_attributes = {"axis": 1, "block_size": block_size} + else: + dq_node_attributes = {"axis": 0, "block_size": block_size} + scales = reshape_scales_for_per_channel_nodes(scales, block_size, layer_info) qdq.insert_dq_nodes( graph_gs, - gather_s_map, - quantized_weights=gather_w_map, - attributes=gather_dq_node_attributes, + scales, + quantized_weights=gemm_weights_quantized, + attributes=dq_node_attributes, layer_info=layer_info, ) - logger.info(f"Inserting DQ nodes took {time.time() - t} seconds") - - logger.info("Exporting the quantized graph") - t = time.time() - model = gs.export_onnx(graph_gs) - # Set ir_version to 10, remove it once ORT supports ir_version 11 - model.ir_version = 10 - logger.info(f"Exporting took {time.time() - t} seconds") + # Add transpose nodes for column-major if needed + if use_column_major: + qdq.insert_transpose_nodes_for_column_major(graph_gs) + if gather_w_map is not None: + assert gather_s_map is not None, "scale-map not found for quantizable gather nodes" + gather_dq_node_attributes = { + "axis": gather_quantize_axis, + "block_size": gather_block_size, + } + 
qdq.insert_dq_nodes( + graph_gs, + gather_s_map, + quantized_weights=gather_w_map, + attributes=gather_dq_node_attributes, + layer_info=layer_info, + ) + logger.info(f"Inserting DQ nodes took {time.time() - t} seconds") - try: - os.remove(augmented_onnx_path) - if use_external_data_format: - os.remove(augmented_onnx_path + "_data") - except OSError: - logger.warn("Augmented ONNX model or external data file was not found") + logger.info("Exporting the quantized graph") + t = time.time() + model = gs.export_onnx(graph_gs) + # Set ir_version to 10, remove it once ORT supports ir_version 11 + model.ir_version = 10 + logger.info(f"Exporting took {time.time() - t} seconds") + finally: + if session is not None: + session = None + gc.collect() + _remove_augmented_onnx(augmented_onnx_path, use_external_data_format) return model @@ -1085,316 +1107,323 @@ def _quantize_awq_lite( augmented_onnx_file, augmented_onnx_path = tempfile.mkstemp(suffix=".onnx") os.close(augmented_onnx_file) - save_onnx(augmented_model, augmented_onnx_path, use_external_data_format) - logger.info(f"Saving the model took {time.time() - t} seconds") - - # Creating inference session and preparing inputs for calibration - session = create_inference_session(augmented_onnx_path, calibration_eps, input_shapes_profile) - inputs = [] - for inp_d in data_reader: - inputs.append(inp_d) - assert isinstance(inp_d, dict) - - gc.collect() - - output_data = [] - - if enable_fast_path_using_high_sysram: - logger.info("Fast-path-using-high-sysram is enabled\n") - - tensor_names_list = [] - for i in tqdm(range(len(wa_pack)), desc="Getting tensor names..."): - act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] - tensor_names_list.append(act_tensor.name) + session = None + try: + save_onnx(augmented_model, augmented_onnx_path, use_external_data_format) + logger.info(f"Saving the model took {time.time() - t} seconds") - for i in tqdm(range(len(inputs)), desc="Caching activations..."): - inp_d = inputs[i] - np_inp_d = {name: numpy.asarray(tensor) for name, tensor in inp_d.items()} - output = session.run(tensor_names_list, np_inp_d) - output_data.append(output) + # Creating inference session and preparing inputs for calibration + session = create_inference_session( + augmented_onnx_path, calibration_eps, input_shapes_profile + ) + inputs = [] + for inp_d in data_reader: + inputs.append(inp_d) + assert isinstance(inp_d, dict) - del session - session = None gc.collect() - # Apply AWQ lite on selected weights - t = time.time() - awq_lite = [None] * len(wa_pack) - clip_alphas = {} - - msg = "..." - if enable_weight_clipping: - msg = " and clip-range search..." 
+ output_data = [] - act_to_wa_pack_map, act_to_quant_nodes_weight_shape_map = ( - get_act_to_weight_map_and_act_to_wa_pack_map(wa_pack) - ) - if run_per_subgraph: - # TODO - add support for handling awq_lite mixed precision for per-subgraph implementation - awq_lite = run_awq_scale_search_per_subgraph( - wa_pack, - act_to_wa_pack_map, - act_to_quant_nodes_weight_shape_map, - augmented_onnx_path, - block_size, - use_zero_point, - session, - awq_lite, - inputs, - msg, - **kwargs, - ) - else: - awq_lite, clip_alphas = run_awq_scale_search_per_node( - wa_pack, - augmented_onnx_path, - block_size, - use_zero_point, - session, - awq_lite, - inputs, - msg, - enable_weight_clipping, - enable_fast_path_using_high_sysram, - output_data, - clip_alphas, - layer_info, - **kwargs, - ) - assert len(awq_lite) == len(wa_pack) - for i in range(len(awq_lite)): - assert awq_lite[i] is not None - - if enable_weight_clipping: - assert len(clip_alphas.keys()) == len(wa_pack) - - logger.info("AWQ scale search" + msg.strip(".") + f" took {time.time() - t} seconds") + if enable_fast_path_using_high_sysram: + logger.info("Fast-path-using-high-sysram is enabled\n") - if session is not None: - del session - session = None - if has_cupy: - np.get_default_memory_pool().free_all_blocks() - del output_data - gc.collect() + tensor_names_list = [] + for i in tqdm(range(len(wa_pack)), desc="Getting tensor names..."): + act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] + tensor_names_list.append(act_tensor.name) - # Compute quantized weights and scales which are needed for DQ nodes - t = time.time() - # Use a common mean scale for weights within a sub-graph - if fuse_nodes and not run_per_subgraph: - for wa_pack_idx_list in act_to_wa_pack_map.values(): - group_awq_scale = [ - awq_lite[wa_pack_idx].best_scale[:, np.newaxis] for wa_pack_idx in wa_pack_idx_list - ] - mean_awq_scale = np.concatenate(group_awq_scale, axis=1) - mean_awq_scale = mean_awq_scale.mean(axis=1) - for wa_pack_idx in wa_pack_idx_list: - awq_lite[wa_pack_idx].best_scale = mean_awq_scale + for i in tqdm(range(len(inputs)), desc="Caching activations..."): + inp_d = inputs[i] + np_inp_d = {name: numpy.asarray(tensor) for name, tensor in inp_d.items()} + output = session.run(tensor_names_list, np_inp_d) + output_data.append(output) - for i in tqdm(range(len(wa_pack)), desc="Quantizing the weights..."): - act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] - gemm_io_type = cast("onnx.TensorProto.DataType", gemm_io_type) + del session + session = None + gc.collect() - if force_fp16: - gemm_io_type = onnx.TensorProto.FLOAT16 + # Apply AWQ lite on selected weights + t = time.time() + awq_lite = [None] * len(wa_pack) + clip_alphas = {} - w = numpy_helper.to_array( - weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) - ).copy() - if do_transpose: - w = w.T - w = np.asarray(w) + msg = "..." + if enable_weight_clipping: + msg = " and clip-range search..." - w_scaled = w * awq_lite[i].best_scale[:, np.newaxis] - alpha = clip_alphas.get(weight_tensor.name, 1) - assert enable_weight_clipping or (alpha == 1), ( - "clip range enabled without enabling weight-clipping param" - ) - # Updating the block size as for 8bit quantization, per-channel quantization is used. 
- num_bits = get_num_bits(layer_info, weight_tensor.name) - block_size_updated = update_block_size( - block_size, layer_info, weight_tensor.name, w=w_scaled - ) - qw, scale, zp = quant_tensor( - w_scaled, - block_size_updated, - alpha=alpha, - use_zero_point=use_zero_point, - num_bits=num_bits, + act_to_wa_pack_map, act_to_quant_nodes_weight_shape_map = ( + get_act_to_weight_map_and_act_to_wa_pack_map(wa_pack) ) + if run_per_subgraph: + # TODO - add support for handling awq_lite mixed precision for per-subgraph implementation + awq_lite = run_awq_scale_search_per_subgraph( + wa_pack, + act_to_wa_pack_map, + act_to_quant_nodes_weight_shape_map, + augmented_onnx_path, + block_size, + use_zero_point, + session, + awq_lite, + inputs, + msg, + **kwargs, + ) + else: + awq_lite, clip_alphas = run_awq_scale_search_per_node( + wa_pack, + augmented_onnx_path, + block_size, + use_zero_point, + session, + awq_lite, + inputs, + msg, + enable_weight_clipping, + enable_fast_path_using_high_sysram, + output_data, + clip_alphas, + layer_info, + **kwargs, + ) + assert len(awq_lite) == len(wa_pack) + for i in range(len(awq_lite)): + assert awq_lite[i] is not None - assert use_zero_point is True or zp is None, "zp is not according to use-zero-point setting" - if do_transpose: - qw = qw.T - scale = scale.T - if zp is not None: - zp = zp.T - if has_cupy: - qw = np.asnumpy(qw) - scale = np.asnumpy(scale) - if zp is not None: - zp = np.asnumpy(zp) - scales[weight_tensor.name] = scale.astype( - onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type) - ) - weight_dtype = numpy.int8 - if zp is not None: - zero_points[weight_tensor.name] = numpy.asarray(zp).astype(numpy.uint8) - weight_dtype = numpy.uint8 - gemm_weights_quantized[weight_tensor.name] = numpy.asarray(qw).astype(weight_dtype) - input_tensors[weight_tensor.name] = act_tensor.name - pqs_value = ( - awq_lite[i] - .best_scale[:, np.newaxis] - .astype(onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type)) - ).T - if has_cupy: - pqs_value = np.asnumpy(pqs_value) - pre_quant_scale[weight_tensor.name] = pqs_value + if enable_weight_clipping: + assert len(clip_alphas.keys()) == len(wa_pack) - # Change the input activation type to the expected type, fp16 by default - # TODO: cast input C for Gemm - _change_input_type(onnx_model.graph, act_tensor.name, gemm_io_type) + logger.info("AWQ scale search" + msg.strip(".") + f" took {time.time() - t} seconds") - logger.info(f"Quantizing actual weights took {time.time() - t} seconds") + if session is not None: + session = None + if has_cupy: + np.get_default_memory_pool().free_all_blocks() + del output_data + gc.collect() - # Fuse Mul nodes with parent node if possible - if fuse_nodes: - logger.info("Fusing pre-quant scale Mul nodes with parent node") + # Compute quantized weights and scales which are needed for DQ nodes t = time.time() - updated_nodes = set() - name_to_node_map = {node.name: node for node in onnx_model.graph.node} - initializer_map = { - initializer.name: initializer for initializer in onnx_model.graph.initializer - } - for parent, child_nodes in parent_child_nodes_map.items(): - if parent == "root_0": - continue - parent = name_to_node_map[parent] - if parent.name in updated_nodes: - continue - # When fuse_nodes or run_per_subgraph is True, - # scales computed for each child_nodes will be same. 
- # Hence, picking pre_quant_scale corresponding to any child_nodes is acceptable - input_scale = np.asarray(pre_quant_scale[child_nodes[0].input[1]]) - weight_tensor_names = [node.input[1] for node in child_nodes] - if ( - is_fusible_scaling_op(parent.op_type) - and not all(initializer_map.get(inp) is None for inp in parent.input) - and len(input_name_to_nodes[child_nodes[0].input[0]]) == len(child_nodes) - ): - for inp in parent.input: - if initializer_map.get(inp) is not None: - tensor = initializer_map[inp] - old_dim = tensor.dims - tensor_array = numpy_helper.to_array( - tensor, - base_dir=os.path.dirname(augmented_onnx_path), - ) - new_tensor = np.asarray(tensor_array) / input_scale - new_tensor = new_tensor.reshape(old_dim) - new_tensor = numpy_helper.from_array(new_tensor.get(), tensor.name) - # replace initializer with new scaled array - tensor.CopyFrom(new_tensor) - for w_name in weight_tensor_names: - del pre_quant_scale[w_name] - updated_nodes.add(parent.name) - else: - scale_tensor = onnx.helper.make_tensor( - name=parent.output[0] + "_pre_quant_scale", - data_type=onnx.helper.np_dtype_to_tensor_dtype(input_scale.dtype), - dims=input_scale.shape, - vals=(1.0 / input_scale).flatten().tolist(), - ) - mul_op_name = parent.output[0] + "_pre_quant_scale_out" - mul_node = onnx.helper.make_node( - "Mul", - inputs=[child_nodes[0].input[0], scale_tensor.name], - outputs=[mul_op_name], - name=child_nodes[0].input[0] + "_pre_quant_scale_mul", - ) - for node in child_nodes: - node.input[0] = mul_node.output[0] - for w_name in weight_tensor_names: - del pre_quant_scale[w_name] - onnx_model.graph.initializer.append(scale_tensor) - onnx_model.graph.node.append(mul_node) - - logger.info(f"Fusing pre-quant scale Mul nodes took {time.time() - t} seconds") + # Use a common mean scale for weights within a sub-graph + if fuse_nodes and not run_per_subgraph: + for wa_pack_idx_list in act_to_wa_pack_map.values(): + group_awq_scale = [ + awq_lite[wa_pack_idx].best_scale[:, np.newaxis] + for wa_pack_idx in wa_pack_idx_list + ] + mean_awq_scale = np.concatenate(group_awq_scale, axis=1) + mean_awq_scale = mean_awq_scale.mean(axis=1) + for wa_pack_idx in wa_pack_idx_list: + awq_lite[wa_pack_idx].best_scale = mean_awq_scale + + for i in tqdm(range(len(wa_pack)), desc="Quantizing the weights..."): + act_tensor, weight_tensor, do_transpose, gemm_io_type, _ = wa_pack[i] + gemm_io_type = cast("onnx.TensorProto.DataType", gemm_io_type) + + if force_fp16: + gemm_io_type = onnx.TensorProto.FLOAT16 + + w = numpy_helper.to_array( + weight_tensor, base_dir=os.path.dirname(augmented_onnx_path) + ).copy() + if do_transpose: + w = w.T + w = np.asarray(w) + + w_scaled = w * awq_lite[i].best_scale[:, np.newaxis] + alpha = clip_alphas.get(weight_tensor.name, 1) + assert enable_weight_clipping or (alpha == 1), ( + "clip range enabled without enabling weight-clipping param" + ) + # Updating the block size as for 8bit quantization, per-channel quantization is used. 
+ num_bits = get_num_bits(layer_info, weight_tensor.name) + block_size_updated = update_block_size( + block_size, layer_info, weight_tensor.name, w=w_scaled + ) + qw, scale, zp = quant_tensor( + w_scaled, + block_size_updated, + alpha=alpha, + use_zero_point=use_zero_point, + num_bits=num_bits, + ) - logger.info( - "Inserting DQ nodes and input_pre_quant_scale node using quantized weights and scales" - ) + assert use_zero_point is True or zp is None, ( + "zp is not according to use-zero-point setting" + ) + if do_transpose: + qw = qw.T + scale = scale.T + if zp is not None: + zp = zp.T + if has_cupy: + qw = np.asnumpy(qw) + scale = np.asnumpy(scale) + if zp is not None: + zp = np.asnumpy(zp) + scales[weight_tensor.name] = scale.astype( + onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type) + ) + weight_dtype = numpy.int8 + if zp is not None: + zero_points[weight_tensor.name] = numpy.asarray(zp).astype(numpy.uint8) + weight_dtype = numpy.uint8 + gemm_weights_quantized[weight_tensor.name] = numpy.asarray(qw).astype(weight_dtype) + input_tensors[weight_tensor.name] = act_tensor.name + pqs_value = ( + awq_lite[i] + .best_scale[:, np.newaxis] + .astype(onnx.helper.tensor_dtype_to_np_dtype(gemm_io_type)) + ).T + if has_cupy: + pqs_value = np.asnumpy(pqs_value) + pre_quant_scale[weight_tensor.name] = pqs_value + + # Change the input activation type to the expected type, fp16 by default + # TODO: cast input C for Gemm + _change_input_type(onnx_model.graph, act_tensor.name, gemm_io_type) + + logger.info(f"Quantizing actual weights took {time.time() - t} seconds") + + # Fuse Mul nodes with parent node if possible + if fuse_nodes: + logger.info("Fusing pre-quant scale Mul nodes with parent node") + t = time.time() + updated_nodes = set() + name_to_node_map = {node.name: node for node in onnx_model.graph.node} + initializer_map = { + initializer.name: initializer for initializer in onnx_model.graph.initializer + } + for parent, child_nodes in parent_child_nodes_map.items(): + if parent == "root_0": + continue + parent = name_to_node_map[parent] + if parent.name in updated_nodes: + continue + # When fuse_nodes or run_per_subgraph is True, + # scales computed for each child_nodes will be same. 
+ # Hence, picking pre_quant_scale corresponding to any child_nodes is acceptable + input_scale = np.asarray(pre_quant_scale[child_nodes[0].input[1]]) + weight_tensor_names = [node.input[1] for node in child_nodes] + if ( + is_fusible_scaling_op(parent.op_type) + and not all(initializer_map.get(inp) is None for inp in parent.input) + and len(input_name_to_nodes[child_nodes[0].input[0]]) == len(child_nodes) + ): + for inp in parent.input: + if initializer_map.get(inp) is not None: + tensor = initializer_map[inp] + old_dim = tensor.dims + tensor_array = numpy_helper.to_array( + tensor, + base_dir=os.path.dirname(augmented_onnx_path), + ) + new_tensor = np.asarray(tensor_array) / input_scale + new_tensor = new_tensor.reshape(old_dim) + new_tensor = numpy_helper.from_array(new_tensor.get(), tensor.name) + # replace initializer with new scaled array + tensor.CopyFrom(new_tensor) + for w_name in weight_tensor_names: + del pre_quant_scale[w_name] + updated_nodes.add(parent.name) + else: + scale_tensor = onnx.helper.make_tensor( + name=parent.output[0] + "_pre_quant_scale", + data_type=onnx.helper.np_dtype_to_tensor_dtype(input_scale.dtype), + dims=input_scale.shape, + vals=(1.0 / input_scale).flatten().tolist(), + ) + mul_op_name = parent.output[0] + "_pre_quant_scale_out" + mul_node = onnx.helper.make_node( + "Mul", + inputs=[child_nodes[0].input[0], scale_tensor.name], + outputs=[mul_op_name], + name=child_nodes[0].input[0] + "_pre_quant_scale_mul", + ) + for node in child_nodes: + node.input[0] = mul_node.output[0] + for w_name in weight_tensor_names: + del pre_quant_scale[w_name] + onnx_model.graph.initializer.append(scale_tensor) + onnx_model.graph.node.append(mul_node) - graph_gs = gs.import_onnx(onnx_model) + logger.info(f"Fusing pre-quant scale Mul nodes took {time.time() - t} seconds") - gather_block_size = kwargs.get("gather_block_size", DEFAULT_GATHER_BLOCK_SIZE) - gather_quantize_axis = kwargs.get("gather_quantize_axis", DEFAULT_GATHER_QUANTIZE_AXIS) - gather_w_map = None - gather_s_map = None - gather_zp_map = None - if gather_quantize_axis is not None: - gather_w_map, gather_s_map, gather_zp_map = _quantize_gather_nodes( - graph_gs, - nodes_to_exclude, - use_zero_point=use_zero_point, - dq_only=True, - layer_info=layer_info, + logger.info( + "Inserting DQ nodes and input_pre_quant_scale node using quantized weights and scales" ) - t = time.time() - # Apply column-major optimization if flag is set - # Transposes the weights and scales in-place - use_column_major = kwargs.get("use_column_major", False) - if use_column_major: - qdq.apply_column_major_transformation(gemm_weights_quantized, scales) - dq_node_attributes = {"axis": 1, "block_size": block_size} - else: - dq_node_attributes = {"axis": 0, "block_size": block_size} - scales = reshape_scales_for_per_channel_nodes(scales, block_size, layer_info) - qdq.insert_dq_nodes( - graph_gs, - scales, - quantized_weights=gemm_weights_quantized, - attributes=dq_node_attributes, - zero_points=zero_points if use_zero_point else None, - layer_info=layer_info, - ) - # Add transpose nodes for column-major if needed - if use_column_major: - qdq.insert_transpose_nodes_for_column_major(graph_gs) - if gather_w_map is not None: - assert gather_s_map is not None, "scale-map not found for quantizable gather nodes" - assert not use_zero_point or gather_zp_map, ( - "zero-point setting and zero-point map not in sync for quantizable gather nodes" - ) - gather_dq_node_attributes = {"axis": gather_quantize_axis, "block_size": gather_block_size} + graph_gs = 
gs.import_onnx(onnx_model) + + gather_block_size = kwargs.get("gather_block_size", DEFAULT_GATHER_BLOCK_SIZE) + gather_quantize_axis = kwargs.get("gather_quantize_axis", DEFAULT_GATHER_QUANTIZE_AXIS) + gather_w_map = None + gather_s_map = None + gather_zp_map = None + if gather_quantize_axis is not None: + gather_w_map, gather_s_map, gather_zp_map = _quantize_gather_nodes( + graph_gs, + nodes_to_exclude, + use_zero_point=use_zero_point, + dq_only=True, + layer_info=layer_info, + ) + + t = time.time() + # Apply column-major optimization if flag is set + # Transposes the weights and scales in-place + use_column_major = kwargs.get("use_column_major", False) + if use_column_major: + qdq.apply_column_major_transformation(gemm_weights_quantized, scales) + dq_node_attributes = {"axis": 1, "block_size": block_size} + else: + dq_node_attributes = {"axis": 0, "block_size": block_size} + scales = reshape_scales_for_per_channel_nodes(scales, block_size, layer_info) qdq.insert_dq_nodes( graph_gs, - gather_s_map, - quantized_weights=gather_w_map, - attributes=gather_dq_node_attributes, - zero_points=gather_zp_map if use_zero_point else None, + scales, + quantized_weights=gemm_weights_quantized, + attributes=dq_node_attributes, + zero_points=zero_points if use_zero_point else None, layer_info=layer_info, ) - if pre_quant_scale: - qdq.insert_pre_quant_scale_nodes(graph_gs, input_tensors, pre_quant_scale) - - logger.info(f"Inserting nodes took {time.time() - t} seconds") + # Add transpose nodes for column-major if needed + if use_column_major: + qdq.insert_transpose_nodes_for_column_major(graph_gs) + if gather_w_map is not None: + assert gather_s_map is not None, "scale-map not found for quantizable gather nodes" + assert not use_zero_point or gather_zp_map, ( + "zero-point setting and zero-point map not in sync for quantizable gather nodes" + ) + gather_dq_node_attributes = { + "axis": gather_quantize_axis, + "block_size": gather_block_size, + } + qdq.insert_dq_nodes( + graph_gs, + gather_s_map, + quantized_weights=gather_w_map, + attributes=gather_dq_node_attributes, + zero_points=gather_zp_map if use_zero_point else None, + layer_info=layer_info, + ) + if pre_quant_scale: + qdq.insert_pre_quant_scale_nodes(graph_gs, input_tensors, pre_quant_scale) - logger.info("Exporting the quantized graph") - t = time.time() - model = gs.export_onnx(graph_gs) - # Set ir_version to 10, remove it once ORT supports ir_version 11 - model.ir_version = 10 - logger.info(f"Exporting took {time.time() - t} seconds") + logger.info(f"Inserting nodes took {time.time() - t} seconds") - try: - os.remove(augmented_onnx_path) - if use_external_data_format: - os.remove(augmented_onnx_path + "_data") - except OSError: - logger.error("Augmented ONNX model or external data file was not found") + logger.info("Exporting the quantized graph") + t = time.time() + model = gs.export_onnx(graph_gs) + # Set ir_version to 10, remove it once ORT supports ir_version 11 + model.ir_version = 10 + logger.info(f"Exporting took {time.time() - t} seconds") + finally: + if session is not None: + session = None + gc.collect() + _remove_augmented_onnx(augmented_onnx_path, use_external_data_format) return model From e88857dcd3d88dad0fdbd1794a321e451ae6ee36 Mon Sep 17 00:00:00 2001 From: dthienan-nv Date: Thu, 30 Apr 2026 00:32:18 -0400 Subject: [PATCH 13/24] [6034518] Remove return statement preventing remote auto tuning (#1361) ### What does this PR do? 
Remove the early `return` from the code that checks the remote auto-tuning config arguments; the `return` caused the actual remote tuning flags to be skipped when building the trtexec command.

**Root cause**: Because of the early `return`, the necessary flags never get added via `self._base_cmd.extend(trtexec_args)` when remote autotuning is enabled.

**Before fix**:

```
['trtexec', '--avgRuns=100', '--iterations=100', '--warmUp=50', '--stronglyTyped', \
'--saveEngine=engine.trt', '--timingCacheFile=trtexec_timing.cache', \
'--onnx=baseline.onnx']
```

**After fix**:

```
['trtexec', '--avgRuns=100', '--iterations=100', '--warmUp=50', '--stronglyTyped', \
'--saveEngine=engine.trt', '--timingCacheFile=trtexec_timing.cache', \
'--remoteAutoTuningConfig=$CONFIG', '--safe', '--skipInference', \
'--onnx=baseline.onnx']
```

Notice that the remote autotuning and related flags are now included in the `trtexec` command.

**Related PR**: https://github.com/NVIDIA/Model-Optimizer/pull/1259

## Summary by CodeRabbit

## Bug Fixes

* Fixed an issue where remote autotuning configuration arguments were not being properly included in benchmark commands, ensuring all remote autotuning settings are now correctly applied during execution.

Signed-off-by: dmoodie
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 modelopt/onnx/quantization/autotune/benchmark.py | 1 -
 1 file changed, 1 deletion(-)

diff --git a/modelopt/onnx/quantization/autotune/benchmark.py b/modelopt/onnx/quantization/autotune/benchmark.py
index f931ae6c11a..df6dbc877d0 100644
--- a/modelopt/onnx/quantization/autotune/benchmark.py
+++ b/modelopt/onnx/quantization/autotune/benchmark.py
@@ -220,7 +220,6 @@ def __init__(
                 "Remote autotuning requires '--skipInference' to be set. Adding it to trtexec arguments."
             )
             self.trtexec_args.append("--skipInference")
-            return
         except ImportError:
             self.logger.warning(
                 "Remote autotuning is not supported with TensorRT version < 10.15. 
" From bc89749c9fb0331a2d60c00b40671ff4fc9df6f5 Mon Sep 17 00:00:00 2001 From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Date: Thu, 30 Apr 2026 14:47:16 +0530 Subject: [PATCH 14/24] =?UTF-8?q?Add=20Nemotron-Nano-9B-v2=20=E2=86=92=20P?= =?UTF-8?q?runed=207B=20e2e=20tutorial:=20Prune=20+=20Distill=20+=20Eval?= =?UTF-8?q?=20+=20Quantize=20+=20vLLM=20deployment=20(#1325)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Summary End-to-end optimization walkthrough for Nemotron-Nano-9B-v2 showing how ModelOpt techniques stack: - **Pruning** — Minitron structured pruning 9B → 7B - **Distillation** — Megatron-Bridge knowledge distillation up to 80B tokens; near-parity with official 9B on MMLU Pro, GPQA, LCB, AIME, Math 500, IFEval, SciCode - **Evaluation** - using nemo-evaluator - **Quantization** — FP8 PTQ via \`hf_ptq.py\`; checkpoint deployable on vLLM/TRT-LLM/SGLang with no extra flags (quantization auto-detected from \`config.json\`) - **vLLM Throughput** — BF16 vs FP8 benchmark on single H100 image image ### Files changed - `examples/pruning/minitron/README.md` — index page for Minitron end-to-end tutorials - `examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md` — full repro doc with 6 sections: data prep, pruning, distillation, evaluation, FP8 quantization, vLLM benchmarking - `examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/nemo_evaluator.yaml` — NeMo Evaluator config used for all benchmark numbers - `examples/pruning/puzzletron/README.md` — index page for Puzzletron distillation results - `examples/pruning/puzzletron/Llama-3.1-8B-Instruct.md` — Puzzletron distillation results (renamed from puzzletron.md) - `examples/pruning/README.md` — updated Results section with direct links to new locations - `examples/megatron_bridge/README.md` — updated results link to point to `examples/pruning/` - `examples/puzzletron/README.md` — updated distillation results link - `examples/dataset/MEGATRON_DATA_PREP.md` — tokenization commands for all datasets used in the data blend 🤖 Generated with [Claude Code](https://claude.com/claude-code) ## Summary by CodeRabbit ## Documentation * **New end-to-end tutorial** for model optimization covering Minitron pruning, knowledge distillation, FP8 quantization, and vLLM deployment with reproducibility steps and benchmark results * **Dataset preparation guide** with ready-to-run tokenization templates for Nemotron HuggingFace datasets * **Evaluation configuration** and results documentation including ablation studies across multiple benchmarks * **Updated navigation** across pruning, distillation, and dataset examples to streamline user workflows --------- Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Co-authored-by: Claude Sonnet 4.6 Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- CHANGELOG.rst | 1 + examples/dataset/MEGATRON_DATA_PREP.md | 242 +++++++++++++ examples/dataset/README.md | 82 +---- examples/megatron_bridge/README.md | 4 +- examples/pruning/README.md | 28 +- .../NVIDIA-Nemotron-Nano-9B-v2/ABLATIONS.md | 76 ++++ .../NVIDIA-Nemotron-Nano-9B-v2/README.md | 326 ++++++++++++++++++ .../figures/learning_curves.png | Bin 0 -> 252727 bytes .../nemo_evaluator.yaml | 194 +++++++++++ examples/pruning/minitron/README.md | 11 + .../puzzletron/Llama-3.1-8B-Instruct.md} | 0 examples/pruning/puzzletron/README.md | 16 + examples/puzzletron/README.md | 2 + .../utils/plugins/megatron_preprocess_data.py | 22 +- 14 files 
changed, 911 insertions(+), 93 deletions(-) create mode 100644 examples/dataset/MEGATRON_DATA_PREP.md create mode 100644 examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/ABLATIONS.md create mode 100644 examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md create mode 100644 examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/figures/learning_curves.png create mode 100644 examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/nemo_evaluator.yaml create mode 100644 examples/pruning/minitron/README.md rename examples/{megatron_bridge/results/puzzletron.md => pruning/puzzletron/Llama-3.1-8B-Instruct.md} (100%) create mode 100644 examples/pruning/puzzletron/README.md diff --git a/CHANGELOG.rst b/CHANGELOG.rst index ae08ee3a04a..3444a043a43 100755 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -7,6 +7,7 @@ Changelog **New Features** - Support full Transformer Engine spec for Minitron pruning (``mcore_minitron``). Now we no longer need to use custom ModelOpt spec. Note that this does not affect the usage of the pruning workflow but makes pruning slightly faster and may result in slightly different pruned model because of different kernel and numerics. +- Add end-to-end tutorial for Minitron pruning + distillation + quantization + evaluation + vLLM deployment for Nemotron-Nano-9B-v2 → Pruned 7B along with data blend preparation steps (and ablation study). See `examples/pruning/minitron/README.md `_ for details. - Add Puzzletron - a new algorithm for heterogeneous pruning of LLM and VLM models. See `examples/puzzletron/README.md `_ for more details. - Added iterator interface using CalibrationDataReader in ONNX quantization workflow. - Add N:M sparse softmax support to the Triton flash attention kernel (``modelopt.torch.kernels.triton_fa``). See `examples/llm_sparsity/attention_sparsity/README.md `_ for usage. diff --git a/examples/dataset/MEGATRON_DATA_PREP.md b/examples/dataset/MEGATRON_DATA_PREP.md new file mode 100644 index 00000000000..c3904d2a0fd --- /dev/null +++ b/examples/dataset/MEGATRON_DATA_PREP.md @@ -0,0 +1,242 @@ +# Tokenizing for Megatron Frameworks + +| **Section** | **Description** | **Link** | +| :---: | :---: | :---: | +| From JSONL files | Tokenize local JSONL files | \[[Link](#from-jsonl-files)\] | +| From Hugging Face Hub | Stream or download HF datasets and tokenize | \[[Link](#from-hugging-face-hub)\] | +| `reasoning_content` for Post-Training v3 | Control how chain-of-thought traces are handled | \[[Link](#reasoning_content-for-post-training-v3-datasets)\] | +| Nemotron Pre/Post-Training Datasets | Ready-to-run commands for all Nemotron datasets | \[[Link](#ready-to-run-tokenization-commands)\] | + +The distillation and pre-training scripts in Megatron-Bridge or Megatron-LM expect data pre-tokenized in Megatron's binary indexed format (`.bin` / `.idx`). +Use the `megatron_preprocess_data` utility to tokenize any JSONL or Hugging Face dataset. +The tokenization scripts below print the list of output prefixes (e.g. `tokenized_qwen3/data1_text`) that you can use for the `data_paths` argument (with relative weights on different files) in Megatron training scripts. + +**Important Notes:** + +- For Pretraining / raw-text data (`text` key) — use `--append_eod` so Megatron can tell where documents end when concatenating them into long sequences. +- For Post-training chat data (`messages` key) — omit `--append_eod`; the chat template already appends EOS at the end of each conversation. 
+- Set `--max_sequence_length 256_000` to avoid rare OOM errors if some text is very long.
+
+## From JSONL files
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --jsonl_paths /path/to/data1.jsonl /path/to/data2.jsonl ... \
+    --json_keys text \
+    --tokenizer Qwen/Qwen3-0.6B \
+    --output_dir tokenized_qwen3 \
+    --workers 32 \
+    --append_eod
+```
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --jsonl_paths /path/to/sft_data.jsonl \
+    --json_keys messages \
+    --tokenizer Qwen/Qwen3-0.6B \
+    --output_dir tokenized_qwen3 \
+    --workers 32
+```
+
+Instead of `--jsonl_paths`, pass `--input_dir /path/to/dir` to tokenize all JSONL files in a directory (`.jsonl` and `.jsonl.gz` are both supported).
+
+## From Hugging Face Hub
+
+To tokenize a dataset directly from Hugging Face Hub:
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --hf_dataset nvidia/Nemotron-Pretraining-SFT-v1 \
+    --hf_name Nemotron-SFT-Code \
+    --hf_split train \
+    --hf_max_samples_per_split 10_000_000 \
+    --json_keys text \
+    --tokenizer Qwen/Qwen3-0.6B \
+    --output_dir tokenized_qwen3 \
+    --workers 32 \
+    --append_eod
+```
+
+Omit `--hf_name` to process all subsets, `--hf_split` for all splits, or `--hf_max_samples_per_split` for all samples.
+To quickly test, use [nvidia/Nemotron-Pretraining-Dataset-sample](https://huggingface.co/datasets/nvidia/Nemotron-Pretraining-Dataset-sample).
+
+For very large datasets (tens of millions of documents), or datasets with complex nested message schemas (e.g. `tool_calls`, `function_call` fields) that cause Arrow type-cast errors in non-streaming mode, add `--hf_streaming` to avoid downloading the full dataset — only the rows actually consumed are fetched. Optionally pair with `--hf_max_samples_per_split <N>` to cap the row count; without it streaming still works but re-downloads on every run with no disk cache.
+
+> **Performance note:** Non-streaming mode downloads all Parquet shards once and caches them as Arrow files on disk.
+> Re-runs read from cache and are much faster.
+> Streaming re-downloads on every run with no cache, so it is slower for full-dataset processing.
+
+## `reasoning_content` for Post-Training v3 Datasets
+
+v3 datasets include a `reasoning_content` field in assistant messages (chain-of-thought separate from
+the final answer). Use `--reasoning_content` to control how it is handled:
+
+| Value | Behaviour |
+| --- | --- |
+| `strip` (default) | Field is discarded before `apply_chat_template`. Safe for any tokenizer. |
+| `inline` | Wrapped as `<think>...</think>` and prepended to `content`. Preserves reasoning in a tokenizer-agnostic way. |
+| `native` | Passed unchanged. Requires the tokenizer's chat template to handle the field (e.g. Qwen3). |
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --hf_dataset nvidia/Nemotron-Math-v2 \
+    --hf_split high_part00 \
+    --json_keys messages \
+    --tokenizer nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
+    --output_dir tokenized_nemotron_v2 \
+    --workers 32 \
+    --reasoning_content inline
+```
+
+---
+
+## Ready-to-run tokenization commands
+
+Tokenization commands for all Nemotron Pre-Training and Post-Training datasets used in Megatron-Bridge distillation experiments. 
+
+Two parameters vary by model — set them before running the commands below:
+
+```bash
+TOKENIZER=nvidia/NVIDIA-Nemotron-Nano-9B-v2  # HuggingFace tokenizer (or local path)
+OUTPUT_DIR=tokenized_nemotron_v2             # Output directory for tokenized files
+```
+
+> [!TIP]
+> Token count for a `.bin` file = file size in bytes ÷ 4. This is also printed by the tokenization script on completion.
+
+> [!NOTE]
+> Tokenizing each of the datasets below will take anywhere from 10 minutes to a few hours. You can tokenize them all in parallel to speed up the process.
+>
+> You may tokenize more datasets or skip some datasets depending on your needs.
+
+### Nemotron Pretraining dataset
+
+**[nvidia/Nemotron-Pretraining-SFT-v1](https://huggingface.co/datasets/nvidia/Nemotron-Pretraining-SFT-v1)** — raw text; omitting `--hf_name` tokenizes all 3 subsets (Code, General, MATH) in one command, producing a separate output file per subset named after each:
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --hf_dataset nvidia/Nemotron-Pretraining-SFT-v1 \
+    --hf_split train \
+    --hf_streaming \
+    --hf_max_samples_per_split 10_000_000 \
+    --json_keys text \
+    --tokenizer ${TOKENIZER} \
+    --output_dir ${OUTPUT_DIR} \
+    --workers 96 \
+    --max_sequence_length 256_000 \
+    --append_eod \
+    --strip_newlines
+```
+
+---
+
+### Nemotron Post-training v1 dataset
+
+**[nvidia/Nemotron-Post-Training-Dataset-v1](https://huggingface.co/datasets/nvidia/Nemotron-Post-Training-Dataset-v1)** — STEM subset, capped at 5M samples. v1 data does not contain reasoning traces:
+
+```bash
+python -m modelopt.torch.utils.plugins.megatron_preprocess_data \
+    --hf_dataset nvidia/Nemotron-Post-Training-Dataset-v1 \
+    --hf_name default \
+    --hf_split stem \
+    --hf_streaming \
+    --hf_max_samples_per_split 5_000_000 \
+    --json_keys messages \
+    --tokenizer ${TOKENIZER} \
+    --output_dir ${OUTPUT_DIR} \
+    --workers 96 \
+    --max_sequence_length 256_000
+```
+
+---
+
+### Nemotron Post-training v3 collection
+
+Datasets below are from the [Nemotron Post-Training v3 collection](https://huggingface.co/collections/nvidia/nemotron-post-training-v3). All use `--reasoning_content inline` to preserve `<think>` traces. The collection contains many more datasets — if you care about benchmarks not covered here (e.g. multilingual, agentic/tool use, SWE, safety), pick the relevant datasets from the collection and tokenize them the same way. 
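+
+The `inline` behaviour described in the table above is simple enough to sketch. The snippet below is an illustration only, with a hypothetical helper name, not the actual implementation inside `megatron_preprocess_data`:
+
+```python
+# Sketch of `--reasoning_content inline`: fold the reasoning field into
+# `content` as a <think> block before the chat template is applied.
+def inline_reasoning(message: dict) -> dict:
+    reasoning = message.pop("reasoning_content", None)
+    if message.get("role") == "assistant" and reasoning:
+        message["content"] = f"<think>\n{reasoning}\n</think>\n\n{message['content']}"
+    return message
+
+msg = {"role": "assistant", "content": "The answer is 42.", "reasoning_content": "6 * 7 = 42."}
+print(inline_reasoning(msg)["content"])
+```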
+ +**[nvidia/Nemotron-Math-v2](https://huggingface.co/datasets/nvidia/Nemotron-Math-v2)** — tokenize `high_part00` and `high_part01` separately: + +```bash +for SPLIT in high_part00 high_part01; do + python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ + --hf_dataset nvidia/Nemotron-Math-v2 \ + --hf_split ${SPLIT} \ + --json_keys messages \ + --tokenizer ${TOKENIZER} \ + --output_dir ${OUTPUT_DIR} \ + --workers 96 \ + --max_sequence_length 256_000 \ + --reasoning_content inline +done +``` + +**[nvidia/Nemotron-SFT-Competitive-Programming-v2](https://huggingface.co/datasets/nvidia/Nemotron-SFT-Competitive-Programming-v2)** — stored as raw JSONL on HuggingFace, download before tokenizing: + +```bash +hf download nvidia/Nemotron-SFT-Competitive-Programming-v2 \ + --repo-type dataset \ + --local-dir datasets/Nemotron-SFT-Competitive-Programming-v2/ +for FILE in competitive_programming_python_00 competitive_programming_cpp_00; do + python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ + --jsonl_paths datasets/Nemotron-SFT-Competitive-Programming-v2/data/${FILE}.jsonl \ + --json_keys messages \ + --tokenizer ${TOKENIZER} \ + --output_dir ${OUTPUT_DIR} \ + --workers 96 \ + --max_sequence_length 256_000 \ + --reasoning_content inline +done +``` + +**[nvidia/Nemotron-Science-v1](https://huggingface.co/datasets/nvidia/Nemotron-Science-v1)** — stored as raw JSONL on HuggingFace, download before tokenizing: + +```bash +hf download nvidia/Nemotron-Science-v1 \ + --repo-type dataset \ + --local-dir datasets/Nemotron-Science-v1/ +python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ + --input_dir datasets/Nemotron-Science-v1/data/ \ + --json_keys messages \ + --tokenizer ${TOKENIZER} \ + --output_dir ${OUTPUT_DIR} \ + --workers 96 \ + --max_sequence_length 256_000 \ + --reasoning_content inline +``` + +**[nvidia/Nemotron-SFT-Instruction-Following-Chat-v2](https://huggingface.co/datasets/nvidia/Nemotron-SFT-Instruction-Following-Chat-v2)** — stored as raw JSONL on HuggingFace, download before tokenizing: + +```bash +hf download nvidia/Nemotron-SFT-Instruction-Following-Chat-v2 \ + --repo-type dataset \ + --local-dir datasets/Nemotron-SFT-Instruction-Following-Chat-v2/ +python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ + --input_dir datasets/Nemotron-SFT-Instruction-Following-Chat-v2/data/ \ + --json_keys messages \ + --tokenizer ${TOKENIZER} \ + --output_dir ${OUTPUT_DIR} \ + --workers 96 \ + --max_sequence_length 256_000 \ + --reasoning_content inline +``` + +--- + +### Expected output + +After running all commands above, `${OUTPUT_DIR}/` should contain the following `.bin` / `.idx` file pairs: + +```text +nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-Code_train_text_max10000000.{bin,idx} +nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-General_train_text_max10000000.{bin,idx} +nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-MATH_train_text_max10000000.{bin,idx} +nvidia--Nemotron-Post-Training-Dataset-v1_default_stem_messages_max5000000.{bin,idx} +nvidia--Nemotron-Math-v2_default_high_part00_messages.{bin,idx} +nvidia--Nemotron-Math-v2_default_high_part01_messages.{bin,idx} +competitive_programming_python_00_messages.{bin,idx} +competitive_programming_cpp_00_messages.{bin,idx} +MCQ_messages.{bin,idx} +RQA_messages.{bin,idx} +reasoning_off_messages.{bin,idx} +reasoning_on_messages.{bin,idx} +``` diff --git a/examples/dataset/README.md b/examples/dataset/README.md index 15cb21613c2..d073237cf6a 100644 --- a/examples/dataset/README.md +++ 
b/examples/dataset/README.md @@ -5,7 +5,7 @@ | **Section** | **Description** | **Link** | | :------------: | :------------: | :------------: | | Building Chat Datasets | Scripts to build conversation datasets from Nemotron and other HuggingFace sources | \[[Link](#building-chat-datasets)\] | -| Tokenizing for Megatron Frameworks | Convert JSONL or HF datasets to Megatron binary format for distillation and pre-training | \[[Link](#tokenizing-for-megatron-frameworks)\] | +| Tokenizing for Megatron Frameworks | Convert JSONL or HF datasets to Megatron binary format for distillation and pre-training | \[[Link](MEGATRON_DATA_PREP.md)\] | @@ -140,85 +140,7 @@ In `generate` mode, assistant turns are stripped so the row ends with a user tur ## Tokenizing for Megatron Frameworks -The distillation and pre-training scripts in Megatron-Bridge or Megatron-LM expect data pre-tokenized in Megatron's binary indexed format (`.bin` / `.idx`). -Use the `megatron_preprocess_data` utility to tokenize any JSONL or Hugging Face dataset. -The tokenization scripts below prints the list of output prefixes (e.g. `tokenized_qwen3/data1_text`) that you can use for the `data_paths` argument (with relative weights on different files) in Megatron training scripts. - -**Important Notes:** - -- For Pretraining / raw-text data (`text` key) — use `--append_eod` so Megatron can tell where documents end when concatenating them into long sequences. -- For Post-training chat data (`messages` key) — omit `--append_eod`; the chat template already appends EOS at the end of each conversation. -- Set `--max_sequence_length 256_000` to avoid rare OOM errors if some text is very long. - -### From JSONL files - -```bash -python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ - --jsonl_paths /path/to/data1.jsonl /path/to/data2.jsonl ... \ - --json_keys text \ - --tokenizer Qwen/Qwen3-0.6B \ - --output_dir tokenized_qwen3 \ - --workers 32 \ - --append_eod -``` - -```bash -python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ - --jsonl_paths /path/to/sft_data.jsonl \ - --json_keys messages \ - --tokenizer Qwen/Qwen3-0.6B \ - --output_dir tokenized_qwen3 \ - --workers 32 -``` - -Instead of `--jsonl_paths`, pass `--input_dir /path/to/dir` to tokenize all JSONL files in a directory (`.jsonl` and `.jsonl.gz` are both supported). - -### From Hugging Face Hub - -To tokenize a dataset directly from Hugging Face Hub: - -```bash -python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ - --hf_dataset nvidia/Nemotron-Pretraining-SFT-v1 \ - --hf_name Nemotron-SFT-Code \ - --hf_split train \ - --hf_max_samples_per_split 10_000_000 \ - --json_keys text \ - --tokenizer Qwen/Qwen3-0.6B \ - --output_dir tokenized_qwen3 \ - --workers 32 \ - --append_eod -``` - -Omit `--hf_name` to process all subsets, `--hf_split` for all splits, or `--hf_max_samples_per_split` for all samples. -To quickly test, use [nvidia/Nemotron-Pretraining-Dataset-sample](https://huggingface.co/datasets/nvidia/Nemotron-Pretraining-Dataset-sample). - -For **very large datasets** (tens of millions of documents), add `--hf_streaming --hf_max_samples_per_split ` to avoid downloading the full dataset — only the rows actually consumed are fetched. - -> **Performance note:** Non-streaming mode downloads all Parquet shards once and caches them as Arrow files on disk. -> Re-runs read from cache and are much faster. -> Streaming re-downloads on every run with no cache, so it is slower for full-dataset processing. 
- -### Nemotron Post-Training v3 (`reasoning_content`) - -v3 datasets include a `reasoning_content` field in assistant messages (chain-of-thought separate from -the final answer). Use `--reasoning_content` to control how it is handled: - -| Value | Behaviour | -| --- | --- | -| `strip` (default) | Field is discarded before `apply_chat_template`. Safe for any tokenizer. | -| `inline` | Wrapped as `` and prepended to `content`. Preserves reasoning in a tokenizer-agnostic way. | -| `native` | Passed unchanged. Requires the tokenizer's chat template to handle the field (e.g. Qwen3). | - -```bash -python -m modelopt.torch.utils.plugins.megatron_preprocess_data \ - --hf_dataset nvidia/Nemotron-Post-Training-Dataset-v3 \ - --json_keys messages \ - --tokenizer Qwen/Qwen3-0.6B \ - --output_dir tokenized_qwen3 \ - --workers 32 \ - --reasoning_content inline -``` +See **[MEGATRON_DATA_PREP.md](MEGATRON_DATA_PREP.md)** for full documentation: general usage with JSONL and Hugging Face Hub datasets, handling of Nemotron Post-Training v3 `reasoning_content` fields, and ready-to-run tokenization commands for all Nemotron Pre/Post-Training datasets. ## Synthetic Test Dataset diff --git a/examples/megatron_bridge/README.md b/examples/megatron_bridge/README.md index 571a0c49884..1e384acfb19 100644 --- a/examples/megatron_bridge/README.md +++ b/examples/megatron_bridge/README.md @@ -47,7 +47,7 @@ hf auth login --token ``` > [!WARNING] -> Use `python -m pip` instead of `pip` to avoid conflicts with the system-wide installed packages in the NeMo containers. +> Use `python -m pip` instead of `pip` to avoid conflicts with the system-wide installed packages in the NeMo containers. You may also refer to this [doc](https://github.com/NVIDIA-NeMo/Megatron-Bridge/blob/main/docker/common/README.md#installing-packages-inside-the-container) on how to correctly install packages in the NeMo containers without breaking existing torch installation. ## Pruning @@ -189,7 +189,7 @@ For more details, see the [Megatron-Bridge conversion README](https://github.com ### Distillation Results -See [results/puzzletron.md](results/puzzletron.md) for MMLU results demonstrating knowledge distillation on Puzzletron-compressed student models. +See [examples/pruning/](../pruning/README.md#tutorials--results) for distillation experiment results covering Minitron and Puzzletron pruning algorithms. ## Post-Training Quantization diff --git a/examples/pruning/README.md b/examples/pruning/README.md index 9e84622269c..294f00031dd 100644 --- a/examples/pruning/README.md +++ b/examples/pruning/README.md @@ -20,6 +20,7 @@ This section focuses on applying Model Optimizer's state-of-the-art complementar | Support Matrix | View the support matrix to see available pruning algorithms and their compatibility with different models and frameworks | \[[Link](#support-matrix)\] | | | Examples | Examples of different pruning methods | \[[Link](#examples)\] | | | Pruning Guidelines | Guidelines for choosing how and how much to prune for best results | \[[Link](#pruning-guidelines)\] | | +| Tutorials / Results | End-to-end tutorials for Minitron and Puzzletron pruning | \[[Link](#tutorials--results)\] | | | Resources | Extra links to relevant resources | \[[Link](#resources)\] | | @@ -186,16 +187,28 @@ If your model parameters are already sorted and you just want to prune the weigh ## Examples -### Minitron Pruning for Megatron-Bridge/ Megatron-LM Framework LLMs (e.g. 
Qwen 3, Nemotron Nano)
+### Minitron Pruning for Megatron-Bridge/ Megatron-LM Framework LLMs (e.g. Qwen3, Nemotron 3 Nano)
 
 Checkout the Minitron pruning example for [Megatron-Bridge Framework](../megatron_bridge/README.md#pruning) or [Megatron-LM Framework](https://github.com/NVIDIA/Megatron-LM/tree/main/examples/post_training/modelopt#-pruning) which showcases the usage of the powerful Minitron pruning algorithm developed by NVIDIA Research for pruning LLMs like Llama-3.1-8B, Qwen3-8B, Nemotron-Nano-9B-v2, Nemotron-3-Nano-30B-A3B, etc. Both frameworks support importing from a Hugging Face pretrained checkpoint.
 
-Some of the models pruned using Minitron method followed by distillation and post-training are:
+Some of the official models pruned using the Minitron method followed by distillation and post-training are:
 
 - [Minitron Collection on Hugging Face](https://huggingface.co/collections/nvidia/minitron)
 - [NVIDIA-Nemotron-Nano-9B-v2](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2)
 
+See [minitron/](minitron/README.md) for end-to-end tutorials and results.
+
+### Puzzletron Pruning for LLMs (e.g. Llama, Qwen, Nemotron)
+
+Check out the [Puzzletron README](../puzzletron/README.md), which showcases MIP-based NAS pruning that produces heterogeneous model architectures — varying FFN intermediate sizes per layer and selectively removing attention layers — to meet a target parameter count or memory budget.
+
+Supported models include Llama-3.1-8B-Instruct, Qwen3-8B, Qwen2.5-7B-Instruct, Nemotron-Nano-12B-v2, Mistral-Small-24B-Instruct-2501, and others via the [configs](../puzzletron/configs/) directory. See the [Puzzletron README](../puzzletron/README.md) for more details.
+
+After compression, use [Megatron-Bridge distillation](../megatron_bridge/README.md#distillation) to recover accuracy.
+
+See [puzzletron/](puzzletron/README.md) for distillation results on Puzzletron-compressed models.
+
 ### FastNAS Pruning for PyTorch Computer Vision Models
 
 Check out the FastNAS pruning example usage in the [documentation](https://nvidia.github.io/Model-Optimizer/guides/3_pruning.html#pruning-and-subnet-search).
@@ -279,16 +292,23 @@ After pruning, distillation is required to recover model accuracy. Below are rec
 | **Hyperparameter** | **Recommendation** |
 | :---: | :---: |
 | **Sequence Length** | 8192 (or 4096 if dataset has smaller sequences) |
-| **Global Batch Size (GBS)** | 768 |
+| **Global Batch Size (GBS)** | Same as the original training, or 768 if unsure |
 | **Micro Batch Size (MBS)** | As large as your GPU memory can accommodate |
 | **Learning Rate (LR)** | 1e-4 → 1e-5 (linear decay) for 30-50% pruning
• More compression → higher LR
• Less compression → lower LR
• As model gets larger → reduce LR to avoid divergence | | **Warmup Steps** | 100 | -| **Training Max Steps** | Num training tokens / (Seq len × GBS)
• Recommended: 80-100B tokens | +| **Training Max Steps** | Num training tokens / (Seq len × GBS)
• Recommended: 80-100B tokens for best results. | | **Data Composition** | • Standard models: 100% pre-training data
• Reasoning models: 70% reasoning data + 30% pre-training data | > [!TIP] > If you know the maximum learning rate used during the original training, a good rule of thumb for knowledge distillation is to use **1/5th of that maximum LR** when compressing by ~50%. +## Tutorials / Results + +End-to-end distillation results with Megatron-Bridge after Minitron and Puzzletron pruning: + +- **[Minitron — Nemotron-Nano-9B-v2](minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md)**: End-to-end tutorial of structured pruning for Nemotron-Nano-9B-v2 to 7B followed by knowledge distillation up to 80B tokens, quantization, and vLLM deployment. Achieves near-parity with the official 9B model across popular pretraining and reasoning benchmarks. +- **[Puzzletron — Qwen3-8B and Llama-3.1-8B-Instruct](puzzletron/Llama-3.1-8B-Instruct.md)**: MIP-based compression followed by short distillation runs on WikiText-103. Shows MMLU recovery and illustrates the importance of using larger datasets to avoid overfitting. + ## Resources - 📅 [Roadmap](https://github.com/NVIDIA/Model-Optimizer/issues/146) diff --git a/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/ABLATIONS.md b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/ABLATIONS.md new file mode 100644 index 00000000000..1786e88fdda --- /dev/null +++ b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/ABLATIONS.md @@ -0,0 +1,76 @@ +# Distillation Blend Ablations + +All experiments prune Nemotron-Nano-9B-v2 → 7B and distill with teacher = Nemotron-Nano-9B-v2 (official). The final chosen blend (**30pre_70post_v1v3**) is in [README.md](README.md). + +--- + +## Baseline: Pre-SFT-v1 Only (no post-training data) + +Pure Nemotron-Pretraining-SFT-v1 data only (no post-training reasoning traces). + +| Tokens | MMLU | MMLU Pro | GPQA Diamond | LCB v6 | AIME 2025 | Math 500 | IFEval | SciCode | +|---|---|---|---|---|---|---|---|---| +| 19B | 72.7 | 70.5 | 53.9 | 58.8 | 63.4 | 94.4 | 57.9 | 19.2 | +| 56B | 73.3 | 71.9 | 54.3 | 62.0 | 63.8 | 95.0 | 58.7 | 17.9 | + +**Notes:** Highest MMLU of any blend, but AIME stagnates and LCB lags. Pretraining data alone insufficient for reasoning benchmarks. + +--- + +## Baseline: Pure Post-Training Data (pt-v1v2) + +100% post-training data (no pretraining data), Nemotron-v1/v2 blend. + +| Tokens | MMLU | MMLU Pro | GPQA Diamond | LCB v6 | AIME 2025 | Math 500 | IFEval | SciCode | +|---|---|---|---|---|---|---|---|---| +| 2.5B | 71.0 | 69.3 | 52.6 | 54.8 | 58.2 | 94.1 | 51.7 | 14.4 | +| 5B | 70.8 | 70.7 | 53.6 | 57.2 | 63.8 | 94.1 | 50.5 | 14.2 | +| 20B | 69.8 | 71.7 | 54.7 | 57.5 | 64.7 | 94.6 | 41.9 | 13.4 | +| 40B | 70.0 | 71.7 | 53.2 | 57.4 | 67.6 | 95.2 | 43.3 | 16.2 | + +**Notes:** IFEval degrades badly at longer training (41.9 at 20B). LCB lags behind other blends. + +--- + +## 30% Pretraining / 70% Post-Training: v1v2 Blend + +30% Nemotron-Pretraining-SFT-v1 + 70% Nemotron-v1/v2 post-training data. + +| Tokens | MMLU | MMLU Pro | GPQA Diamond | LCB v6 | AIME 2025 | Math 500 | IFEval | SciCode | +|---|---|---|---|---|---|---|---|---| +| 2.5B | 71.9 | 68.9 | 49.8 | 56.4 | 55.3 | 93.3 | 58.2 | 14.6 | +| 5B | — | — | — | — | — | — | — | — | +| 20B | 71.6 | 71.2 | 52.7 | 58.0 | 65.1 | 94.0 | 55.7 | 14.2 | +| 40B | 72.7 | 71.1 | 54.0 | 59.7 | 65.5 | 95.2 | 53.8 | 19.2 | +| 60B | 73.0 | 71.9 | 55.9 | 60.0 | 67.8 | 95.4 | 56.4 | 21.7 | +| 80B | 73.4 | 72.7 | 54.7 | 61.8 | 70.7 | 95.3 | 57.8 | 19.9 | +| 100B | 73.5 | 72.8 | 56.4 | 62.4 | 71.9 | 95.8 | 59.1 | 19.4 | + +**Notes:** Best MMLU of the 30/70 blends (~1% above v3 blends). 
IFEval ~56–59 (lower than v3 blends). GPQA shows instability at longer runs. + +--- + +## 30% Pretraining / 70% Post-Training: v3 Blend + +Refined v3 blend: dropped exercism/text2sql, added Nemotron-Math-v2 part01, boosted Math to 30% total. + +| Tokens | MMLU | MMLU Pro | GPQA Diamond | LCB v6 | AIME 2025 | Math 500 | IFEval | SciCode | +|---|---|---|---|---|---|---|---|---| +| 2.5B | 70.5 | 69.0 | 51.2 | 59.1 | 62.9 | 94.3 | 62.2 | 11.6 | +| 5B | 71.0 | 69.8 | 53.0 | 59.4 | 65.0 | 94.4 | 66.8 | 20.3 | +| 20B | 71.2 | 70.8 | 53.3 | 60.0 | 69.1 | 95.3 | 63.8 | 22.6 | +| 40B | 71.0 | 71.7 | 54.0 | 62.3 | 71.3 | 95.3 | 66.8 | 17.9 | +| 60B | 72.0 | 72.3 | 56.3 | 62.0 | 71.6 | 95.6 | 65.5 | 21.5 | +| 80B | 72.3 | 73.0 | 53.9 | 63.0 | 72.4 | 96.2 | 65.5 | 21.3 | + +**Notes:** Better AIME and LCB than blend 1 at 40B+. GPQA still unstable (53.9 at 80B). MMLU ~1% below v1v2 blend. + +--- + +## Blend Design Notes + +**Why MMLU is ~1% lower with v3 blends:** The heavy reasoning-trace format (chain-of-thought, TIR) in v3 data suppresses general knowledge recall measured by MMLU. This is structural — v1v2 post-training data has a more knowledge-dense format. Upweighting Pretraining-SFT-v1 General (to 20%) partially mitigates this. Given that MMLU Pro is better with v3 blends, lower MMLU is acceptable. + +**Why GPQA is unstable in blend 1:** Science-v1 MCQ (497M tokens) and RQA (278M tokens) are repeated ~14× over 100B training steps, causing overfitting to MCQ format. Fix in v1v3: add Nemotron-Post-Training-Dataset-v1 STEM (~60B tokens, ~0.13 epochs at 80B) as primary science source; reduce Science-v1 to low weights (3+2) for format alignment only. + +**Why 80B is the recommended stopping point:** SciCode degrades or crashes at 100B (blend2: 1.6; AIME also degrades). Best overall profile is at 60–80B tokens. diff --git a/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md new file mode 100644 index 00000000000..620c5780a4b --- /dev/null +++ b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/README.md @@ -0,0 +1,326 @@ +# Nemotron-Nano-9B-v2: Prune + Distill + Quantize + vLLM Deployment + +End-to-end optimization of [Nemotron-Nano-9B-v2](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2) demonstrating how ModelOpt techniques stack: Minitron structured pruning to 7B → Megatron-Bridge knowledge distillation to recover accuracy → FP8 quantization → vLLM deployment and throughput benchmarking. This document covers: + +1. **[Data Preparation](#1-data-preparation)** — tokenizing the training blend for distillation +2. **[Pruning](#2-pruning)** — Minitron structured pruning from 9B to 7B +3. **[Distillation](#3-distillation)** — recovering accuracy via Megatron-Bridge knowledge distillation (up to 80B tokens) +4. **[Evaluation](#4-evaluation)** — benchmarking with NeMo Evaluator across MMLU Pro, GPQA Diamond, AIME, and more +5. **[Quantization](#5-quantization)** — FP8 PTQ on the distilled checkpoint using ModelOpt's `examples/llm_ptq/hf_ptq.py` script +6. **[vLLM Inference Benchmarking](#6-vllm-inference-benchmarking)** — throughput comparison of BF16 vs FP8 on a single H100 + +**Environment:** Container `nvcr.io/nvidia/nemo:26.02`, ModelOpt 0.44.0. See the [Megatron-Bridge README](../../../megatron_bridge/README.md) for environment setup (including ModelOpt mount path) and container usage. 
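+
+Before starting long runs, it is worth confirming the container sees the ModelOpt build this tutorial was tested with. A minimal check, assuming `modelopt` exposes `__version__` (current releases do):
+
+```python
+# Run inside the container: verify the ModelOpt version used for this tutorial.
+import modelopt
+
+print(modelopt.__version__)  # expected: 0.44.0
+```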
+ +## Results + +![Benchmark Recovery During Knowledge Distillation](figures/learning_curves.png) + +| Model | MMLU | MMLU Pro | GPQA Diamond | LiveCodeBench v6 | AIME 2025 | Math 500 | IFEval | SciCode (Subtask) | Average | +| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | +| Pruned 7B (no distillation) | 67.8 | 11.9 | 17.7 | 1.4 | 0.3 | 6.0 | 41.8 | 0.1 | 18.4 | +| Pruned 7B + distill 2.5B tokens (400 iters) | 70.7 | 68.4 | 52.7 | 57.0 | 63.0 | 93.7 | 63.2 | 11.6 | 60.0 | +| Pruned 7B + distill 20B tokens (3200 iters) | 71.3 | 71.7 | 54.8 | 62.0 | 69.1 | 95.2 | 63.8 | 20.9 | 63.6 | +| Pruned 7B + distill 40B tokens (6400 iters) | 71.1 | 71.6 | 53.7 | 60.9 | 70.4 | 95.6 | 68.0 | 21.1 | 64.1 | +| Pruned 7B + distill 60B tokens (9600 iters) | 72.1 | 72.1 | 54.9 | 61.6 | 70.3 | 95.4 | 64.7 | 24.1 | 64.4 | +| Pruned 7B + distill 80B tokens (12800 iters) | 72.2 | 73.0 | 56.9 | 62.6 | 72.0 | 95.8 | 66.2 | 22.2 | 65.1 | +| Nemotron-Nano-9B-v2 (official, pruned from 12B) | 74.7 | 74.9 | 56.1 | 64.4 | 73.2 | 95.9 | 65.8 | 21.9 | 65.9 | +| Nemotron-Nano-12B-v2 (official) | 78.5 | 77.9 | 58.2 | 66.6 | 76.1 | 96.9 | 67.9 | 28.4 | 68.8 | + +**Key observations:** + +- **All benchmarks recover dramatically within the first checkpoint (2.5B tokens).** The pruned-only model is essentially non-functional, but a single distillation run recovers most capabilities. +- **Math 500 and IFEval plateau quickly** — essentially saturated after 2.5B tokens, with minimal gains over the remaining training. +- **MMLU also largely plateaus** after the first checkpoint. +- **AIME, MMLU Pro, GPQA, and SciCode continue improving** throughout the full run and benefit meaningfully from longer training. +- **The 7B model at 80B tokens closes most of the gap to the official 9B**, and actually exceeds it on GPQA, IFEval, and SciCode. The table below compares the 7B→9B gap against the 9B→12B gap — both are ~25% compression — showing that the second pruning round recovers more efficiently: + +| Benchmark | 7B (80B tokens) vs 9B | 9B (official) vs 12B | +| --- | --- | --- | +| MMLU | −2.5 | −3.8 | +| MMLU Pro | −1.9 | −3.0 | +| GPQA Diamond | **+0.8** | −2.1 | +| LiveCodeBench v6 | −1.8 | −2.2 | +| AIME 2025 | −1.2 | −2.9 | +| Math 500 | −0.1 | −1.0 | +| IFEval | **+0.4** | −2.1 | +| SciCode (Subtask) | **+0.3** | −6.5 | +| Average | −0.8 | −2.9 | + +Distillation uses the **30% Pretraining (Code 5, General 20, MATH 5) + 70% Post-training v1/v3 (Math 30, Coding 20, Science 15, IF 5)** blend (see [Data Blend](#data-blend) below). Blend ablations are in [ABLATIONS.md](ABLATIONS.md). + +> [!NOTE] +> Exact numbers may vary depending on deployment and evaluation setup. All models above — including the official 9B and 12B — were evaluated with the same [nemo_evaluator.yaml](nemo_evaluator.yaml) for fair comparison. These numbers may differ from those reported on the official [Nemotron-Nano-9B-v2](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2) and [Nemotron-Nano-12B-v2](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-12B-v2) HuggingFace model cards. + +> [!NOTE] +> The official Nemotron-Nano-9B-v2 model was itself produced by pruning Nemotron-Nano-12B-v2 using Minitron. See [arxiv:2508.14444](https://arxiv.org/abs/2508.14444) for details on the exact steps used there. + +--- + +## Steps to Reproduce + +### 1. Data Preparation + +See [examples/dataset/MEGATRON_DATA_PREP.md](../../../dataset/MEGATRON_DATA_PREP.md) for tokenization commands for all datasets used in this blend. 
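+
+Once tokenization completes, a quick size check catches missing or truncated files early. The snippet below is an optional helper (not part of the tutorial tooling) that applies the bytes ÷ 4 rule from the data-prep guide, assuming the `OUTPUT_DIR` set in the next step:
+
+```python
+# Optional: report token counts for each tokenized .bin file (file size / 4).
+from pathlib import Path
+
+for f in sorted(Path("tokenized_nemotron_v2").glob("*.bin")):
+    print(f"{f.name}: {f.stat().st_size // 4:,} tokens")
+```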
+ +For this experiment: `TOKENIZER=nvidia/NVIDIA-Nemotron-Nano-9B-v2`, `OUTPUT_DIR=tokenized_nemotron_v2`. + +#### Data Blend + +**30% Pretraining (Code 5, General 20, MATH 5) + 70% Post-training v1/v3 (Math 30, Coding 20, Science 15, IF 5)** + +```bash +DATA_BLEND=" \ +5 tokenized_nemotron_v2/nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-Code_train_text_max10000000 \ +20 tokenized_nemotron_v2/nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-General_train_text_max10000000 \ +5 tokenized_nemotron_v2/nvidia--Nemotron-Pretraining-SFT-v1_Nemotron-SFT-MATH_train_text_max10000000 \ +15 tokenized_nemotron_v2/nvidia--Nemotron-Math-v2_default_high_part00_messages \ +15 tokenized_nemotron_v2/nvidia--Nemotron-Math-v2_default_high_part01_messages \ +15 tokenized_nemotron_v2/competitive_programming_python_00_messages \ +5 tokenized_nemotron_v2/competitive_programming_cpp_00_messages \ +10 tokenized_nemotron_v2/nvidia--Nemotron-Post-Training-Dataset-v1_default_stem_messages_max5000000 \ +3 tokenized_nemotron_v2/MCQ_messages \ +2 tokenized_nemotron_v2/RQA_messages \ +3 tokenized_nemotron_v2/reasoning_on_messages \ +2 tokenized_nemotron_v2/reasoning_off_messages \ +" +``` + +| Dataset | Tokens | Weight | Notes | +| --- | --- | --- | --- | +| Nemotron-Pretraining-SFT-v1 / Code (10M samples) | 7B | 5 | Pretraining code | +| Nemotron-Pretraining-SFT-v1 / General (10M samples) | 16B | 20 | Upweighted to better close MMLU gap | +| Nemotron-Pretraining-SFT-v1 / MATH (10M samples) | 12B | 5 | Pretraining math | +| Nemotron-Math-v2 / high_part00 | 9B | 15 | Hard math reasoning | +| Nemotron-Math-v2 / high_part01 | 11B | 15 | Hard math reasoning | +| Nemotron-SFT-Competitive-Programming-v2 / python_00 | 7B | 15 | Python reasoning traces | +| Nemotron-SFT-Competitive-Programming-v2 / cpp_00 | 7B | 5 | C++ reasoning traces | +| Nemotron-Post-Training-Dataset-v1 / stem (5M samples) | 20B | 10 | Broad STEM | +| Nemotron-Science-v1 / MCQ | 0.5B | 3 | GPQA MCQ format alignment | +| Nemotron-Science-v1 / RQA | 0.3B | 2 | GPQA format diversity | +| Nemotron-SFT-IF-Chat-v2 / reasoning_on | 2B | 3 | Instruction following (thinking on) | +| Nemotron-SFT-IF-Chat-v2 / reasoning_off | 1B | 2 | Instruction following (thinking off) | + +#### General Guidelines + +The optimal blend is 30% pretraining and 70% post-training data. Exact proportions may vary depending on the benchmarks you care about. The blend above was designed to maximize recovery on important benchmarks reported in the Nemotron-Nano-9B-v2 model card. The key design decisions were: + +- **30% pretraining data** closes the MMLU gap that arises from training exclusively on reasoning-heavy post-training data. The General split (20%) is upweighted specifically to recover general knowledge recall. +- **Math (30%)** is the largest post-training category because AIME and MMLU Pro respond strongly to more math reasoning tokens. Two `Nemotron-Math-v2` splits are used to avoid repetition at longer token budgets. +- **Science (15%)** uses `Nemotron-Post-Training-Dataset-v1 / stem` as the primary source for volume and GPQA stability, with small allocations to `Nemotron-Science-v1` MCQ/RQA subsets for format alignment with GPQA's multiple-choice structure. +- **Instruction following (5%)** saturates quickly — IFEval reaches 60+% within 2.5B tokens — so a small allocation is sufficient. + +This blend intentionally omits capabilities not targeted in this experiment (e.g. long context and multilingual benchmarks). 
Depending on what benchmarks matter for your use case, you can substitute or add datasets from the [Nemotron Post-Training v3 collection](https://huggingface.co/collections/nvidia/nemotron-post-training-v3), for example:
+
+| Capability | Relevant datasets |
+| --- | --- |
+| Multilingual | `Nemotron-SFT-Multilingual-v1` |
+| Agentic / tool use | `Nemotron-SFT-Tool-Call-v1`, `Nemotron-SFT-Tool-Call-v2` |
+| Software engineering (SWE) | `Nemotron-SFT-SWE-v1` |
+| Safety / alignment | `Nemotron-SFT-Safety-v1` |
+| Long context | `Nemotron-SFT-Long-Context-v1` |
+
+When adding new datasets, reduce the weights of lower-priority categories proportionally to keep the total at 100%.
+
+---
+
+### 2. Pruning
+
+Run on **1 node with 8x H100** (~1 hour).
+
+Non-default arguments: `--hparams_to_skip num_attention_heads` (default: none; pruning attention heads is harder to recover from, hence skipped), `--seq_length 8192` (default: 4096) since the dataset has longer sequences. All other arguments use defaults, i.e. we optimize for MMLU (10% subset, 0-shot) for the pruned model (without distillation).
+
+```bash
+torchrun --nproc_per_node 8 /opt/Model-Optimizer/examples/megatron_bridge/prune_minitron.py \
+  --pp_size 8 \
+  --hf_model_name_or_path nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
+  --trust_remote_code \
+  --prune_target_params 7e9 \
+  --hparams_to_skip num_attention_heads \
+  --seq_length 8192 \
+  --output_hf_path /path/to/Nemotron-Nano-9B-v2-Pruned-7B
+```
+
+Important pruning logs:
+
+```text
+Only considering atmost 40% for width and 20% for depth pruning hparams
+Skipping hparams_to_skip=['num_attention_heads'] during search space generation...
+  Search space for num_layers: [46, 48, 50, 52, 54, 56]
+  Search space for hidden_size: [2816, 3072, 3328, 3584, 3840, 4096, 4352, 4480]
+  Search space for mamba_num_heads: [80, 88, 96, 104, 112, 120, 128]
+  Search space for mamba_head_dim: [56, 64, 72, 80]
+  Search space for ffn_hidden_size: [9728, 10240, 10752, 11264, 11776, 12288, 12800, 13312, 13824, 14336, 14848, 15360, 15680]
+  Total search space in consideration: 17472
+
+Top 10 candidates with scores:
+{'num_layers': 50, 'hidden_size': 4480, 'mamba_num_heads': 128, 'mamba_head_dim': 56, 'ffn_hidden_size': 15680} -> 7.00B params, 0.2019 score
+{'num_layers': 56, 'hidden_size': 4096, 'mamba_num_heads': 96, 'mamba_head_dim': 80, 'ffn_hidden_size': 14336} -> 7.00B params, 0.4363 score
+{'num_layers': 48, 'hidden_size': 4352, 'mamba_num_heads': 120, 'mamba_head_dim': 80, 'ffn_hidden_size': 13824} -> 7.00B params, 0.6789 score [BEST SUBNET]
+{'num_layers': 56, 'hidden_size': 4352, 'mamba_num_heads': 112, 'mamba_head_dim': 80, 'ffn_hidden_size': 10240} -> 7.00B params, 0.5203 score
+{'num_layers': 54, 'hidden_size': 4480, 'mamba_num_heads': 104, 'mamba_head_dim': 80, 'ffn_hidden_size': 11264} -> 7.00B params, 0.2615 score
+{'num_layers': 46, 'hidden_size': 4480, 'mamba_num_heads': 128, 'mamba_head_dim': 72, 'ffn_hidden_size': 14848} -> 7.00B params, 0.6165 score
+{'num_layers': 50, 'hidden_size': 4480, 'mamba_num_heads': 112, 'mamba_head_dim': 64, 'ffn_hidden_size': 15680} -> 7.00B params, 0.4214 score
+{'num_layers': 54, 'hidden_size': 4096, 'mamba_num_heads': 112, 'mamba_head_dim': 80, 'ffn_hidden_size': 13312} -> 7.00B params, 0.5894 score
+{'num_layers': 56, 'hidden_size': 4352, 'mamba_num_heads': 120, 'mamba_head_dim': 72, 'ffn_hidden_size': 10752} -> 7.00B params, 0.4688 score
+{'num_layers': 52, 'hidden_size': 4352, 'mamba_num_heads': 120, 'mamba_head_dim': 72, 'ffn_hidden_size': 12800} -> 7.00B params, 0.5596 score
+
+Dropping decoder layers [43, 44, 45, 46, 47, 48, 50, 52] from model.
+Original hybrid_override_pattern: M-M-M-MM-M-M-M*-M-M-M*-M-M-M-M*-M-M-M-M*-M-MM-M-M-M-M-M-
+Pruned hybrid_override_pattern: M-M-M-MM-M-M-M*-M-M-M*-M-M-M-M*-M-M-M-M*-MMMM-M-
+```
+
+> [!TIP]
+> Here we skip the per-candidate Knowledge Distillation (KD) step for simplicity. If you want to find a better pruned model, you can take the top K candidates' `export_config` from the logs above and then export all models separately and perform KD for ~2B tokens on each of them before selecting the best subnet based on your desired metrics.
+
+---
+
+### 3. Distillation
+
+Non-default arguments: `--seq_length 8192` (default: 4096), `--mbs 4` (default: 1), `--train_iters 16000` (train up to ~100B tokens — you can stop earlier and take intermediate checkpoints for smaller runs), `--lr_warmup_iters 100` (default: 50), `--eval_interval 400` (default: 100). All other arguments use defaults.
+
+Run on **96 nodes × 8x H100 (768 GPUs total)**. ~600 H100 GPU-hours per 1k steps (~6.3B tokens), i.e. ~45 min wall-clock per 1k steps. Full 80B token run (~13k steps) takes ~9k H100 GPU-hours (~10 hours wall-clock).
+
+> [!TIP]
+> While we use 96 nodes here for faster training, you can also run with 1 node. If you don't want to do a full distillation run, you can stop earlier and take intermediate checkpoints as well.
+
+```bash
+torchrun --nproc_per_node 8 /opt/Model-Optimizer/examples/megatron_bridge/distill_minitron.py \
+  --teacher_hf_path nvidia/NVIDIA-Nemotron-Nano-9B-v2 \
+  --student_hf_path /path/to/Nemotron-Nano-9B-v2-Pruned-7B \
+  --trust_remote_code \
+  --tp_size 8 \
+  --pp_size 1 \
+  --data_paths "${DATA_BLEND}" \
+  --data_path_to_cache /path/to/cache \
+  --seq_length 8192 \
+  --mbs 4 \
+  --gbs 768 \
+  --train_iters 16000 \
+  --lr 1e-4 \
+  --min_lr 1e-5 \
+  --lr_warmup_iters 100 \
+  --eval_interval 400 \
+  --eval_iters 32 \
+  --log_interval 10 \
+  --output_dir 
+
+# Optional: Weights & Biases logging
+# --wandb_project \
+# --wandb_entity \
+# --wandb_exp_name 
+```
+
+For multi-node Slurm runs, see the [Megatron-Bridge README](../../../megatron_bridge/README.md#slurm-usage) for details.
+
+Distillation saves checkpoints in Megatron distributed format under `/checkpoints/iter_XXXXXXX`. You can convert any intermediate checkpoint to HuggingFace format using the Megatron-Bridge conversion script (see [Megatron Bridge README](../../../megatron_bridge/README.md) for full details):
+
+```bash
+python /opt/Megatron-Bridge/examples/conversion/convert_checkpoints.py export \
+  --hf-model /path/to/Nemotron-Nano-9B-v2-Pruned-7B \
+  --megatron-path /checkpoints/iter_ \
+  --hf-path /checkpoints/hf_iter_ 
+```
+
+---
+
+### 4. Evaluation
+
+The eval config in [nemo_evaluator.yaml](nemo_evaluator.yaml) is for Slurm-based evaluation — it submits a vLLM serving job and runs evals against it. For local model execution and evaluation, refer to the [NeMo Evaluator documentation](https://docs.nvidia.com/nemo/evaluator/latest/) or this [blog](https://huggingface.co/blog/nvidia/nemotron-3-nano-evaluation-recipe).
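+
+If you want to sanity-check a converted checkpoint locally before submitting the Slurm evals, a minimal vLLM smoke test along these lines can help (the checkpoint path is illustrative, and this is not part of the official eval flow):
+
+```bash
+# Serve the converted HF checkpoint with vLLM (OpenAI-compatible server on
+# port 8000 by default). The Mamba cache dtype mirrors the benchmarking
+# settings in step 6.
+vllm serve /checkpoints/hf_iter_12800 --trust-remote-code --mamba_ssm_cache_dtype float32 &
+
+# Once the server is up, send a single chat request:
+curl http://localhost:8000/v1/chat/completions \
+  -H "Content-Type: application/json" \
+  -d '{"model": "/checkpoints/hf_iter_12800", "messages": [{"role": "user", "content": "What is 2+2?"}]}'
+```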
+
+Before running, update the following fields in the yaml:
+
+- `execution.hostname` — your Slurm login node hostname
+- `execution.account` — your Slurm account
+- `deployment.checkpoint_path` — Hugging Face checkpoint path (original, pruned or quantized)
+- `evaluation.nemo_evaluator_config.config.params.extra.tokenizer` — same path as `checkpoint_path`
+
+> [!TIP]
+> Uncomment `limit_samples` under any task to run a small subset and verify the end-to-end eval pipeline before launching full evals.
+
+```bash
+pip install "nemo-evaluator-launcher[all]==0.1.90"
+
+# Set required environment variables:
+export HF_TOKEN=
+export SLURM_JOB_DIR=
+export HF_HOME=
+export VLLM_CACHE_ROOT=
+
+# Set additional unused but required environment variables:
+export API_KEY=xxxxxx
+export INFERENCE_API_KEY=xxxxxx
+export OPENAI_CLIENT_ID=xxxxxx
+export OPENAI_CLIENT_SECRET=xxxxxx
+
+nemo-evaluator-launcher run --config nemo_evaluator.yaml
+```
+
+**Tasks and exact metric names reported in the results table:**
+
+| Benchmark | Tool | Metric name |
+| --- | --- | --- |
+| MMLU | [lm-evaluation-harness](https://github.com/EleutherAI/lm-evaluation-harness) (5-shot) | `mmlu` |
+| MMLU Pro | NeMo Evaluator | `mmlu-pro_pass_at_1_symbolic_correct` |
+| GPQA Diamond | NeMo Evaluator | `gpqa_pass_at_1_symbolic_correct` |
+| LiveCodeBench v6 | NeMo Evaluator | `livecodebench_pass_at_1_accuracy` |
+| AIME 2025 | NeMo Evaluator | `aime25_pass_at_1_symbolic_correct` |
+| Math 500 | NeMo Evaluator | `AA_math_test_500_score_micro_avg_of_5` |
+| IFEval | NeMo Evaluator | `ifeval_pass_at_1_average_score` |
+| SciCode (Subtask) | NeMo Evaluator | `scicode_pass_at_1_subtask_accuracy` |
+
+**Key vLLM settings:** Tool calling is not enabled in these evals.
+
+For more details on NeMo Evaluator, see the [GitHub repo](https://github.com/NVIDIA-NeMo/evaluator) and [documentation](https://docs.nvidia.com/nemo/evaluator/latest/).
+
+### 5. Quantization
+
+ModelOpt allows stacking multiple optimization techniques. Here we stack FP8 quantization on top of the pruned and distilled model to get an even more optimized model. See [examples/llm_ptq/README.md](../../../llm_ptq/README.md) for the full PTQ documentation.
+
+Following the recipe of the official [Nemotron-Nano-9B-v2-FP8](https://huggingface.co/nvidia/NVIDIA-Nemotron-Nano-9B-v2-FP8) model, when quantizing the pruned 7B model to FP8, the Mamba and MLP layers are quantized to FP8, while all 4 attention layers and the Conv1d components within the Mamba layers are kept in BF16 to avoid accuracy degradation.
+
+This is done with the `mtq.MAMBA_MOE_FP8_AGGRESSIVE_CFG` config defined in [`modelopt/torch/quantization/config.py`](../../../../modelopt/torch/quantization/config.py). To apply this, you need to modify `QUANT_CFG_CHOICES["fp8"]` in [`examples/llm_ptq/hf_ptq.py`](../../../llm_ptq/hf_ptq.py) to use `mtq.MAMBA_MOE_FP8_AGGRESSIVE_CFG` (a shell sketch follows below). You may also consider using `mtq.MAMBA_MOE_FP8_CONSERVATIVE_CFG` for more conservative quantization.
+
+> [!NOTE]
+> You can also quantize to NVFP4 using `mtq.MAMBA_MOE_NVFP4_AGGRESSIVE_CFG` or `mtq.MAMBA_MOE_NVFP4_CONSERVATIVE_CFG`, which may require further distillation (QAD) to recover accuracy and a Blackwell GPU for deployment.
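+
+One way to make the `QUANT_CFG_CHOICES` switch described above from the shell, as a sketch only: it assumes the `"fp8"` entry currently has the form `"fp8": mtq.<NAME>_CFG`, so verify with the `grep` output before and after.
+
+```bash
+# Sketch: repoint the "fp8" choice in hf_ptq.py at the aggressive Mamba config.
+# The sed pattern is an assumption about how the dict entry is written —
+# check the file first and edit by hand if it does not match.
+sed -i 's/"fp8": mtq\.[A-Za-z0-9_]*_CFG/"fp8": mtq.MAMBA_MOE_FP8_AGGRESSIVE_CFG/' \
+  /opt/Model-Optimizer/examples/llm_ptq/hf_ptq.py
+grep -n '"fp8"' /opt/Model-Optimizer/examples/llm_ptq/hf_ptq.py
+```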
+ +Calibrate and export the HF checkpoint from iteration 12800 to FP8 (takes 1-2 mins on 8x H100): + +```bash +python /opt/Model-Optimizer/examples/llm_ptq/hf_ptq.py \ + --pyt_ckpt_path /checkpoints/hf_iter_12800 \ + --export_path /checkpoints/hf_iter_12800_fp8_aggressive \ + --qformat fp8 \ + --trust_remote_code +``` + +The quantized checkpoint is directly deployable with [vLLM](https://github.com/vllm-project/vllm), [TensorRT-LLM](https://github.com/NVIDIA/TensorRT-LLM) and [SGLang](https://github.com/sgl-project/sglang). + +> [!TIP] +> You can run the evaluation using the same `nemo_evaluator.yaml` file for the quantized checkpoint also! + +### 6. vLLM Inference Benchmarking + +Benchmark throughput using [vLLM](https://github.com/vllm-project/vllm) on a single H100 GPU. Run the command once for each HuggingFace checkpoint. vLLM automatically detects FP8 quantization from the embedded `quantization_config` in `config.json` and applies it with no extra flags needed. + +Results on a single H100 (ISL=32768, OSL=1024): + +```bash +vllm bench throughput \ + --model \ + --random-input-len 32768 \ + --random-output-len 1024 \ + --trust-remote-code \ + --mamba_ssm_cache_dtype float32 \ + --kv-cache-dtype fp8 \ + --load-format safetensors +``` + +| Checkpoint | Model loading memory | Output tokens/s | Speedup vs Nemotron-Nano-9B-v2 BF16 | +| --- | --- | --- | --- | +| Nemotron-Nano-12B-v2 (official) | 22.9 GiB | 585 | 0.74× | +| Nemotron-Nano-9B-v2 (official) | 16.6 GiB | 794 | 1.00× | +| Nemotron-Nano-9B-v2-FP8 (official) | 9.6 GiB | 1,012 | 1.27× | +| Nemotron-Nano-9B-v2-Pruned-7B | 13.1 GiB | 963 | 1.21× | +| Nemotron-Nano-9B-v2-Pruned-7B-FP8 | 7.8 GiB | 1,147 | 1.44× | + +In this case, FP8 delivers a ~20-30% throughput gain over BF16 at the same parameter count. The NemotronH hybrid architecture (Mamba + attention) moderates this gain relative to pure-transformer models, since Attention and Conv1d layers are not quantized. diff --git a/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/figures/learning_curves.png b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/figures/learning_curves.png new file mode 100644 index 0000000000000000000000000000000000000000..40c507bd1b80892bb08c52ab3de6f432da2a316c GIT binary patch literal 252727 zcmeFZcR1Jo|3CVYY-MD$2vM>{Das0khF7WVRYX=<*<=(_QdUEFl}ZX(*|RcQNGT%} zMUj#0ac+;kpYJ*6`@7B`=bv+3=eo{uU7t_yx3^xN%!uzyOZe#H4`mN+Bab7lb{^-f-E1ibtUb;+xq3J`*a&&qy16^J zx+uuV%gHE63E6vioN-r{m398#e?rF9?UbyYSV0s%h2_jXV|NOLpPT%T?o>+4VG13E zqN}B0=zV*r>zpA^TL;sqtohAUn>|kULu+|$=%|JUhB^FF8ke~)uYF;3-Tt`@pVad& zDVjftA6s|M_8D84RJ^ z|I3H%lU^OC{y#rV>vVPM^8fi+B8S-+#W?@xr^TAc{Xcza`IR1YxZZ(Z-@kqhdFI-^ zj3RM7E4r$xO4f`fTl@a~Dh&;dns2oiIchsQ=h+hQgFU~0UsguQ&VB(u+O~0 zL%`nIS$e+G=g;>~Io7qO{6?}H63e=2G+I-+-?`qWd3g%EcJB_an=#+WP=2iIW42|? 
z<#kG|F>OywH<#T1_uFenTt!4^LHz$7!4{zopR9=c_cu-a{cCM!ccH}d7j4hQl><#_ z^orYVL`1OejlaSxEiJuz&6>2+A0F9%Dck8gITVS147kwJ{6natchdn=({<_ki8`Ni zZJQrbH|EwmBdK?F zGM@Nb<~g=y_4ZVU7Wqxd*qk_#mXQ$|#Vbkk{Pl75)|1Q<#~zDM-JEZ~%-!DJE~lud z_2JRc=6r`EMMXua$DWw{oLwzZ>_4-0rGRo+Le&V?@exQ?r3i<6S>YqcXm}* zhcZ#qG#R2ds0kiCc+j%M+y4Fi6_1WRnN7cx=CyO@&e4ep9tP3Y%;OxH$FuF!m+?k= zlrMgL!M0w-i#_w$lbQ&wjdJ!)LgCresfF*u!#m}+^uInnp{FF>y7h`d3?F&O(5vu$)Mha;hIQMWs`4G)S9nk>^c)-Pjk}I z(G7elR|%XOR${z`+OnnEbswq@Z=B@umCo(?T+XlR>#I7RxpD~#Ug_*Kqg0jqO^wct zRGwIv{cA}}nvgYkWVprScc-z7pQh$ALAAi>`ucqoAA!qwoSN8Ly+cFu#g}wXf5aLy z%(f6vw=iGD&wo+n+;5?6+juFXiwl0QvC~l0k`x_U5~Y3yiO6l*yqU@U`)e-G``G^c zjvpUu47Q|KS~U!$+&`GCO!ioCneY6!x)`#=xmT^C>-+Hc%a<=jK7TgxNt-Okwr~FR z!KbpTGH3&pO4UnI+P3Y*>l+FmCKq3DtzUn8!JbmDVbvoJi|Zywx?||*8B=vG zueB@@w@iMOD7#TwdRU+T+ak2Xx8+FAxe*J)EHmT5r}sK8b4Nx-dQS|*j&=y+K5i+w>R^kh&ADFu`>X9zyTPVS zn@Ban!aei%=h1r)9&A)n;x~QfqJ4Sowx%bh&)%7+dR|?)b@J4yZ^IqW$Q`!4e@{n= z*`W6Fo!U}#SLwN+W+~kz9XmTanw6IJ)~#Fnvh;25b+~e;s0P%$e{g7Q=(B=#2UZO= ziHAda zn+cX#p-Y#R|vt|t?7}s+5aJt^=UEZVo z;{8E;&`$ZJjDk~Bg{)1Jl|5HqRo8C0e_&no8U>wvX;izqD+0B-wy&R0t-G5Yg$S=BMkv?_n*ORiWnAhj;&-`JEyb^}N+1av<3DRs7@5T9@j2wbe`OZtTzp{h0 z4MK7&bqx)9cJ0~~c7CWW+wxo0dHU5m&h(s;>8&)y3u9%!H~;6ITKLj&?1FBJg!Zsf zWNW~E*$M!uBqId+V%F;O<=`fN2)%xr9X@Tgi^TC7f<40ElRVKguCIDhHV zC9_$h&jp)~XPHGx9DQUp_Voob2ghDtU*)%Vc4r8$h`Qw ztF*SdT4QkTq^)i5K=a)lor;%NCYb z3DUK|45<$uY-V59n6*uik8jg#<d#!ddVcA5v%T$HqiN{Cij5Uaoll@+H}psD1!E zGVRY?V*m#tTWWoMe5$*iJMmmeGvYZ$w*Qb1zY$Cj3w&v~b`L$$GubDllhi-JVy zX|oK{MvXJvWm<(@!^Ok%524IYbIs1qK0Nkh?fUiWo3bs{(mid^i3*)Qhq(WEE4*nF zBLR8u?;nr{NLW^ROSGn@rsvnMOQ-=N=s@ujM?>!3-H2Zs#V>2ogU-f3nY_(0p>upW zWpto<<4{*AKkjYs{{8HVii!p(sklAzW5@N|gV1SqsHohvDED2x-6@H)uLuuuV(9ZK zEMvaOHzR@9z0pA?hdV9;4ELa#?>ltpA}}6#_+G>9X|CNBKwyTx`-NP0E^pW^YJ9@@ zLrMA$f*A1@zb-D!O^^1{yScj)gnR$MEt{UI^R(^3PClY`@9s#qTH<+MC@wBGGu1N| z82jC&GCeo>eD7dM0%|B$t;c#g$?2CrOHR5VpDUSi<=0}eBD z^Y{d5VPU$(#YJWomTI6cRA2Tzd-fP+9A=O<$ycuoTrhZQquuOy?^jSHI!M}rP{z5a zmc`eNjWwvE|F*mLD20WErRl>5?V0g@0$L}2eqdX%ZhPF^`rN68iNW{H&s;6B-K_u; zy{AWo4<9~kkgmrIbg~9iZ>Dg}5Dy^=z^AXTPYCr-#cN1>qW!nJMbE3TZ*^zK`*g_; zKwD{QZPj`a#XHt@uG_jPRl{xAQvGbHjI#1t0HHd-RIYXF*zk0XOigRMy3Bj!MJ>v_ zNoNcT5AQ`&+^4S}8X3s~vcaIDqCyq~R%rO56E15NWAFL0$tNB=2_Tj+FJX4hQ#ke3 zYIb5U{LX=FK=lnl^IZ5X{Oy-p@=A_UJH333jFLv@_T|Rhd&5cr^$3fP=gl%JUM4i6 zBDHN>!M-1VQ>wr%1O)|Gs|A+j45LYC8faR#_q6=A{`39KQ|nsB;;A{a;R(F)>6sZ} zQPJhGv9a~__3DK9JT_KfU#qxp(A8ULr1a#}?=J7r9y(wOgM!mXJ%-zlJ~_XZV}t5* z%b*3ZU6Z4u;%?jIs~0@PaZo$&{O|6{n8;O| zMYeB0mcM)ZcE0NBYUY(IUxJy)pZ)sWzxx-K(e&`M?lV8$3N%k*MSN{9a7@D1tNAR8 zWnxs~ks;S4%C%-qBoO8L-M&00UPg1RFlqoCs>M&soo*HH-K#_?3z*{z3=CvmvEl`G zns%r_E)b#ox!-1;6#=FbzT~Z3ym+Fc(D_s8IoV^6jf8#8?d`>DYHOc*{yNr7SwUH^ z7Pvm|)H?vG`}PFGqCRl)@m)ZPx565j@ydT$S6BP$m1OtC2dtgvR^xpQnip?{hclFw zm+LDEX`sQ=lMNrjP3Zv%rd@eWI$}*2i}%cU`al7;lKQ@V%$k~-Jp%&~z&DLg&aGLu z?h+mi+2OQdU0vNG&tH5fhv5Nplhy!>4-GS}*{2*zQSFUUTQt~l;%IK*Y3#R(g-KH> z<6NzQs~%_0aC&-Klz8Y`pZf8uEn8b4WBc~)wBRt%Q7NMw_20_gf4;ByQsxtio%A|c zMX5VDIzL}=`SRuU4BTLP5h2{9`^t&;Vztof(5R`i7TWmDk`uqX)`1de-dI>zkVl>3 z4wC)X;J?4VvG3qP#$XZ44lo*{GH?0G-(8aJi=(}Jq%6u-a|rIL!paq%Qr@s(gHv~f zdHyhVl+jZghAhjo zZ>3Dn&hnk^2?9JbDs;Nhc`m@lMuY$m?D$%ALHyK1<2*ZEzDgrPY=31}ejWOp=k`up zTU%H{f_3x3+k{s(m7a4gh$9pq@8PL^Drddpxd3d$-rnBPzkgqp`TXfin7DSdwXj`H z1Bd`SQ#fB9SbHTNT5;98c~FZJ4cEkIv%K}Dt_uKIU@KficR+RN^Gm*;o?iX#9S3<^ z&P!-?fO;zkH z$FWUj|E=v$swR3Ki9cMJ0f^gwQ<3FeiWL!HP44KanR5QlXm&IRXE;w zfZyHLc04P20l(m}>9dvCN?|cEhtd}BJPnL--)mvXoWFXN)2XwF z`uiEf)v07iQjFGema_$C&z>#v{VU-$^yxfcg9JNsL(EXO33b#Q6)!wBthLpUloe}= zpT@+*gq4+*klDMLg@Z@#6%9|kyu6OAh8i#Jsr;dlytufyAZLSR6h-?c^@(Y81-&Wh 
zjO|4dQSApS-TGPYA4<6b-J^MOV8EWJ4|uo17cVj~u2?HN)sCK4&v0Z{;QVUI<5|za z2-pH#Gep29vB3D-^*(<5IJ2l~sC{FrZP+5$iWQfG{{Ge1(lT7Hy%etjt``DK-Ma*R z=i!kDR|aMPnaR8|ic%5hW zMk`N@yjkat!hX|0tT|j!)!Vl#58T?$yL$?d%Bib_7o`4tdb*gh$AHjHvBaALGv~PEJluP|%v$3-KnS?BpeKUAtAaw6)W~{s&sp*={PiT39`B0*u{s;HIdD zW>Qj;6;`JNhY6pyre?^%(;=xJC_k5$aj)HWIt=%D$?oB8WlvUAN`8UOK%r=sD3&G# zr_->?X^ykNnxuK!_iO?e$F}EZ5XEPvLQo{PRP5FtHcU3x=qTr+9UmVb?QJm#dxLo` zrH?W5_U+qYOBXL(uz7oXXN~*{EK*QjkAWr)?-3|8MB9Kc6{}LH1qo=oODA(cKtR50 zH+hgK^W3jEa^Kx)g6@CA0P_q8r9c# z5-Q1ZXj}Fz_jF^gxPt`|gu3l?tNM?($^CYl1y%i56P)3EeYuM6_0GEq2?<5sV>CHV z%WKE7%vit=d(S>GDaZk=)9mU1dzwbcOHE5Vk2n9dbfh8_e_?bUwK3=P2Z`b~A0MB{ zW!zxgO@+>;9)m3m-@kvanf&oC<;2jZr<#pHpR3&lM>Yw$`Uq2Kz6Wv-kcwKf^yc+z zLkro!#rYXLX2788=4LK{JWka=XtZ*kKMjjDmQd%&C*ygg?O`HM*Ilma}igryo6p8w$PsM5DNYiQK3)3FkwP_ zV+0=_&$iI@&-Zu!oU3hEck9NDG%$#qyu4M6MtOO8z5V^+pa_)uQd!@L0b%~Q)-oUW zw4q1$qtycEcYtPZ_RrRBcQ7yY;tfy}+r0S%fC6n!E0&i3VR6f~YuB_C{5OD&Zrpo? z=jUp)rd150hL4T71XR4jo;-OX*TV;r$icxO>>eQkfHpcdrUAX9xgx-?*~-z*ZWCEM zva+%#?d)oro2gle z2F+*p=4SZ4d!p#e)h}P_C_cOe1qk<2+=eI3KpsWAud=do&eNw{?SkC$^75;dT+Gtw zZ`xKe*>0e}YAW(5XU$52v4u>FMboyQ!A+ zpD91*D>C&St;mX^<-bqao4<1|z^cL>4nj73Mk!>H| z{GHK`KUfR2)X{6C+;oLO0jV&sWHm0^Yim_eE*&n?y`X zGB7beJqDp;weURW;>6%88MC5b2t3QCyQ586=;`U5uojcJCMJhX{g)P|G-r-wTU zS;W;HFeQNUo%-a-WnxUcd2?X(F7#~;U*D3ZH1D1&I%?fpvPlfTZP~H~>@B9L7u31e z|CQI=q;pY8i497RL^j<{tgo~qd-m>S(oc|TP+H-17BD4}aZhw~w3dE2s79o+Hsm+$ zJo)rfJc|&a&v<(BG7bZe#Xx;zo+~LTVy1tj{^c42co^kso4hZ?Z|gJj^|2d45xFcn zK=6R1G?^uI_4RA}8m@D#g&A3NZba}uk}akFSAF8`y2p>DpvGz!>4@HA+J*WXe&Z}i zF`*AlX{Ml*Xg5cTbksnIvAa4U+e6|2{#=0tzHMCKxm|Rd!}(>w(Fn zSs&{jIKZ2}7k$h*?bO{2>=Y9sH4(vCT_#Ukw=`hRt76dis3%?}K(*D&=rv6(Et+VK zL`}cF(_JS&#zW%F=iudo(OY3Y80A=jNwx(|Zx>RcX0Uj-HKTNjslIw1^5^V_GVCu+WdvRi|8)(ClPX)P!A zQFJsru+2I=iB-$I$ODkrgU*-Z+_4?f_8JH@)f)n*g}bMw{{H2)WxV=kd7i`j`~IQa zKv!Wy%hfF|PJ_RNTZK(edq=O9Uj`vS>+oT2bS;yil}@(NmoHN@k4Y1PUA46J#J*Ey z2H(NhYbZBw+*q@0*)py30nnUkKzNviT(ACwo@d`xvJEy5rH^4mcdIYFgYv3^Bz-*> zmn`v|de|`_eKiYSvl9;cQ&eFqSZM-{1uo8vk+|8-JvY!h zn^R9_98T}~_H85U_)hOpArx&Up%II5Ae;|exvU+><~9@W1e&Oot*uyRDV(%$umZhM zx#s3(P19u*CuirU-eWdt=2PF_tYcrdogDx;B0BmxII*1fs8x7f3B0qRp`jd)!Nc|r z4!!WBpb4(}9g0uVfL(@_u8!_|^~MdBbLY+phiaoZqWX$Tl@cFpr~7y2zmWiGP|<~M zYOLet4gpJKor9drTv%Ax)ZAPR3xJ)Xd4U4OC_ChL`h0r|v90E&M#AgR8J|CoTos-g zjy3&HQhjo6&rvk?TALddnk8Pe94**RSPH)^iFAO+z!q z9zD9H9{XOGN7UKHXbREo3#I-y*fHk%g&)OT!QPtcD|47$Y(AB+g-m%#hnAKrgiUOh<&qmtf!j3Sp6gX zhbX}xO1CUxi7VwjsvhUO@Az?kv`VSgv%y~r-m~S}y;)1G`g^WBAknojCW!tsyHMk@ zgy3tAp_w%C;R#;JW6YnItzc)*0jckO=0JMR+uM=oeRTDi@9yQfJEI6pECSH+{-icM z6$^kAD=e(hx@oArAhTsQR|t3PJvp=nWigDR3`8v=eSD9W)`VsCeq!=VfK6U3C@2_6 zxqNvAHqFM9+vVittMR-53x$21Vbs~1kDxw8ehg}bLVrkYL6rT=^SZiAjOT2(xpW$} zQjPBKUt?``{3~#2%@PQcsL5%K!n?U{{fz_Nm1?M&L^Ve|pMKOb9t7r0J1C{?sC{L%yyigkBPgjv6zDA;8ghacXyQHe{rd%I5Nu2L z+TLtaRODH)w(iA?ewAB|ut>hWynJM*kTu#Zt#TN4STo>DyoIR|>?$J3g|V#C`jxwK z{SJfHvzvF9zG-PWEVDZwO8My2tHwrde<+@Rp{W2 zX@%Z|u%Ga1yD+511J*xM0k!Ne6c26X4O&`wChixeoZxa5wg+dxOW^5(wR4$cStxWE5~2&?x_kAbUl%$v=bbQw8= z%QRLu1iRx2hF9M}zm5FqG&scih*7Nm?IQsNyJ-8upxO)nfNby639IWv0gC@N6kYwT zw4F|$a{Rk3ODdo#K9{YW7kR`VG~LTYy}!!%U#0b@SRoKWE9k`4YKPLnB0I7pv*2ER z){&I%sRYHrcSOurQtu?q(6Ukx!2IRl`v=|sA2ZVN><|w{wA4_OdWucb z9qzDj2lLgba*11>_fEaLLo4i9n48kp@Fx{cQih+OpStd7vM7BqXi&}6B#OtM6)ji2 zz~-o&u`qhJ#5J4i^ldfF3 z;Ms>`UuNvQ$)MS(MN_{0kX@!PR2{2Meo0} z%h*s2T|$$GU796ILZYF&4KJU$^=(5#tm#(G8)O}!>1igD_S3vglXg%Kq6$28{MnijO2Dl0k!1#DUcP~#N8zY_5rrAKoh@pg zfvoy+PonuA7+e(lz<6BSx?^ypyOQ75mxEN(Fn8|a z9d0hJfKA{-AbBjLuTM;PP&dEEZE7X&12;%gIhWNbpI=ZAzA^_3f(FXL+MNlm;?{BU zlz=YB{?0O=BNMl21^c?Xx_W>A&Kg}f+3+^18Q=tzLVNJ>s#UAvv+jzk6j`~rtlYhO 
zw{@yH(1?p%6*j%1+?L(b5`zz+UW!5597ChKcmF;sy-uz`COE#{DHT&=g(d;R?R z^WqU2B0F~)EABXwXLlfc3vm_*LP+~Ig$2G(PcP)@Q+cFSB3vsMjwLk+pc&r2y*{mW z5|SLPpZi{-%hk7rB0f(hgtG5NlO#n^(2^=Bz1VRb^@==G8kSH_Vc|=NCv<(b3s+Za zUb85vMj3=q${KUz@X72C5LhFZY3NmvNnc`Q z1Z>|lReTvG+Sc~ykcKdeHhs9Ywe>WNUwP}AFwG5zjFY7W=EZgkt%ct<-whE~aoqu5 zKOz~uyB!x7w)xPVRA6=l2e?D}?}c*Ti|HsjdtzWcwC!;AcIWnp-**8wC6fP|e}cM5 zMFym|uTS)(?&qf`m`K4$l-{2(bPm#RY5&K4iGsHS#`_}0_arDPC@2*Fk?63Cd$Py2 zE${fTrDX=nInW_Dv%W0x`Ey@<^ifV+ftqVqiP-790*CiHz3gtE3k#e&EKSIAX}7TN!_MADpdiCm5AZ=Qz8FD$->i&fSNj&FZR;?`Lstsy^&!MVt z!Y4tM`ikB0snB^HIOi6R>4y95{$pRmALbp61;-E%IOx0k?BKZW^~LkDr7s=M272F0 zN)q3{S5_rdV|KxIi=KuvOv2lnj~qEtyx~u7^2U~AH*nk}wIJ0ig1aBJy#6ph+2FeA z$z$N^OUlNE$qgplpvB>^G44V-p&1`*am=OnX}jaWYOO7*uneNTq_R?Y@j*D%lFIxu zD_W0c9M)#mtAF!G+ruMYZsTV_gn^;xn3y_<3}I4hTDDkYnM<-}#g^CM-S#QR+4i4{ z(S7HC{R2{h!s5bPw{ph_?$*SjxO(fBq~3<#0|O5yTk~@1U0p z9SD3Vd0tba>G0$JkSDY^Q(k-+GM>v5 zyOWZW&8^B&R6}91Aor*D+x79lS}+Y(SjIdbyC0Pa*54|>uQzkPEHy9Wp@MkY!>K|f z9*J<^&recV*lf$M@((nj%jY|H@Da&n&SIej*gXu``>0In{@81855o)F;`?~0r0kzVUkZRF0^)#_a9ojEpvuL!VhK72Nvj zmz`PzG9duYrP<*MB?MZ{mYmCQ2)E4x3DTTg8+i;ji_gBES@4Qg9nsOz(K~fovUDYK zoU&4;!ccpSYsR-i9j}Fg9q#^T@cm|Lur0C(+QzTr82ssdZk##!D+^2>23rrR$22q% zkU~yQ&R`-=y?=ki-V&7N(lVcW0;>K-JA$;GAG|_dDE!&8XZ8)8@Iv?J?rN*7Mj;LR zlfECxi{8P(8?H?nx&_rosh_+`oB{$zzU-!|KNh*xedkeO_xPOoF(aTtE`~6kP4wDW zP+B+M2A2h2N=jPYiQE|U5N2RgP)d`0`}4N168in#p(`&O{}%+_#{d;rPsP~X-TiD| zJr@$7>p*Y_5rl5i^Wz7TwDgl@r$)rDS+4cc4qeqk|33F5lhVmkLOd68&&SrhW;U4k zW>zvE)tK1*!!+NX>&p6_wDGwgdp}6A1kYR5fbBPB9+!>(*eLywJy)RwR9)=mU%OMM zF2dHYFF&@`+hp}M$0s}5B8MFRsKn$d2&7y7>gge=$=*)5f-K#EbJB;890^yaGz7LJ ze-;af=|3I_5tLfD1gV?);Gh7!1*6upS&i&Zzk;`MGpvUtnJKNm%%I3`qgvxnPTQ%X zAt?>pe({WPI-gK8$VktD>)Fth$osBwM?6mhM3!gel`V%qy^~a#%afbB)>nW~8rsrL zxgrldZNrBK8ug3@=tBG^#SL0-ZJ(-SQ{q)W^uYy!6U z*)q`#-|j7?KX|q{nGUBs!p9@1B?NhxQOK2I>SeseSZpTw$$+bv-<|YCF@uRR#iJbx zGX=I$nxE%*3Ioy~AJ}|=dPY9A;up%_D)>p9A0vatzA_N82?dU=TO$_=Bpo!QaL+*M ze=l_`#P^SCT3dM#Ks%2Z*})Xm%b?%r?WE%mbd@T>%+S(cX?exk;O6C}gC)5i%_qmR1uJI0uZy?r{!ajJ@ab`Q z-zTj}dx!Eh2052MyNRfon*V}<5}Sa_cc`d*12e1{N`S)u4aSLmdwxb|jGvN{5+b#W zdGUv~GpCA&%35Z^Q(Dfzz!LL!VnWXS`~FYu?KSoF%xl-Kg@J!8l0kZ(f}C6x+`^o| z1>ch=PokF*m9QEimZ*4}JQxXXbaF7h@qnJmS$%_>hixA5acFtp(nk;qqoOludLyQ{ ze`yuwDOIkN`26D@Q_Cv=VF)&C8TkmyMm#-H_m#-CR~$am1%38&g8!qJXeFM7_nrpY zujxAvJZy~^678=ckY)Id6F=QZ&|s&-r<}Bwfb>(#b>}wUTba1x3U#|WHSQqQwzI1% zywVch35mMGfT62j7j}D>&sK1(tOOy>&yK}d;u8U%K%1!sAtxP#DK+DAsC39PWyxhq zBa2b5!GfTN=4N*!D@d9o-ZFE*RZ1lyDLy`3F}`x>SNw3IW%4G|rJ1?8%=O-fTaeJz zzF`LZf9#>-8^ABr$!d}U9&8aKLM7+`O)LQ^mY%`ENcQzw6|@-$})cd*n=)J@ii(ryMK&4*-!oE#+>VSE}itR__A z)tz|=y){pjLkj^QT8<)!i`*2~d$KW-?oS$+qgs3GSA*3g!3*;5!?|Ua6aFAPY-GL}W6)L4#yQUgFlRTLwz~eSJJduV4N^>lF4y z&+%@}II=fMV2gl3+REIFr||(7`_H{wNCe2V?yHZj`PS5AKn=eB_}xOX`RYd&Y`L#; z;!paH(c<<_s+5anAAodWZ7lUUh(Wqb=uK_y9{Uu3(Ni(6sjFjxAIK~C>({RnP%FjOUWH^E3Bs5$pN-P+@MtC<$^{oEXKF%Wva;^8 z8Qv~_6z>_CZH8tq9Mm#e7U}y&@@UF_laB|^>N4dxIygjDupznh;Y*+}7U5q+A@dO)l$&g2 zDf#*HQOsU&?Y_n7AIK+3qX zY`RJ-z^|;*E%FFxaoN!4ln`qQt7}P4$N{y(tgJcK=Z22IPxH@4v>ns93Fl*vI@n@+LP@wCdMWvQOkC@d@0+KI3p}3rafp< zFQx$*7|~N&^>vhUmcd5-o1)k0t{$&zxZle({&V}FBcgC_Ir;fKg8EE6L^(F%b#8ZC~T}2W3=8j@fl_u#HH9xiR-BdPza~4T%6VS2AYXR z*o{mRX-iz(3e=+kE$-+d9-<0qS!4zQf%P1Sq}ZLsc$A-l zmUae{DV^nh{GceB$wtt9s0J_KW%uWrL-f||m1mPwQabLDjKJPgqp9m6*j%*z$E(^B z3oc+1&9REGU>Y|`g;ML{fS@?pVP{JJOoi-LJ^S~Uc6K+?ewz;6iRRj0eWSuH5ExjUWe(Yjrw7G4bToeN5 z`xK8zAuo!#9`WWW%D7z(5ik|if5Avnn%=Z~M{;Z|i3>$3JHAU+VFE-XkuM_Ndk*QH zqSQAuRD&yOXLptPM1MU2Mm>#cF)GYJg1=G>@sXhQ11L+Xn zg@DsD`-DhZTn@S~86pX8u#1F{e1cK0I|x*xKN^5v zySfyy8Oc`4y{s%5yv<8k`y$8!laFQK-mv40I?oy7+m#Lw(8|inv^rSlcFR)Zyh`qM 
zi3D_k3Gk!COB`M^uHZ3Xa9cH?#9#?YGI$eM?J=^2(OOhp+5SWfEFjilT^tHe;RF`! zY3C8tPYaLTq#vMvaSrE!50Drj?2V%J+0V-_ni$vw5b|5svggGuZjRb8n^U>@Y z{`I5)RTLr@f{`@8BIuR>HN>P|+G9>;LQydOY2lObu6G5@8oIoBhk|yAF_8G#Jv~;v zd;enHoShlBZOt-Yhe;0H zm(HSWJLm2YMVHRi;GLQ?fJzr3A*BM37M=aZ`1R{oIhW2IWO8GG`IgE#8K}g^Xvv-B zzJF`5amiQ&rRvUZzb0sN+#7PHCp%n;RQme1-v0g1Ni}G@pP4C%=`tygCdfQIsQcRR zS~yp&+W7Mm=-VCf2?GOkWA<((2$?Q05GTw4A|v2$qoq0Gx@}#yu^MqAGmzS z*{{q0g~Dak@8Of6G;6{Fg@mRhunpro)RON`1ZHX&4o=N~zvZfx?8;5J+K#g#fiwO5 z6rW@x6cfnhI&l2YK$xWdoJ8ne;=r26kMPi~5oU!<&F5*D*gAU;-Hqm}v6bFr`-%Z< zFEZ;|+@_+e9A$TJxVv)0-Ye_LxE@Ach+L|E=+L30=Bz8m-D0+wBA{syt@|q(Qowq` zH|WXDpnlgZJdnYKR%4j+sqHI1iqDTB#NIA~QDfHF%x2_0SQpa%`U9;2;N-AiL*b%m zf~Z5+2COY?AV5fSoZ>UNYwb$9+^BQ#2^Ji-{alRZU`6g8!LPN!Pa42Wr1ok`0 zIl~Ub3>D+z;+$*mG~O{yUB=PTkxX(jFN?TzsTS3ksPbr>M0JRlJg!4r*v~nkj4NsY z9>RSWp*)bhJQ)hcIw!;0-Huw8mK*S*UZU61^}Tt$PSt-0TnPrm{)j81IIRd2+`PZJ zrU*@YKov>8mk7)DwMZhSV1?_G*wBV62a{4)SEmG*ou^?Q@!Kueit`l}su+Xi6cm(c zt;M^esVfS}PU~Fj--LlvT#+WUcq_nu>&EKDi6aBszN37`Akl4V^u4C7jbB(;7%476xPB4X5RoB*aHKb$JLE6@pL_$A zGh75)yoN}Gb-*%`=s291LxuaR(4>FVyboDg^fTLZCI~t^=b&Sw z#_JB)2Ve}=OfD2c0fd|5ZVXF7XmDgqEC+E8;+1UNNY5{0#yNZ!++Y60%lZ|E{oi1s z9}jg$^U#LDKN4$Ni@nE0K(Miwa&~zQt;PhfiA_IpR;b;p!mj**X8dm^;zC=6$&Z``kS}wVj+E=SabzyKkTDBs3zp zgwY=>SJ&*CHPrph=PDIZAVJVsF%NLeWUs`x-rn;lsjSsHQ9lDA@Bd1)4Adamax%Ls z)3|jNPD8+OK6MkHCm9KbR}j{-vEkY=^)JzX5THZnTUJp~p=qUOWV9OlRUX>Je@0ok zKGa>)xCxk$X~+mjcrQKO8c}yL59Q@mShHUo$oKg*F@4s8vWB2Y(T!A%U%5+2&qF-B zn^Q3lN4W3sI0?hQdc}s(Tj*W94jAV~H@bik2qBCjzzJYYWR^fu5R3kSoNW-MsXae~G(vRE3Q@1Cnd+Dw|%F`Nn9j)g% znQQ@R(5z(ocRa+uS+Xb%i~*w=k&#rq{T`5EHE6Rv_)MRFLsIzz#1QP-P zrEq+}gF_qAPK=}dRL#%)ISVg?3^}4VVfIkE*$rwpA;6mVgNcp+EDv=M#86&Ap_Xi8 zwWWEubN0<5z~e0R*OASy!XtxNL^gOYD%3P`zL+<^1n;!^^XD~qDuhRvLUq#j~ z$7|KZkfr|6bPMu6Rv4SaR5Y35K-MH2HKSz#sR@GA5O1PG30FsGmLyfc{j3rHK&#<@ zvM2L*KSz`!ikN+8kvtiLg{!a!f(`^asB60cjSm%zoz=_DJ9-nB^8H;(47Mek=KwI< zQ#`)cH@9Uz|FRb|?v)FZ>mYny!GB;=`0MWGW`O9K;^|gqz$ZYQe36Kgd>YsDnA0|qSY;YnQ zuq4b9YGW{q+z-x^AgJu2MY%HmHF3mK8N>2MCMM(@67TtGdGg>fV!C7JPHUipnw|$? zw2^k~I5I0k=IC@-ot>Qx9~rVB22aos#CnSX_v;%lIh+<)@boDLIM) z!&>_9sdYFkK%SaFlRzN^nqv?02CigcVwwk+_*CwvB(ia18l)$TkJOv%i;?4OupD2) z96I~`^_ntF(vibiuxo$_)tHALbq6g80PN`CWDuft&&rIUa^t4XVksdXT!mf)z)ba- zVWW^O1l$%5WJ;hfupzXfJwWwjoQ)XNh(^uVoxDEF4d^S|IB+i-u{pfwP+-lka2kpN z=C%Wfky#gV?}*$Cv}JFQFnl--9I5rYyg4!BS>!*Hdv&%0fR_xgpkXy(R0+#SQ%lVq4Wr@Tmm@Qh)G|B zFv;EnswQXK;QjLMbiab?kRW3o1$h4z*-rw6G1p<`S<_m=wS z>Z`9_yOwV*i5uw{5?v>y4aEv93?vNIr|R( z(X-sBZlSY=&?)jYyX!w^V;M4|wUSqgZ-zNTgFZS}@@wg+UtA zE^mYUmyVbCq^F-v$9yhu&Jn%GpkoVu8@n#8O zKt23ET`seyqA;zB1WfF;Ya&jW)Vqx2hzbC-Uep}guT$7es9a0S#KivHByQ97pC`b@ z70ZgL37l>q+75-_65w5Rd;2U~{Le~Z&@^ZP1VXh0U?fY<@uL)&%9uhzbOj$=x}@#S zH^t>N60jFH9lRZZLv8$DUiX zkMR5NbMjv4y{fNyQ5x&LeugV!@J3VH`)Z}5ja6Ax-nbUqBmd`f1f#CpASVGX-o182 zA!l*l4n`T~ny=-@&-RR;SkK{io<4EIiLo!4tMlcU*Il`=ap$&g`uy<`1DDR8JBMk6 zzlk!80pTdN7lE2;U%q6loF8YS^q~H%2J^(Eiabs-qi9!Nz~OP$&d!%{V4q0*yB9AQ zFy*A}E6vs1cEMAI?9XPkbq~}?xQVGeW+`m&mTZsb{%b7(*A6T62K91jj#R#&aXC|~ zTYD4|MuYHS@_v&}=(^TuHY65Tvw(CRj`?Cgbm-kX(<%{i<_Qr|J?;cv7ELZjXucNX z)jYqyA;f^=2yjM5z{_jAhcFvN!aXQx-vH~-YtJVq3t({V5>CHkp2I0s*3hWjvfJ9) zF2aR?STnjX)m{7i`Mu|_rcr&JzLc#2vDyQUIvR-Hela$d8d+&cMQSl8+SazGTuD#d**AS8myV)lL$#(4<{!SGJTlh+e! 
z@STVR3!#g6aF`lNLv{shBp5?=IM9vviyxRY0fK;C7Xgte6f&TPK4}BgXy5vfi$adV z0eL);_R1D70W;|&diY{4nBi(`KRM_J2%h)Lvuny{LA;kDG@b#gi2T>dfH;nfRrl?K z3l|tMHWN*e=fGufFG(R7P4cJYAh`T|MIuUp z-jI|mpz86jBYV+jQ`&;=CSJw5U*3?hhJjIZpP#^Wvh1urB(0QdX*=pZ7^i>eP#pwR zLKJ0>UAX23K#RChzy}Q$ZamuyNMZmuOr&2r>_iMl!UPI zq7Wd$ZX3?*iD10-?b|m3cFA!?U?O!SSxFOc!xiHwD=Bi9l|ey7B_L-RVH}!7i*aB> z;5Gp_UteF^%2&`KNj?!4FFDx));F`o0PPzRA&&w#^KDOou#^G|Ts+WNyH11Y{ z>|o6)hSv!(bO+QcK3R)s;*H>4pO#jl!w#y&Oh^bB!)$L~ix(#crO3@~D_$rZy2~J2 z*3C3+97=V5pa5g!8hDmH-@k8aw}Tu2a&G*|4w3YGxkoXehh-tt&LkNEg`x%^8EW{9 z(@2I)(86E-*Kgibw$p7TQ5zy@;og>_sz)K8o*-je$e)wf2d^5<+y+uV8H9p#v3cs| zJ97?~_IwsdS9`FH$KD@O)4*XTCQ^2z?Abhw2 ziP@Ro{J>6Nvt(izJA5x_EEz=vDegs7hb$5-NHO0782ht2bqaHMoiARz5ZX=-b$!&uQ69zJ0wPu9*b9 zBWIyPb%L4`H?R%T?fKBqPmJ$67^Z97Rxv(u0Wa#dy+CE;kT8Z8gaZqz$iO{DMac<`6aKiAvv_NeJLw>n2ICgM|2F%U zpFh9jl7WfI&CbFgR!K=ooQuH)W{tHt*YaXZF6?HQtrSfD&-_k|k7vMmoUF2(E%a@| zl`t|+&&WQF=e7k%yb8+?^Od2n-RSGrwL0+}g`x9g{UwN94As`@kc>_ai;E2kIdMhNdDN&`4-c}oEHXAF?E zH83=-Cl=7v28PfyPD|UI=rN@KGcdR^_IX zbD4-_4ETB$+K^0!4hH@>rbbc<2^R>-kCs|rI%}5i+#bv~>XE0NIr}|FPC*BM;NM=f zE=p_73H)#Dy?Hd%@B2Qwn@h7u2~9LmLMqABpxQKP!VG>~XVMHw<=3Q6Xv zk#}T_kg0)$P?_g*-PL>g{Jy_)&RXY>bJjYaz1C-_*!#6#ujhI0`@XL0zV4h}p5qHO zgA!Ah0Tq@<6T!nHuh{XNV!HjYbm$00kw)-0il$4@!F-_Jl@SyGQ$REgBc8#p#SI+y z!;Pj$xMB2_fRpP zA3%>kcrd0b3q2S0-awG@9Xp=fSRqZvE#eii{;C1S??Fj($`TMZP!+uivx2Jxo)l3NSDqVFP}8w7Vbjr6FKI1Oj#gl{<~<}<((jl)wXP!vZ!9vEk<&8wssW5+HC z@5rO|SyDB=E+Om)3-fxFlCuZUEJ_bXLPFxn+qd(vmVkpSYK$9u<}Ya_NPz~I?yFCJ zs=$Urpo1fa17sl=aA*`%s1a<2-0V7110)Ke;15u=uO(#!p+iu{fjk$MwElV-E7I`e ziz{cxMqy-xnwFL&*lFaD1~{1sltxKiwNgZ64rnsk;&V7SCgRIbN1|MP16+1yQPjl~ znT-XBQ*6iCcIYQRpc@rULv-zNad99Crhw^lb0irx{jz4_WFjoCRl|>l2nz_t+$28P zPUzk1Fa8v}S~a`j#*G_f14?YIwYBw^s)Xg()vF!yn1qMG1_zdeOQ0Mk+?+sJYltk=4OdMWqz&&I8!uq3TuJ8Y z-E4oIP3dglAS^v`}qjf~}QK`q1R1V?m-zS8tfN{8o_M z@#oKqPsZupd#vNV-Jqqlm8=>1n{Mep#5n>#BR@oZy0h47#2lfq$(zMGhisXi_U+9U zE07D0W1z2m9D@)!T6dFshz$f4dkE@-RO@zbCv>YCWME4mBU%yGA^WRs{t|8%S-_AP zAMDh~o95fM)1;c&{k@~9X%d6Lh}y5R@Vx+vN#`@|7uZk;Lmh?2fN?--TmXE*XJ95G zA4Kk)#Mf*F*i2A9&VmpqmPnugjWmH}R~K!Xg|4nOh1jXaHqhlej%grk9I zHXNJAk=b5*4Hww)pYF0i>foCD5u7D^Ko|bsVbU8}T3$f*Na-noz4#jmhyI7&tGD@f zX_ttNgZ&d(IDjPBQG-5X>Rq5qgQ^VhnWby9d|8yB<{OQLq?MPHB-$A`@Qp>0n;i$m z8RUL~Y?BOiIQUkA31V=>B@Az14cCQjx#^lIAWqpIcvnKE-DKh#(>L37uOc-f#Quc7 zA_QXN$-|rErzpcMW&%`7qITWJIG97w`%1={bm6!;(U#JyJ|RIwtwcU#f;nWF3~Md8 zOMc)Ah`n>i?@H~SiV3CS-HNZm%M17Jg*R`UNqptoa~Jm*-L8GGrqSq#V7>mXMck9h zcTL)i%oL6#Y&r@Mn2m$hhA?R8mc~P=MkprIkW1m=0*Efu@{LnXNi_$`SRv`plm>n& zSxOFzNI1!y4J(}pC*;_BTV8+r<%K(yj%#SS;kydn3ft@9IC8B<9_`^EgCuYgyitjO zfp$hvWRe|3Qx|71z6QgX84E-hC^=rjw1U7UVsl9Bi(oQtxU)V0eQCICYDkC0-zVVC zAZ9m0i3OKOP*AhU$PzJU8#Ep$%GPE!fG;|OE@#-#kfSC1>~iUVHLI7}v1^whs!ci* zfWQ;?KqwY&xw&ElnJ<7|%Y_=`f*m|5tg!Lt`EN9J$NF4~%$|xYuxj5LU(F*Od-rC@ zYOmnm?%`~satnIaE3jsuwm__K!5|B)9a1SH+1E%i@l{ZqLQ5NQi1a2T?}5z%RbrHy zAg6SDYQ@wfk7ndrp$J+em_zT#8F;0j8Z|)Qu$yGg0Qz8l(wD9Y_!Y8b!TYJSWOCf+ zd!UshNct2Ujijl4JVOO-;|Qio*$%e&l3M{(w@5D=2;2ah#;B{RXCGL+)M$QGw02*> zbOyAuPtee$I{5Wb^?QDI$y)>^#~8tdi!Rd`9(#9Ph)E)nXsA?D{%9kt7)+L;O;cLOQn@AWayyZO;ovd6C?qH~$)>@@zHyuJ%Y+ z^00a?6hla9MsAsSC(6(;D`APlH8{vlonRuTXMGzHlIqmU?(Tvjo@tNqD-I2ovGyrcl{P-osIg&9@BV6jI#- z2y-u;EvInqtmM(>6A@3clXojlzdcFepWpM&|MIdg4+-uufoI9syr|&@E`^dX|K` zaL1U=PiE+g?CTJR(Ety%>1Rh3F|dH0aR^1|K*vfy*HiG=M#%c04+Sk%50rY~QG`@d z>Am3YWEXIJY&GHQC+<#$C(Ag-Z_Exi`ijw)akjj93~KxGl_BU!o|lC?LN2$5PMi?h zl&E8VvRbtpc@0@;nGQk0$&eG?5EFgW1adSuT`M2*FNZq80E|6sMmQuNA)}axn~PAg zgzR^n(U8`p+^_IxHt;LPs@$N`J%Upp#acu;pF57d;mihYBus=NA^E?6 zG%Ez`7zwD2ok51X1q8Y^LC3+(Bf4?pPMnxKDs2o}8O%9uUmHTurX^prFX7Cmof!Xd 
z4^0FLePF3M;?{xorUI0LZ5Z=L<7D5+BQNvvmT%dj-85?q!jB34`dO+xEXm!yqx3z< zEbrK{W8v|Xbf+ z;8>2)6Cmq-3vMp6m))Jc|ER15u(9g_0c+PN;gN1;$f4xfF@)81551U#q5*+p8+Jfx zY~D8jdaq&xeduJVS$F(Yg^$CS!0;4q9mXlUr$em(Vd}G{ zn+hR($e%gH)-!Ju?F`!kZzt$!F5>N=+Z1TnYG9>^aTPeizmJaCrwIhX=J`VNkzzw@ z>*Q~DkL*l%j17|MJf>&xm=xLViG9`!@Ot1=!NC zwIPFIoW1jf7{vc&&-!Vr*S)xR#foY3jq)eV*308}%q@$|g_sN3QWXwk)Or`Ns8-c> zqM&7rZZeMDO-2F-X3CvPzI5bm(4%(NEJNlcpOA5eO#gh~L_%vXiC6crc(lu|tSZFT zK$`=skv)y=u_N5X6m}bNE9Bor!LdK+h2}co=K^acEIonFLFyfbhKkI-!kfI{8VR)Z z3bF;D7F#<8aH5wQVx^?{bUr$|_VvJCMa3K1pDJNWhSY(66&k&(NIT>pc0ur>LkmJ& zg|w+9cU75O56YP@=kVn36{~6xs!d z-3paz2wAyku(|N$GU2};ob(l7*??`kE=3n422RN5xZ#aPsSun^;%8erWfUxaHP{Bx*rh5S@2MX#6OPu$>Ey=97 z%%@CMik0ILU7)x-F818bD@tyS?|cF$&1^h+@_PB~d`L(R;p1_%KbW&%<=*6LYuO;L zqoe~Z2B^7iJll%HXU7X*yu!l!45XI;d-8&CW1834@rFd!nvIVd67yS^w1_#Q|EHtV z;K-4%(vq>2$Otwa zLJRzHn0y>YWkf|TwE-cJ(kVnn+cb3qC&u*kr^2JOW`AyM{3G^NTx{%2D5l8n67cm} zBthUremdi{T*SGZf3*F&Q#_oE&_Ft#l=9FVr8YDlM&-LkRKPv};hnAcl~KcWJ%0tI zCAv6AEQXDR+Ae2bd#Ss`m zrC{lIXU!-GOGv1TZ1z2JEIfdPbUHD%$$4CWRtigLCVGBJii2>m9ed!kGz)>UN{1`vBBp439+-px~}01s9T3`71(A}hf4?4+gvnzmh0G3JSd+5#|$Xl{Liw8o>`4o_gIshIbz zd)skVL#Cwe|53AdP*_lH{314$DZmuBqJTD-Vy1L%6?8OH8Iy#44DT3{SIQ1?=JL83+?#P#bGpP};I6BO z_l^6}>H&c+Y2@fbD68nT{r2AVVcZCWRZ8)C()X^SZar{TM=dV#+1&bVpsUOySD2@fLrf};xn!m1>g$n?YBkSAnJhkL~TNvTZ zQ~tQ~a_yEI8Ic;zpS`T&`l}vTsKr=#m;g?tkbH*)#?W z&iU&7bpjcdyWlP`cfo?GOX8hmfR(^I-vd1kAHiyoZ#J@|d*2*DUKs#YBY#pHG;V00 znLm&SR=u>+?H4O0r!thX>g>u%Wnzy+j1A_nl)H?qbh5eJR5s~71GJasxWDe~6>oc` zXNHE3yeIEgEeyBJap)q~NFw&ZKwZG4(Y%RMR2zcOeUG+~>x+zqkF%=f-6fm(Yd8w! zgkC2cJvOGeIA_&_ECl6T+-vl5wbpWr9-fH8{37y_>22JKr9!m8oT{T5bAP)xlWq3COAa02DPXN6VpC6-o|CtzUn=r8iVdoNZ zO`(|%N;?5c9N%Tey#63do5L+?0<6}((N>QdHtjXJw;MDUJD(^!e=}#L434+3d z`ZcYXm9D;UCNHls$M@Rlc_r&6d7iUT8MQMtHI5YZwS&%{B*3MwrR4$Jp8-1zs`udS zry4vg%_3y7$`VsUUQ@+}u!pM15F}+tGmKf3znU{+bTc+ip0U6MB{9J|=EaKHHt=9A z*PG24+j1;u$Ua#jj}NUDI_{P&R_~7#h$#j-k$STN4Dz?tG%jg&8}98G9~zWbvV2kjC~Ci!-C`4 z`GFZv0Y5b0eN}%89=UHwi43!|lmjH`>et*NsHe~v*q*pQ*3Q3FTEnrn5 zJy-2WgD!!8#R|c_igy>oQOvip$CtQZL>E!VK}%5e`O@qBMpoQm+k_5YG|a5BTquBh z4EvAiGvWx>1$}-Y4;edkzhlv``FhH+9;vZkEGueJ3IuLC-K*rSOepn`3R=gvPaM^!5PE zVd2o6IPpCu8kRPziAjIQy6yH7jf5b1?YfRX*wVP%WUX~_za7o{o_DRleB87O!3Bl~ zh6aNYZwsPBgN?XYc=)C@^G`Sj)2onn2!U>k7>p-nGAJVU)w|&CX8;ujZgT$G@Y8V3 zX3PoyRDl5zwlm)mP*qmO4%&sR+`epPx}QB7}6mrpbI-di{X5d+)# zoP4oqjS&x<)&1~xA>irEqjZt?TtVpuLPNbt>?NjWAz#7%fc3;ph4>aMRmOWfP$;O$ zZ@5wU9r(q$s8?SCuyvPSZ;XYEfj4H^V>lrUK-I&Ogrf~B)VwYeh7OOVMMMrXm5;e| z+@Ixb>8@b(Rsmyhbeg(fu(RamsKrm0r;dN9tJ7TP2x>wwM*XV^=(B%j^+MdoAZ;4)?UpS8(oBQW(Oq* zP>UR%#80#@DRXdMtIfLDQk%9O7CWa~%U6Sv2nC#BK5rrpUm8CGBH{8lauXg?)-Sf) zg>!Fk&DCzJ1mhjrdm0?Z#T&BNYIC$E8Lk<=xAtoH^psg`%}}S01GRbb-D76QW^|i@ zxgZGvEkqt(-s>>nhu0+Dk>FK9*d!{5VB2kPcAf?5KgwD~6u9 z01uS|$pD2tKiPaHV9Vp z35{~~dk$1Lt#)rk#HOJRD308dGIyS-uBxK%4Z>&_sxX?H29eD&5Z8z{ZhA6XLvSDi zTKf*2g8>>|=nO_OOJmHi{&@(|#wo`k+#YIE+b%N4gjsO8CCd@wl4KU3QO#`hzMKyai#G^dNEN zRnVqm6HP>E;ue1bssYmWqZc6i_0-wnrHd}$g@1mlW@_+YM%W+>ojLd7$& zwJ>c%R`(q66}e~A7zfxgwj`kOcs5MRALwSxN56zC`YCDW4R#+VMif;5K-r^L9B-cU z5!U4Lj!Yd`2craKp{#ON|TjQx8jg;#m9%@+;_7pMujj;@)6*C zL=RHRjeZ4ct@ueikB!*4E%aA-#D|1IdcLsEW$azlVTv7{PEh8dn|dCq#AkTEJD$#C z$MX&9=^W92CYf@GLdpF5MUK5+{k(OzmFdAFhil{_$P5MpbQl{R9d(OvjOKdRtCI~( zh$E&0T)+*X@t)LL3J67Y^fgM6OOXAe;>3~9lNOu+(48JM@vy+>Ic}EL)@B|kpkd*q zm6v`c`CB>n%zS9FXUbPe`^8jQD@QxHpGH0};ix|E{JY9A!3pgAC~q#Jg=Cqa!reO3 z(?Rh`91^AHIHiM0TY`KO0M5ZLMWN>~xMU`ZSyE5H)RR&LJfTC-rI>G>f_K@8xsWuw z391i{1wP+sVjUGW;3$#WT_Oo41W%}sUZOx#Bgbz<@9Q0>IW*x{hG96Esq!`c-(tjnjsQd{xTr1y$0_kD`}1 zY{xY<9ufo*tBUYQgLQyvUx21(bhv9j3Wy1CXOx#b1=JXY0yGD+uvGI9lb~011vEt8 
zhHd~!qY+7b&uyZS4Z~(5&BK2PpGO9Ap5|d=(LoYoeEnA3k_zVAC|TS;dIBJFXp}ki z{s3JNfho8ICOqPq2n=SgH+n6H_VYOl7FI^b_60><^@Rqy<1YpnsT|{639s-_JO6CEoPMi=ZUN@{GG|B~bG8Qt((}Ji^ z5Lj;2x!525D7Z?I)v=H8ME`6g#;P|`8t7$DF=R1TS`~%axNSNB*9xL)~_Nr>T5}N zpa=*>BA*$;8t2gqqc1n4-lk0=NsW4iWFN2zeE(468swroa+ZS{VK8vuPGpcRPRn=x zAq7eY;4Cqh4@u?o*RN*~&((xx&egf|9kbSfF zes{uAGMGnLK!@>GuF7iRcPJ9KB7Ika?3mhWF)I%V z>J9tYUqi2BzNJz>D*y-XjVCXo(g}tonL+_2-bhP7qAEU%Iqopafvnz(jZ^SF#@cO1 z{;LF+IbWG#z@pzE(PQ%hVQ>b88X0znS8{b%!zY5~10!ptMt4NC!;4s#N8e~nV4v$K zPgNaQI%H#eJJN8aN_id460_*MAF=m2vgmtGdHrkH$-_P#Mejw@orCLudBApc1q zb995ypXrL&41sfSl%I3RH&mHd_#*D#Cwo66xd+m|4uIs|io1#yPWpx<(?!Cv3;t3& zhAN^|rXUHJ%y1jWi&}I(MW4ZZ05wd5<_@G?Ie(m?$M>Znl)vWk*xTOwm^Vxs&<|(O zv4zD&>L3E20b*#RnG2u*>Yb#>M0D)7y}QL`7BToxFfxL{>QIsa==cgi5mdv=QH7!D zlqS=XybpC&2vi|#UYkz8mqWt6^DGZkBWPTH4}k{5g=7N|5{A4d(*y&^*uk(*reT^4 zFrZW$p+R35KETi>E{FCJsv6`f&+F} z-SS_i?vR1xf!!n5^$#21-&eG0%fz{*Ir5|T4&&=oBk7yOAftsezoTIFw>{4cE>9G& z;-O<@uy56?jURKevNT~H@fZq>{$^M-0M#RazN#6i30h)@&V}>iXO(k+lG80(Dy85BvYj_jhT_DE&#Kc0c)dIwx-sc1a7Y6uVay1&1c zkBUHFH9DvYe+HQVH!zZ)NY{D~l zzWE6^5qD`9GkNYgVf+@!C{gx)N1jhPC?wGEh~eNRxe#e~ z@i7yC0+TKP7Ku;slkl}4*epw%Ny(M$wz19`E+``KG-vAS>ZXmDRK|IN?p{dsIbb9a z@?6SMhxFEM3_g$h`=QOtIY2}$>7Utd8DSTYI^#kfECsef`TDF+(sA(dH)n_oArl|Z zK=p>6pO@QiNz3BYqdFe#)C>m3*FaDn{LKL)w@YqXo;h>feguFV-Ez_uBiY9=$mgVT zM@RlrjUC&Pb^U?CU_Nm$7AgH3RNde3@6&^d%~Ms#dYS-Uh<;~;cF=pYw`e+1RM zC|$Qv-vu_eTeITedgjfKkR{3jU!01YLyCU@X_y`DZEuS0M7;;-=O!gR)(i&8jr;iN zlbU41j_w}n8ZT8RmsMb+Q@n4nsrxr3KyF`~ZrCdM&jadRz0d358wbQaIIG7(N@Z0Blku znzVMIIlLi0jF@+PvuB2{hPWDW08rBp6*~Yc#{*{vNAVK%B0;i5Yb)sAfwmtiO9LG1 zmpz7Ldpo40@_!7D^&h*r=7!ru_GdR42GcwqJX*D7S;7Jx24}x!Y;3rGOeCjPQbjV` zLDN{R6fMrAN%9jnu_<$K&Y3uE%Wj{Fhx}`%c{}>O?j2fg^!mt(rAu$@8Q)oHbw;%7 z?y=45iyL1B_^t2T9Li#`w^t|bLgTzL%oq8b`VD~T?ncBE6cT!aDioTDHJHX&X()?B zW3XwRb5zbqyV<)0!%IK9x{jvQSV3eLtDmEXHz+gol0IlU?sA1m1*VriSQ&VB8*ni1 z3KKOfTA9&)!#dmM0K9FN##pEGSjOmRUrrm2mPcB#w4P;#zil6BRnPgstv|t7Zxyt( zE1sR)m@#xkEmous*P}aq^g1^apZH~4=nOb!#;5Az1xS`W?kCuCH3R-$&GiyeuQ{C4}V(h55q0{ zyCW~T^iz+?M4F&hh2kaXb5THyeP@>XiW^Mjfvft?gKvT!1Hh90_GH4~NT+kB(^u;Z zU9+s7n|V%rwvBW9UUOoD+r4+4(v9(8CvXgjEttW|*S@n>FSF_xAF9hzZL_R!{o#*X zc7rVu(nc>NFaivWSW`B}lAp@ztGDkM0^;%(ZIH2-7CUL6M_CIE0-nVSUGB#Yvcwp2 zp^GFfBeO>bPw;Lwr|F>XUS8^YdV$!z%bE107g@>TNu-|cx43#%^x7t&{fRX&L$Uf= zReB;IGgE4$C*HXNV661bbs7(!jC{5RK;BbpQJmJWAiFg>#0eB zR^P_j3F8=yNH+{E@iM!gf-vy(T9bJi0+LwYh`Y4`xEH>~^_51GFCxYWu3Wi`Wup|; zxOv_)5MKOD;3eEL4yoDwAE#@^9zm~&?{0h;t+oSe(R$G;jVY1DBlP3WJmAe3}UjND(n7mX|VzeW-2ke_-tR) z35Ns+%Z&{;%L=>-m97C@R=eJIq36!5{*NB5^;sGG$Du?;7l5yx)4)+?84Q=K-t66! zmY3?=)&Fvx(>=iP=_y<6x+8}=FQU;bZMv;c&SY?x_jATZIy*+t^`!=M#6GE*%jLe8 zA3Gy>TGSW)6(bVT?*Q_)2gaR*v$$vaV%&HgiQ&yVGrjZffLv6AWNxT|OW)xh(9_ya z+J-5$<{rg*x$w(Q|L881c~d=5iJ}Rg+1trerr0|eK1kQg#(Q{1nZyX{Cmxdi{$d`t z)Y*r{&XCFWq%~mCpS_y&bI)2;>J9>&;b&rD%8s<`7x3lUc~|ICw4hAtt$f*&@vlFe zPR%hj6ImdYYsxu^jnzHS92C?gVa3VK-FhYm9SylXkYDwD3H53%-L<$P-l<0j`q#9e z^%->?dhsEQ&}e8MY?RLP>)N%1chu|*RRSO0F$7T0ydmzk{5Dqh@DIhlb{s_+)7b^# z#P`Y(flnJJ%QwFQgk7l)^-*oKF4Q#nkncSl6#Eg_SQRE#eUoC^QVNHYg5E_B?@e$w`(r@a z_K=Y8SORvfOYYADS*%{1+Y>t8a5TwA;jK?#H&n#6we9_pZziW$-@uAV52!Hleg5H_ z;3=#><2_iRk2KbF9KHZ-v!~@vo?!rVAe!oWw&6pg7RA!)?I}$$^Ef!dfd8I=R^1?qhISEqscaEC__8mQPB(+mrH|58SsYuK_HqKSU#&;ZOy2Y2MSy735 zTblymck$+(b&1EkfSAj~CzzV5trd)N$c%o;UD2d$%hPsZhLEq9p^!U!^fh~dfs;x~ zb>9Vx`98Fr1-m1YGNDB38*>grT$Ex0&gzvh712mT+rXiBG`UvoH&x@~I2dRh6OFw8 zY^|5OyNS00zU2GDAj22atup=Y-hHq#$l=kcR~5mno4$g7$a=e4SlGH@*36mVOK=Y- z-hFzgMBtXr$5YRVyV!eMTY|M{}%|mxp6gN~G`bzrE~gYj4AouN(YAKf9mO)%f;24t!ry^#owG! 
z;&6b@iaon`UnVEnmOL3PPz|xx?hh{3?>F`2*<2ScxBJcG!V#Iu3m6G7P=*ZJezUr;} zod<7J=ImPRSgyN7>)^r9O1n>Iv$3&tUy>c(inD_ihOV57s#y=VH9gb1Q}{VMz}L6w zy1JEH`lyJ}>ji!7piy9L+^~EUS!ssZN`C(Gj8v$fvpZ^n9DDtEW?dQ@MQJ3hWx+6Z zGH$>fDFJr>7&->8A=j#6e7~NWhSB6)tB%aBec%N4A`hW$S#Q_2mRS}PpB)X#|2Uu) zfaDbzCESg@mOh4dfN&3#*9Y@#79-=jW$JnYz4XEWGt z<{X3WcRfT2JN((#&u?{9X6JW=Q-vZ0CzT46tbZF8Q-~eWm z{7l575>yWHV6>RZsJJLc^DAr~HI9Q$@EwfM!+y*jLm*VclAw(Y9{L7NyJeOkmwAjC z+Q>-b-`c)C2=SIUwt-DT9$JdU4RdEeEkQXV-^(X9^|lciXB!>D!@`UKD6#U7xu=ee z4q0J42q7fKXZ8A12znPd?sozmX|lM!tGz(GVWhFS*?vZq)7)gltIGVX|@#1*O=qXL1?|{p- z=djqVj6}Kn_4?vS1e$UA?AY)%s2aPG=C#l=#TjP>t|$xHqDID3cxiKHIB013*C7@UR23Y0^B}g}oZ;I%nMK z&wiT8Y+B2M<<7Bip=OK}*IKPGeDuIot1l&6i>11*B9%HA<5*{1V>*bu7^}e&9Ph|? zFJ~YAmoHv?L816oy04aotWp9wTk4w&qG^j(drBNs*`btxLoywUNEl9;)QNyC9jV*Z zb*ZZx(Z$tTofv`sSl_#Mbzv?Y zy6J=&@;$B0vkCQ1L#8=yXWpheg;W`!tP#r^>8^wSULxB9) zKbG;9(%u9U+vwag)X`LdJR?Bj%(jhp_TreWNUGG+ZoWHn@_C?gltZC&X&@6JWAJsUm4LLat*ET^Ai`+{S6Xg>yqAV8GpC?8@<&5^k zJ0_b=C~;VTRV`G)#69`_YBt9zgmJV!s*FJX)q;WwK@LBzz+hzj-Gz)`$y~XWfBqzc zQN9xq(h9RI3l53wW?`Co_q!AElO%)~a(i#DMo9bpDnv_%R4~q&j7}h&y1*z=$PRv5 zJQfMOkzq825qT9k)SoYLAw0PB_T(C$fPi++Az1Ko^77X8GsiI;LL4Xj`BMyk-_%qI zy3R^HdtfRi0ykcTA^!D1Hif6?x^Fh)z%-0A`TONE88f~A{v#uJE5)$CU)&cqmrN5s zhNbAkk2SgWi68x2Ql7+o`XAy)vv7h`?O5M)Za8C^QF2B-9)4Jo2uDf|K< zj&H)nchulPmm#HF>G3iIjUzM|6?qz+L39pE4}J;lv8%mmfVUbSBV)MP9f!%UR|Rb| z;j~7U$tQ$2JDhe$MHYwy#zLv7#1cmbr@Xv8U9?u3d3YS!Hggpg;y3qR7A^Vf*7(N* zaG_%!46g+jziKKno}*%ZkD=^+!aoH8$t85vpU?L6zKW#%7;|!+A1Y|umuOny#5h4% zpm1_=Z901mZ*6Qxaq-tH@*rw2jJOX*0$Y7@Bes(=km)jnD^4DsH6AaQG4l%wy3!nx zYVofY5>f;d9QR!V)NpT_0~FTo9v(O9PRWi0H7Y>1f);36U;{o{}2RB-NDaf4>#QDw4h*|V&6#4rk(OhHU-ROj0lp)zUGq@H*9 zij1QD8X8r$W{OJqsGqMV1wi#Hl1`}id?|e%T0#>{?7k+@1e*KMN0|)we^=PI?=4c+ z5d24(;xv%!P_PoMvSR~Y@JxhR^V-CLTC-iULthhRZbgKLziP1*M1^7IH5;GcdH$~* z;BpH+j4@bTdIg?4L-CF1L%<4iYChq?!Tk694%-A$$4xa*@0OjT^tOkI!xvj@DB-Bi7Jt?S<1R@(wizhs>)xLq~@i z#svBKcK{5Sbto6VHkIS=WyJ?b21oOXh&NF{1ngfzgDb#9F2h#<-+a7nV|EbQUMQDA zd%YO9=gd3ecn5#}f4l=e7kG9USON-Yt+i>1Ta1#l=tO0ffj$CuLCcg*O$tp&MQYoG zby5$od|0fq$N_wz;FjROmficOheW((pv@HE?h8%mkI>QGz_O4jwTG*Ww_Rf?oV+E;{$eB&>Y2E1x=#7 zv@y8VfhY&77+WvM!%CQV(1QE<{fLMY$S7#ngBs#My#>Gv!OlL(3IdrdnSsW6D6js& z!$rrK?7*x2;_$|{Yks-_$HvKaUXs+Cv=YCl<>zN_>Utg zpwpX%DI-xsb`=Rp8HCX+#O;y=tA9r|7yVxmN&F!k<^M2A8oWw)Gtx$7$WHm0@Dg}~ zf+`jV@oMx{pd(4rX*gA8q+7)lj#`u}ptHR0cFXT~dt&?RoHjm$%%VFjGc{EMT%bB~ zo4D*j6D)HRct}v;0sS68zb3sDpksC)udI9waS^J9hy-Oi3h{EW6P+wcNB;Z6Nhplz zbiKBv!~4|{WuVph`T6CLj(MG%7?rtdD+`?naRk$1>jK7!s!ocrfN<+3x`FmDzReDzKBXG4*!Ngu{|DRorQ-T6ZC z#?XCtG(pZ-g0iSJQ0?BvK*wIqpn^ALWsR1SS=VOLJK;6D^JmZ~`2!!nBC+~GOk8@t zP%;#nP!;Y+L_271{sna=@Y9?U!Z4+JDQhbDa5@r4GUX%E zhr!<X_niiOBsg9=trrR`K< zppeaf9C}iMJo5}4P*n3Xfx^878z*&b0I4%D$=v|+)0=K;OC4V9@XZyK-m=ir$f1sm z;^n<4A_zKUh1HzO*efb3+EE1<7(Qfz%qD3sdqSrIq->QtGrO+ww*XUqgYvmn^wRTt zkpJni$`NmM;bX{`Hw=&<7K8<|mzG;O!{NJ5;s}<+G*tc;l+3+f+M#4yLA!v}JvQ1f zrUDeMHUZHW@Y+#UX0Zj3*I0!Dwnf_9?kDx28Lj&`*7LWrs_#q8cAR;3VX<#>K z6Gawg39Zw0?5W_(a{!W|s-Y2yBB<@Hwpq_*wY$g8Kc#UuwlDB^5s0@6ET`VYpw$~U zzU3W4Q0sZMGH5r8q7$4?Y}OzTg41_0|8e+tP))eyv!x_)158%BWea&F+45MRlN6}X zRk-}$!^v^>1(m6YvP_my(cLI-?>ROPe%J7_ifM@cM{C zcCW}n*H`2B*lMJAa8udA5(ni8#I2lHXzg0!xz;eLcEdN5x~6$15QYyui9sW;m-hnf z2AHbLvhn(n-#mc3qy}ANT>^EUqOrh-pY9}D7H@62i7W%Tl_EVk9WNAOMEGu zvh34Jw~XRH4q+{v6kWF53<`>;jDZCY64vh|;C6NPa!Nec*#{oWv%y5-3@Adeau z2?8qXtF`dj^_6l>lydycoZ+T*be3L2wWg@=@ZF8S1@JTx)^!QUia|JOu*4nuPg9}L zaU1L(vcvsMWwb$vgAc~4M6L}GS{<14sf_lGUR6+b^q@9XWDyDr?!@LRP$P5a6GJz( zPbul>+})B4rWOx#>;^wD(5={+)_GLtK~KEf%(3(L5r!jmm{>8yCda^L zsJ>gFkGa85R3qWaRXDH+x?-07u0?Ft;Gryb?7q(5;w}_d0*Q280?I%-$5_am7RS%N 
ziayVyh$PV<0MZ9O-;q9ySx-clEI#CMzzZ zLlPBMA@7zmA1@#)%HUPoE%8WvtJ_tVO!`vZ!&ln&a zOqNsEorXV-h@}|9-2MD}_*1`lDWj^@{kd{;;ZuNZhSl5bg@uQ#DT4q6>*tMfP1rj&UtofvI zh5joy!Ie7?4gormTXG*>HCXsu?BdZG1q%)y#(&)uJ!!%7T-oQqexMw<@az25?bco} zYVxBzS^Ds-OV4?QgoS(Gb)u67WO7!0pxmbo{%cW2uolD?w2orb5|R_s>&Cs7THdhP~G0! zB~uJQjw;20!fOA(45)jw0-F8f+%}9zsgF*GHjkt0l2qsPipABM$8FF}DQ?z6Z)$7T zJDoFuj=C#P*w@7kcIgaAsTZmZeoyPFScieHyYQwm@r4`kR!)I!LtBblETKa`sF&d0 zrPK3WF-f*~gYNV;yFh#^RUMtL^j>vw!#fq+@|)9IE!5u>TQupMNS}Yu>HK|Co}g7& z=={_7oLd@#n*EEq(&F5ZU4hJrJabaTDu{C^x-jYC=4#9y=C{pMQZm2^VJ^YLEq3H` zWl}0ox>hvPz~3bV^vSB#tE2R6MgCq(xe4sFmaZC3*l_LTfWgnjv&K+^mXyG+b+s{1 zbdCJ99zx00!quG5z=$WqP;H&liW{$ZyLz4zRI!<7R?q|jOt4JsRF|q@?66H|N^3B` zxcK9DI+&DNn-HDjEhM{BpQ|72*lK?M7oi~!%;ONlWv1-j(8SHo{_&bxRGVA3gYnL!wz~Ug=B5ioLk=ZN(~Jz++1lJ*U=oY zOkKaI@7N1t@waCKF^zWhox+2kiZ92HQ#LKOsAw)axiP5E7G4@Rt53##ml`^ppkZPa zi02bnT%1^Us&7C-aP0SGy|Ef{h;us_la$-_wQcsRcY2^3wFjhz()X2j*Ahmt;zm?a zbK3o&=E2b-i!_~zz+?EV_%59{Ne|`}wC*DBn;c>MOb^ACN)> zQPL(2*;%9}u^%=dISM)p6mi}V9A3O>j)})_JSsx;q{@F5tijih96kE#!|DFJ?^Fdl zrC|?YjNE14Y?=OGn77BEiT~)aV{M7WGAd}!2E@>7$I1UQAqK)_81l1pH|FP*=071Q znA#SXQH~x(VjKtGpPya(CW=5{!t9Ha2@RoFiK^01Y)7005gpQruv{y^6zKAZfI4u4 z^1)v}#V9%bs~Vs`p4EJGwAl7>?* z05==M`$+y>R-F3_!jVftB^IkFoEG-(EbayxLqd>O7Q$5k*VUncw6f>Ly#2;dqWP~s zw$CtZI{kX?f-ipp1B^oT|4n5@57?jatE!@}eD_8{6RXfj<@D)?c|QQ|YOa}&$53FD zX#3bIa@9`wJjGXtLQdCiF@SDqpmHlS>YM_r2~UxUzBaDypVzFcqLPFZiqH$AM2foT zX7zy~STvfE{R)lhP)N}|4Rjv0tvmgECQ;Yd7R$y_LIyzz$}p|0uG5kMNQ=HbUye#m zP8U%mpf4Fn|L<3{ufKmZ?jyv&Oq}q82$!T-fj)3Q0W`pfa=jME<9ztytFBiHX05Kq z8Zfw*%wQyvUF8@<>Hh%F)_D;u;(6(>JMVG}>~%CycRD}`MJmX9Y^yJL3-j)aS_3!H?UyFWWU19a6H?RTJemzwNP>b#nb&H ziLSU({6T;TL2fBj$5!x`Y-(;6TCrj$nBo%nRG}iil)IY{BzpM={LhG98%t5R^q+}@ z+kYk!%DZ>(HbgrNYSeLsajOi_?uoD{V@My2=tNj2vkWL_H|hp>6<8}UnM$gvKJ-}j z%Z{i*XjP6GP!G@vw8?1j=e6L=e1-f$k%d|XN>pLQftd@%UYuA#a#KPjXn--1b+8a) z9HE6NXJRcs?f)@^VD!`|@i;0$bIAab9;r*MU$4Rf-5B%ekrp4|DpCv@@xbEis{gwR1QD`G-wzzTWl>niX zK724n4RFZ~)%w7D_)e@a;GM6qDIVD4(4*ys{#eZ8J?T@6k1TmCp5e#tKs30_*suk# zyciIXtb8HMwevlYEFVp*2F%x}mlLvJDH@EI3=j%|mz;``$O1}l+oB=@%izL;Sw;`~ z`_BeQ+aE;7e(9BYE5#wTB}U>W^!|T(Tv}_cxyg_v;AG;t-%zCE$Uw34TOyIMPY5c` zLRBwY3EM$&@N56M1%c)cb_I<+fW{-hV(g!wxeAhtkzNhF^?^+=8F1ZqubK(ps)DAF zpmq2bF31i*MrdL5LM0Y+5@g;$6i5dEbP}j`uW{RZOUWc47<67s!ZjB}6OewQvOup! 
zI**O^CDTP(i>6Duzrv+Xx1Q*KYH zA_>`FhsDg5qM}LwMmslGt_Kym>C{~u{FPHR@w9?BAMcf)Mh_Q|4^sUmPHk*!oD~s} ze+U5xCR_|kN8-t8-;-Puv;K-~uO16>eB1m3ls*(N@rMaj`tw!t+JhY?9Y}@lL;1KG z)yi^fm_cTN8f=A?Aj;n5i353;@ZC=r21;wM%kB@+vuhDRNqpC47NWf2Ula%4G59XP z+wwe=3j9nID?RR13qslmgy6d%H#{ia$TtQ84QK_!l*}jnc@YG!y?Nllj%s=}lh`|T zI92Rn0RnS8WgjQ;_m43mgN(F&4yDxig1v`zoBWO)XI}zKS;_?GaD&qIuQ;7ZD7kIh zHlN$Kt5*HFV(C3jCx`~w``#yh1s4V6M&=V8T99Tz9waH<|NcDp7V8ISmc&^XTP!oQ z2QX1ONKe3Z+?y@_aTUU|3X@med-(7G%Vf>}64H+(X>p>o!eVSG+S1uw-AnM{pRfL1 ziE9<$Mp1@wn_Ovurr^24X^iH zzmI^E|5LDI6*QGiUzZsS4&4N(WI8m5zIX1#K$Bq%Lv6eM50@&y>E!NN{OipaMT>qi zBXScKNt_NYZURf`FYBBQ_|B@Q0)Q3ipy{o^mlN!1Uk9zdF$z@#!RdcdD*iKn!NZI{ zgAy>Kf+L7p1hb)n(zU6x;%kEsxsqAt_nk}1erItmUB8gvISf$B8g+(qY$7o3*L9+m)_sh|BKgjs^-EQwyr?SQORn$Ii44PCQZTD z+p7|`qbIro0AM8SoBMu{l%x#Uq%VqhWa|5%r6`3Iq6aV?Ve80YuY*d9*5Yjjn$wde zwRY{^eGzXyRpCwL)BpgLLfoToDh1^LAafObl=4*M;`Px_Dh6LUub zi0m@{<2iS0HVnHjCp|hcS!Nl4lxRGUFVJ39LLmwPQkC8QffFJNied!l9t+F-Gg)V{ zkhVvg@~&W_t^O@!85OuilMCkJ2lsMyZljp(xe7@K@<4Qt-NpeYeHdvgD-0DvI^?qO zlM%v@-3G_L06;VkkpBGQ#<|jvJyl9P-%4sa1i+!IruCVc6m7x9e%_Fa6YI&#r@wM{ z7mrteANyEqOcp5TVpAda1<1CzS8T>LJn;gJy#e|G0r(jKd?{(cU+Rb`6nvG`J`i0d zW*@8)#rZ40=PpG=6JUak?{gR6=U=sK**0j^ufJ*WDgC0k-2q}9rt+RWd(Jo0j#EYH zeI&8kq)y>0tN_%UoU%zw3*9COU#Zyymy@O)h+|miqw3vbw*O3 zGR(U4rbTt@)~9Lec&@2W9By5Y;gTKQHz2SvzUqU@B)cYO`|cut$Mimbz1D?`>y~WB zEU~`|3s3%^q3n6x{inB(a3}$rehX1t9N_sVrEQyqEtWIRF_1&W zZK8M^adrDyL8U5-JFXXgcyK0bnElRZ?+3oZw4;&2E#;4mw8DQEzNmy){2nir+a-~K z`9s$r)Bk<~3F*;O;ER(GQZ~!yGdD$|a`E&P#v2Qp)6GHVRuQ^uwwGTvoudCPV>%Ob z4<5Wp+OMtWCr1H+bt=d^B+|de1;3qofK%JluJwGo-lS>MVwYE>nflBMdi^Hh>*8aq zN1N{s)(qA33_5<|oX=if)pF2dF4{U6CA)#7JwEb^Wf8|UfBt>6M5v*(JF)x-#pM3C z0Kb#W8tPA^l^UzU-w~Zuk0Hy754k`&0x$%~$vu6Mzdo6CyfAJ?6Q^%l=dN8BzC4ZE zz9G_n(ZU&%aVJf;oK|qwex#Y^c95y8r{wpxqv$}s_Y1l3_Lu)GeiZ>)@JEG(y%0)9 zm5q`XDRj#9L~P;IhYxDat3HjMSGiJOplk$6fZ6opd)~>$}U0gEb z$@xMfr~&Uh#+w9v2mH=d#whZmETe3HQ%2{?$Jfe(J4_wo zwxrrG5_o*gvSGihUp)5Y`DP96t89O!Fns?PS_bLlEyPR_=H&Qwj}EU&B-Fk)ZiE7z#Ytp>%uz3qf44G#TmIE+);vgfgQuPvJkf8jJKOYw zdT0FjTQsa)lX)e=aksPH*4e%H5!=U=-nlZ_wDgD()0_W%L6i9;$t$0srP+(s0F9UH zKD@>I5J-D?zHs>S<|g10Twg9o?jT*1iw9FFYVZE66gBPHM{L)4U7K1ufu3pKMo9MX z9p2#s0=0gwls>!l@$C^lORqQ;M#&gf-B;oJ^FiPZpUAK2+H>!-@SdLA1Hs!)-BX*L zm8SQv6+hm~D&*Pu4axcrhT+;me`68&)I~YRR>kg5;r+gFvFwtqoAgV!Z@i?U?`*#<#^&f3<8V!O_L^jD zh9d5ob=j{9y$?`Z!QU@#O}*=CUO5@^$9EN-eR*}~`BNE>ZpFdTf+=?AD)*)(TR%5=LzQm_dtPKeny9a__>;bd7lOi{FP9bfAHR-sc6hv9Kb*I3VNS=7 z9eWp+?%jF*-u3k-&i?yNWQ95`-LQP-eeor(?e6`tX;SM1u@c1=<%^lKVZ?oRLgDI7JplyDl8+e zC1O-pYs!+uC%>T5-?z%euF6S7(OLJAJh0+;4V}mGi7RHSUb=mmab*6#Z`5f%-2Ks^ zo<6%zZ=}uBGe6z3JQi;0`z2mv;+y8Mb z4WUKZ6lFwZC9)%Xk2142B_mrUNZYJ;{e42>|8TIK?tEmvPx9!0xbjwIQOiVLT9=bsL14lY-oSz zA3m}!8DnI~geE*Ru){=`r8KNv4m|lOTCt;qoFKJPWy{6maD!Z8TE9U-{bc!*$_u`% z9*4WHc@0yONL_Ar2?`b`Fq$j8c|rde{$l^&bWnytK{L(QtIvCuWp9NxUGAYM7<}>R z%uGRs%w)TH08L8$W_}-Cgnjd0rkmU`i7Iu<4a+91^{4uACfn66-!gOH^Tj0*nsMNB zsZ25t7~O78hB~f<%lYLkea?ncN~;_Op|iGJF&dZZANkVPa=a(+en|ztnjCe8qaxM; zR)efxMPS-&`I+o{33OuXTKodMKh$xl^O90~i0G7zz{C_qH2@u7YbSiZ1^E#@#N&X1 zN7a^sz^MZQrPD9jVSL>$e^S~aDNeMmJ-ZVtjNhIZNFTTB-&uMg^RqVP++|BJZ93vA@ zVsGzfU?8dTb#dU6nuXO%OPYoq`zKc)j}xV=On1X4$<-5`AieQjFU+EoyXU55_A1|J z%Bmr=`lO9HESa_hG*yw86+_=pHe^`2&>LZM;4c~Xsf9SX!p+&v*8oM#}~6GI528c+v#$S)LGE^@Pg|3zu0o2WDg+{6gZnIKtZ;YwhH_( z6X5P5S_5b@0nY6JMIJCD?nCRX6wA;8Adn~ni+o{U9J~#o^bu|p;-TyO2Z)8DcOI5; zIy-$fa&G&;63$+p?xWlr5*PVtJk5fwGRXw2OC4d)upRZiX$s-|{5V>8KvqtMtplpy zm{Lp@BC6X!jPJ72dKch;7T6nXI+azU)^mQbr4W(U?e;nN!pNiEXJ%MX%#f@&z+;!T zBf9d86ZA14-nh?Gx2qeRTU}3#qQWL%< zlh!8dSQLoIN@ff1h&}&qpJ-ER}d`{tyq4iogx2w 
z+R6JA_j%m6Icm6LcXzf6&aP8R$(kMZPJ!aN@4i*&N^c!Y9iq~7t&5AMV!YrZn$6c| zJ9t;lUupc%NA89A%3JT66nOVI)37?`QAO(wW@*PGn}?~zC*Al!6+oq>XvmNfJsD_r zXOj=+msK;zduOMt)BN+#%}@vs{xtuR(aJ_yM6l|Zrba&_>z#FJ(v)u&YKN)wCTgC{ zD~D%6YztL7ldt|Xpk7#e`NA_~Pp2KRhXjRQKtUSP%v4}$ft7=0g84u8^rlbAeQ&1X z_RN3Y_ixE`37Y7)zIEhyK}SZ_k17z$iMN5~xI!0#%mCWJI{N}^k<-Eu*Ht4D_@B#I zg`6OHaD!cXZ(yaOc_T%#7c*H`L8NclHMD1?)Kp5Z16~IK37|=Es)79c-09PCkktX|KN@SR@n zm?OtS9^|?5j4McA=zsOhW4XoLNCJ9n1XPbaEHe2y4%)#Ea79}M_6`QB*=Pkrfw%x# z5{5}#2IAB6^MCk$@TnSq6;g5~CJ*+dy;mqx&5&-m-V}XXZcgn5oacDLQ0t_>dNqg^ z3jC(s$MNvmfp57Aavpj}kYil2sZh*8f-5r1VnIM~EHZ5VF+$QWq2ATz>$NhG#(qn4 zaFH)cmwma{^#mcl>Gl{4H7*d9N2f(Bz;V8A+#1?SFtnti`^n!YOA6&xBkcn9hYQtR z6naqqAGYEw6TZegl-O)fzDyExJtaQLS3IE3*nF(N@!OdTG*|Mu;%qxrNrHDO0`^hoBGrTW0`&^Wi6JZcVjf!)z2{ zHgEVRaAK>Dhtku&^J^wM*o=Ua-`DUnCEWrPuwpO^G8;a!l!GFHp0>fC9vF``vQYjj zykp@?w)W;&k5Z!G3tk|?<0P}aE1``qZhF?ek%i^of0G{a{I1ajOL^BvVX7I*`YDF8 zlqqi>y*==C_J{C4gS+8pi*;Sqj+>(c)?AueIr9|nS0gakbP*I?mT~#3Q|INt_{M+S$uGgbWW}8t9MN_RR#=yEQ?s1#0MQ7ZG2N~AjXb~96QZh?WHxbluwZx~h{0r$6=_9_s zX9T-L){is6+v7EA*;bpQyb8gD91_v4nO#(-?^r7C(A-Rb5Mp*)#em9H0YfgdP$}(c z_lUAVLR+1-yKWZFdnqZoDW7S|hRD0D1}=J!G9Mq?ou&#0NxeE6-yi5hXSr=RnN^r&P8x{R(tz%jeHRT0uTzWRXDHTBsQt?8V` z=u*YPz0A~AJ|VYtH`aWgrF$yaA8upytMiKUuTxcTzUXJ|(F@ZXT4z)*&hNVAv27hL zrOEAhHLr3tE^@low4fr+!tDP1W=Sn;cWOJMi-{}TT{NrUgrEy=NHFuB)SEezf<8gx zm!EfSvL|yD%%Z*-wG@w>D7E&eW!)-O>QS-ESViZ#-d}$2dSUxb&xhIX z1lyD*O*C^R{b!vxr!>Yd$5oYRrYSsB#MTSdsWq708KOKPLo%7um}69B&SbV1eqysm ziR6dw?U4IJT8ye|B*AsDtRzZ|UeCtMrV`?^PEY<*kOD?GQxHmKb5a6U4ixK>1)l#c zarh|0DEtW_S>XzVLysC(rz+vhGgW54AN3F|nU&lu9PyW}yv< z=e3>)qJRQf1psxH5s+W#F5QCmOlZ894xI#i!#JTdA!e>x9L-j!{OVI#lR%k@PB@1{ ztZsXyGDSG?$L5nPsupBnv1x$yLDdP4za!iL6}!JydHBJ8mCf|auWd2B;XT%v-5*a+%V5R+b%*gw|m6d`af|?(=6$Tb;*j(jLwcxlk28q$d>J!R?Y4; z#d~7@8g_RFvQE>`q*=eB=Z`5cV0F+=j{L?Q-3)7AIZF`)%I%4)Wx@uW$?q=-og-3d z`rg*u{Sxb=6Sl9eGk)+ZqifosMl1&aS+G78KR@FkMWd+(I;D>#{4?@j0>a`9l=*)) z3!cP{q2lf|IlnqEs2^~3aOiVJJ71df_p|=wGxsm(T_;V%@`VZt!urF(l>1>P9&k!d zU|pmXR;#~rTP=ikE1l(v`6L+dZO@tXBrbn>Qlo+#Z}XMzmrE`Wed&jyA(B~>ULln? 
zoqlcUp;l&)OZ_>Uo359ipUQt)t`iOqrA`7bdsj%`FfZL5)5a^vq`}ugQzxMTP+u4a z^y8vNMkks*jy;ey>J=^CylfywOozhvs-@uVZ<@&+$xHUW1qZ*WXQt8@;=3~%9PiVdv94@YfTjt35T*zUgaucLmcDf403y!@`b67u{(RIZz12An9wUgy{$pEKI#Yi1n4l&?1Y=6e?WGl;f@2t+4#(|Y16br zDn*&?)Sn*gwW#kQ_02^pMG_dipMUN4PqrYRQ!~C+4~Q~tDyUj`*1s@IB{eoczhjvC zbG3`kQPCu^tSvXH$jBK7iTq8@{*FL*vGnDP(EE=OPnXzkEmvoF6H34j0|<87zwls2 zAJvSDiAxF|wu|aL)VDy`?OVe6$6RCIHX+iEXNya<)Pel7B3)Y@l{?y8n){z1ABFaY z*e-I`T2YyFA!i{nAR4BcE$$0n;}8zS47^pmjfIj7snf{pI6azGFnv9#<9kXSVLJv1 z^-2=R#4eHgliZ_N&#&jiPJ2$&7bLnRX5O4ry`9AS#<`Kz_y6n(+EXW~>VkA(LC;UD5D}mq=J1SgieH}bw zxo5`)IY^hAjEMITDV470>PahoKOxe_)?0Vqlw4-NqEZm`dcB{WB4a{tqPM z%UZiC9SK{y1%vUWJ>RXUZQbd5ttuPH_awT{oL<_R{3hw~UFNed7RxAZF}X)G?U@<& zWqPbUe+yD#?9%8%WFtzXt$K>iQ;e#K`p>)iZ=cl{edH!dE>>F*f1*tL^nxV(WlwTo z2<`eE!IqPU@sF?Zg|MjqJR21mqJ?+txVh@*}2Nkw~=*Q9mM)?)LvB zGcuyH@RS@n&CiJd4}~!MtZtH)udfEdHXc8gFP#2TjfCux*V)|CrX;aR&-B*bZ_P90 zhA;X+75>+q*Hg0oJFL$~(L8vNmz)yuV~B-$uVf6xpLv=I$*!x9IW&EV!ZQF#cRW>crc( zn*xEa{&1-zuUI|`fgfZUq<+~^Qqt1SZNGCiH8s_4t-L%3CF=V1MHF(rQ~%NS#&K03 z8u+EwxxxYm)+KJ1+TUV{8hjb#+mj;!P#*$7@%+V$Gxc_XsQ!^g;f$`!nou|xNHJ1_pvS^62N+E8-?6c3yC;FVUh?E4@Z0ZowxQ<2#KbXAcqXP z&#tfSvz0uC1vGxXU*_rU-HWd2{iE4(>(`T}>hVZebl37kiN5>~$YgM~Hd^m?kMa-2-?t?tQvt(_av+{Q=AvZa0 zOU@tAz8V|zA>PI2!u3~2AnT^5z?`C;um~8TpTVLV3mM(|e%tVBEIm zn;n3*KagpG&0*y4SVHGPuLS|c(7ls6lW6Wf+NA~iH!)!t9IvIJjt?S^+u1!G%I%Es z_owX8Dmwr*^*jnv6TkWXNk)lHF1~ei`fHsv5Znns4RnK}FXKtosu4(y@@07zv^H|a7YEi{l z(GsnC()s0L+VjKnMCjhH&odjWYGp$)aWw&TVWIdxvJJ<_sE20b!bt`g#cWvBl|4wlB1@!J94IeI|`g>OUILcx(edbBK$xnT|}53Y0`>L$G~ZV zpgqQVhLvFPvx(j8fmg(Y3Irotl9p=zPs!k zniO+YaE2rpKfrR1nZttbAf7I)d*^_n4Zh2!UJ#_{Qr`E--LZ{x!DsiruWz$tOh_f# z*wRr{;%X(+F`~W3?aA&8yvTF3bb-^8B6{-g>+9y@{R222dYuXW8r@9z9SeqXmI}@x z9`7+f3%CrqI<+_T9Hij>#ATJM!;G{hbf0Zr68RHESJbs@A!OdgDmull)Hw8vi5R<{ zUtA6kEp?40W<^@h-wVc#2DFSWb*Xg-+g9xGq(?+a6|A`tu+fhnEhhI)`%>4LzTZg6 zf&%0TWKNKE8;nIou}-59dq%<>f92og$NYyZuH#d)=GiC+Zm4^6kzVG%(u&BcFP3%8 zy>)drDFHwu?3YaI86O|Vi`|TngZE!@TIKn|DV9I4;bxI<3PRngqwo|_;a~%gg3B3< z7Qua?wIUf>iAhOx6#RE{u9Gmk&ul*9sS5l0z@7F)K3e@jV)VYNE!p9{po)A13O#DJ} z02`4#nd!v-`(dKgJ?#F&Mip_6pw@Ij*JH1u%3l6DG&Ly^Hp1|7>jXZH&o}O*& zVJ{^Ko7Z*?dtV;T*-hdWm9OCCe6MG6Qc z+6IS(J%a)8VnRYB0$QZ|mIF+<^5d0oQd?lov8y#v)lZ(Im74ZUq^V?psdSL3c+d^h zF5l)_3PwgHUXIW8ChQtvUJdCs=VV98_^*VYA z`SKqy{~4BIvKxAb72AM!S5D`r>zEeiT5wVSn)ZLDr#S6z2f~JhH$4W)gS+Ml)AM?~Lc~khMu8$w@qD}bj z1mDRNQ2a;wt7~c+z_hR2hMyXZlSlslWADAAsyw%^QB0yaYQz%-QACWoQJP?(C{2Tc zZi>=Dq^O8A=^%nqVnmdputiWnQRyHc(z^*_fdJA(KtKhgi_(<-&4qfB@4Md}zkA31 z-7)STmyAkdlnJZ-u^G2WRJcoSZG_SNgW2@VkG!MU&PY-(`#h6jc*w-Hbw$i#e_m z@)s*VKOd1@+^JiwHiI3~s~hHNiJ{%k`sMv!ai~yuL1AHiG;{e9o*in4by3Sd{qXn8 zdRZOz7E%g~e)@F(bo<&_{^|#K0dAb>4c=epvHHBff0ONuNaa87ARC)OIKR?Ae#~Zi z?(RQcm`!ct|Fa*zide%Y5>Ro%@zlTYF1r%~OTJ_CN(xeyICR|p)XE(F#-Y{?_(6`- ziq&QdL>nPH@-b0@cRr9!s)#j6Qk4+$`3Vf!bTUQD=6hUSXzBVx!XkokVLuprhw;dVggdo& z91HUE%Y5alg=-b@R&XuN>=4#X9jZ9W=jZ2Vu$hAo>mHNtpB-H#@XwnaypYT69}A3) zEk%`n^*>);+kRF5$Le6?U;cmZ$8X;Cv!&=J=_J>nD(76jd;@lkxYY|4no5fpy+&jg zszjfG&H5S7g;ERFGsCueSQ`H?zfhBRZ+%iIbPvZ)mmfOc9UHuXaitbG z)uXA{v-qS$IJcPjZTJC&WMp3H?fc_X5RUH>)J?gECUEnIUh@|H>tFm0?(&UuOBs^@ zEHMccPv#n9%?`WWsFCY%Dmp+yEX+HWpFg}wM8h#dKte(X4vY*7S(sH|sCbc_-I|#H zK2I6{+F2yZzZzzpFe&H6Q-iIWak8WM$(L#v8T{`-uIqRu0x|lz4l$UX+A%no3fy9W z4*x{01Qw%JsA0K-Uhs;Y!Xoosg?;z1`>}rU@s|Det({-KmGi<;&v#kG5xh>AoO>Sc z*$)_5Q!S4ZGO@R=Iw1fRVt)MtJz@69Pk^5ZhWV#rC0txS$(O5M-|{OK-h;-I*sL|E z&PHHDvI4}n#NE5mzr5^4Y3p`)+_<-&@W(z5`1BmsR`C;mtqfQrt%EM|$9XghY01hh zyIH~4uEmu2EVY6aP|Bg>BDM3I4XjVO&ncv14$vN(g-~CJM3dJUa##$xKPE1&52>*R z!-}F{)iAcP@th5{L-mk^86~V}3}qN-$=5aZoX&*$IQrN2s^Gq&+b01!Es#MhU*5li 
zN>R3bnhM4qz#|>Z2pQzs&!Y2CHyah`ZP<5N3r81lU^r%hF8%6F*)t)Z%AC*ftcX!y z_f!6r*Y9m$$=&FuM}w|>`xg6`Z>5a#g!HT~LoaeI0&abR=BCm2Er;PKwtR8#297I< zhJ&K?(Ibv=1Hed}wsT`YRF^s+$d(MM=3rlDcKV)VJ%p0xM!8Xvoj|! z5u@zs)92{qPFih~^DEYL9dKh5STcC^>N4cujcA`<&dr^Osw2_1t%#N@xS^XFp*hgn zSYi;INBbLV>*Q=(WyU8b`;elwMtkd>F*UlkCob{@q|r(Hw;!9j5EqU%=~CA-Gy(>ZAOjY8R_gM-er!I2Ddt;8h3WN>#$*fLo4 zRN9N7^jaf*N(I)(jzWiJP*wdrx8Xflo@jq@I_j5S#pOsjfb(yzH zG=kMCg;)5=&3ralvR3X@9WMt5Ltt>%{ReO3!?<*dSp{aN`d{908|}T}!81NSo?uky z)|6(l?^3pjv4XcRts=|kw|}$A?OdTb3A1f|hJ|J>5STS;y+r-YM2tZ-y6c>uo6G6S zHBT@E6?8nNv^v<2OLNBSB!}4)j_SCcKK;Stw@cHLds!cg?&~KP=%y_1xX-`YLC;{( zkeZMT#xfZZ}E2*e-On-N^*(P5V9+TSE!ZW7njltKi$D_qH(WWIY(Wp>r@LJ!$ z@Jalzs4lvSQ{J3NLc1`@iZK5YmyU=ZPTl0^>spkf7VAj6;;P(Qyr$aD3 z=z5dK^r-IULl5?9w0d5I^)nB_XB4KyVC8;(X|TZ0F&H!XN8Z!BF}mXg=9KvtVFxe; zhuoKOahc&}G~0?}r88X}TbVV$(J7cFAEN!4e;@u-4P$Z82%n6P+7lCMR0F^`S|Qoi zT|7ITYUJ9<$t*AqG()p0?3Q)mwnYMnwVm(YC3;LxxPf&Wx5=#xD!H41<;pRn7yQGQ z6`(+O2;b(%Qdo%9`*?2kzCKPj7+WJ2o52ccK|zFT<;r-NyjpPh1OHX%-D6#V4E6ZKLDZkVzP>e>Gm=8*&h5GIlfW?YiD1u}jmCc*)yq3IV5udK%Muh3 zIeZn?g@p^@zOsVoyafx_b3z$bSZxINq*_wCMq`&P-9Z#B5FE32PdPQewip`M-$ z$nyB-GV8`L6ZqsrJ_c+PLPTA52ZV%vg`ikRhOfnhudjyF-H zq)b)bHsBb&^>KK3dt!7y=B9E^9Q!yrs;#GY{i6#)Q<#{=UED(>5STTNRkFIVlU@(f zV6?Dq%3!WN`{B2enW>{)N)vJm|Mjmeg$MD9b@A#OWWiI0?eZ)FA6z7~Qnu`{hqx)A zsOmrfw>XPY`uMTZ+v)EFFmwdd@NQBpr>(?tDp?}QkK+PG3rAR(!XSF-Q`zW+Zy)~& zB?$_1|GXn}-)h)HcjPL1KD6kLU~shOvtgH4b|p(#(dlMYVXUM35IXq6D`GKWI`Ai5 zwbDYfKTApgX*9ccVo;y@>C>MT8V2yhaQq+Yf+^opzoE=i1Nx@Nk9UJ^5N`rlVf3t> z{p`anZW%48oyajRhg72G-?wUx@0LtKk@mr1w6^5pH{ zGj(S_4$D5BYG8(spiki$>z)Ja-j`>^{4ZOH6CvWh&b+6NmDF?yHU?Sudc1s<&B_qJZUKrw2Tw<2@@=Pp}OPK7L zED!-eGNMc=&zS-oRiSs+a#K=$BC3carE7w^GN*#CoOz!c_uODNjuv}C>&0`r&o~ir z8#+fS`Vs^-ZF;g&{{v`L-z!%L35w(e01#l%l;Ow|Nx=))+J}%>YcP&C;LZF`1I?S5 ziI_}OT{YP`u>y&PDdpT73l$;;dh;P(Y+Lee`SQ2d4V+trQ=g zkh|v{PNX-AnU--7N<$l2Jq9VMOasWFNvjqf?wECZF2n;uC2GX@W}R%rv=K%N)+b=& zb402UoLA5j8*U>w@@^+SJ=i6_&cZ)%6XMZ85RcxgMBG!?0l}0&q(HZk?mL(X*Ij#X zS3Sg;<9J`|CNXV5i02VH(5W6v-P1{;A`RvqtX)gg=A9Y3IW<752Hj6=l$3lK2{=pw zdAc5lWykEAozlc$74HS?Jn;yrs$pVG=iWMjk+nK*Zh0T&o<$DZynmJZKtGvp;^!+# zZMf28T_aZo`jhts!w2{njBd%m?OY@VK2IR zdJ?b&GJym@C5B*PZsPz?*hwpH>}n zODR339;0+k3z-|})oZPC!*$fe=75&mV3$A2?Er)Yz>ftkde+in2p+Zu4~PRChJY=5 z5;|&{GI&u%L5NF2E$S6_E1UyMCb$K?%?9mfXsA{>I?wGWrJKD&ybYBXx_z$ z8bLWZqnVi*g0b&&yLgKZr)>BNMA(P)IDB|k@cqiZ_!92hK6bxy+&hA*48o=VOBri` zLi8VBITGp^Z1ne6*$)Z}|Nb|&+bU}R|BrD${u^$KmqX@}KfcSftXHC?f%W*l-wv;w zqQtq`%=hwTiMQ7EiP|@8Z4o*xYhwejNn_A9W{6?2rbQKZU%7VOeC%P{TF_`=Z{S<= zNDjN+q!EEIEn@u~Phqf_C&z>4&O&zcu6T&NCqv83qHqrI|dBmr-Jm4SlP5|}j)ndo??ekO!E z2+g`Ebhu-Tes>N`(;ws*%lF%vlIyf4^2F#6?7Jz%VIi{@euR7TH{wy&WiK|N-H+et-p zI`}cREmK%*k4=bsO4cF%50^9a>66Q`<7{^BQPc+Pg@_9jpx-$rgWGl;WPz>Xuktsg zoJVj_+iI>7z62D-ZV(?cZCa$5HHf7ShZGi;jzeJ%ICmboK=qbPqgHfN)V`&Zk=;P# zJHLIj5R#?^)`lAfdzCY84G|w=-CA%+qxI$_v7Xi0ApIf8>1nhs?8aUTUnETpq24|| zeON<*!MZZ6h=_hWhVRgQltzswCE`v_YM`tka1@5o;tUJq#xGo18BWe)bawvkJ~2{# zZvNunt5>5?v?4x=A+Dee96)V5=d% zqXlrkIC;amb-yA4)IwrNxPEbu@WQ$xy3 zSJ!6D!}RYoa_iyqi8|qZ{(Lu#2lbiY68BF0=$}6jJ_mNm`}laZI8GH5H-CQnWrL`!tRW!r3)~}+}6N8B$TZ;DlSYXXzOXjht^IFSgTMRjvMC@uNEGD%{rKzrl!i~H_#aCFQ> zCp9YHgHXuP6?GT7EFm5uD&B0Bb)U$f)DMi)1EJKx-d-JVQ=jK-1;FH<2wo@1zPTJc z4yf=%ggNE1@Ds;Sd$5iRDt$%`fD_ZElb&}WxIJ8jxN6dI9zaYTku9JAx`3%-kxOou zeAP|qI7Dho&^HK+A&60^6k@=IX@0-pC;n}zYquT0NSkkVs%sa~KyG9G79AcqFhD=Y z?haLSVAH@&9=Il`tpN}w22oK#QE^368(L9mu{Y&8C!r}`8_^h_%7AzvSWm89y}BX6 z&L$k|l4?naYDCqcU5I1FidYfDyl#k^D zzDbkcr;Lk}GX@|^A7+QxF<>-HIQl6M;bdZ$YQJI6>CsR;B|av`_|abkm1iN-!Xn~$ zTEl**;~A16D`B&=bn`_s>^(wk39sKE?6^TScbb`Ujj{&g=mv&h1iT67s(pCG6!ZY* 
zZZs`jD|fGI*Q^1wylQ+X5nJ2AW1kFJu$5WJhnPL()~LyXA>zm5^zl%0S&4AfS)9PPh;tXlkl|r!6Qx5^p>t17X?Jts znQUBwX;W6P%PZ(Pu^?=EK?Dj_KV2|bFnn1(@OkoKN!js^tBS^WSe*j{$pF7Gv9Z0- zi4zZJ+guu3j5{jad4=^06UB!%s$vfC5yM?oR?s~u|AHD9?K9{G78vQcdaxHuH@0(l zxEs`1H^$rr`uQo}`CL;|GxFzR(Qb?&iyebdB|AGc+;sow?+LQtBG7*M0>dE0EovCz zHm_cGOS_H4<8+QYm{O?u6N2pnIt*~t{Dt$WJk9O8C8QN^hww_Z0Fm{0&4B6oUP3V+ zve&b{&Z|QdZvTk!){94HK`K9Pr%GF+^;s)Ok}7kWiaiR}<{*s%1#)0mpc_dFc7sFS zNPuc0{i0rRF)=+s`*^FDAC$R+wPS&^CKPUsTpfDv=GJ*l*ZCdaob0~&3$bvvXPKtyiJ^g=h-;kWJsXE~!F1iQeyY!S zcAKgw^>gE+0h7DoRA=#1{d%K4wfox64yy)j#TK01x@Z}xV?mRTwIh8-%ghlxNeUaLSZt#^SpH8!4|ruP%to9Hc0UbuXV&<72xh4^@|H@6uUh~kRBo{Ni%n;D#)Ifz|Ngo}uPEr&Ozz&>|6 zi($;Ot0pm;($gL7Ox)6^?m-pX=vN9|S&Z0zt`;8@oaRvNSkd@NF9eBg4rc@g5B!0D z)K)>7H2k|{CbB$ef7ZeX z7A?4q6D|^=PXH$w1f^D^Ym|H{?IxH3B$MM*(h`$8GZmPbA^qllDU>$Xpu13jO(a zaa6k^a2+ARy23LqqkRok7u%JFhKFkrF9S^UlJ&)9r$=g8LyF~8^OACL*e17BYYVF_ zL=~hf(b)A-U6qlPdjFXLw}lk3ali4qEbY9>67W#enk~;4))n zif?1CFh#km3@YQXh{aoz#z=A?&|P*W068+k8kL5W<2(Q8f@AelKNq7qu?hS04fs2W zCgn3CP6&olcCWn{0)Q{{0r^y2()HroE&2PDyRDNO!3U9XgiNJ0O}|jSg$LtEDw!{xCgj&NW^irm%Ol6)tJgcitC%GKvbk z4M_t^e-J%oOq{?6t=#W5`?-B-yU0Ddu`9W=K}J~CL|+28cOYWN9T>D$x-4vpyePUu zWh=yF9O0}dhEpzSk&*tiIKV$JBqSE=&3f zKzbyNBDOQ8ph3dZH;tAXRb@6sEpR#|1` z3oQLi95rpy#E#jv0B%ZU<>|wI%K*r(-MEnewmo3&E+Z7Ynuv#d&-l*@R01{8?ZF4d zXxQ=*UXiCBiT-F`^-Ece1bL4t+Q;pPX{)Z-LOcR;6ciWNCbAGE94x9%%r5x>$7UpG z&am$=!DBB}tvcjPr+0s?0^?OtH`v!_#j)e%8SGm?`H6j3c*Si-9+;u*0kCjAb0*xu z5j>Ph@b?0u2aEbr^1;=dY%6McxOXW`xjfzGB^#q7QH@J zIv*Uc*oERk%%L864&jEcvqCo_@?z?t^v8M|J#!wMrSyw+=eIeG|N36IPPFzy#&d%U zV-&Ts6+XXyu}2~5+!Ne|yUW6ly#_oNfB!OjSG8`G;*#12EBq7yN;cbL?&2q;jZn9> z6hmajhKA_C(>td4#HJj4v>sT{5==$_B)ykrB9JIaf6!I{R)` z!XTM*A755gr9Mp%B9u-h4Gc~P67HiK-elJhDuG^aE~DqbQ}=Mj?)vOQ`s4Gz8^`iM zTC?Hc;|Sf}OllTXXfd-&9>AFuxe@NSXxozZ%VMbBl+p%uu8rXbVUo5fnCG5=WukC7 z@R!ad^CF5jV|FvvV$xI7fyMIuU0W9*Wi3E~isJ`Xdp2|N!CIJvprHTY_$31MD9=cW z*66A7es{%VV<#uCblBLsHhh{zoP2dS=V|4kKaBMC!+;hmRkW5lpsalLIPaZ5s_kYjD-N)fba&XN`8AJih2@MR{~po~?*EC1 zKur8?pz02s#)}=R!da}l#3bK3^IpK-pR2vw`Mzi2(4b2G^;rZGaPp@TvWSC0IjIZi zxGn2yVBl@sM$-N!02fD^tWs|rsA*33w1ow;2LYuT^*@h@)$D|^l081t!)qBPPuMj- z#ziG8A3`Abi(iRL$ zB+k@!1DdEWaz7)BLpUC*CGNY-qYhR;Wz!&TvM>-jMcPh$>+q#5~7Ghe+`Txtgw z++I|A*~HF6?BL#%Jh1QbeccbvnQLS2?6|y0sNcBi?t#6{`Pd)tDsNg%wE!c?PWGph zDu^C{I`C#b8${5K(47!>t7gwq*=XcJOCXGUyx)Jg`d8{~>7u<<^n?57c2c?(Sa5_SjpNy0>_@@PI=+65?RU1R~Q8X8Ma8tK$NpWjTw9O z>NrV{US4eNj3YA&11N}Ax?Y{uVjl6ysEGhCxO3+rdL6iN z+U*)qgA;_SMlF~~V3TxQ0poQ2WGVO=?+E;ecQco%TstA|>2XNF_WXr&Tmz`z`nN2Y z>{%ybdi=*TW135&V`Ix;0@vJuBw!9Fs?^!f_vtE8s3$;=VsEGhfN8($F(b#pGbFKf zYjs8y-sA=ISe- z2HR3WBdzodJz>5b7m}n3-CS;ZM02x2j@nm!k4GdMv|z0v6R6@UqXou1#b#*2NAny# z5Va~C!EKibt6piHEaQq zDpk^W0F zMl|o4#)Caac0G`{sPt~?%33XPa!Zy_XQ$p=q3Jn0*)}m!dr(=%I&?bN65dapr25bK zubqs>bB$5Et6HOa-Zqofcya=*06aEHpk^MA;qaj&ZnqfjxTSL#07X`44nG&0l5Y^k z%X4Re=knQ;-;{)qW4}$OEd0+qpuDinB1w|O8e?hz_AS2oOR`~pmVM0xR%-}Q0W4;u zY(ffXi{A#zO(t;K2_Kgv0C9r>;(L&=O!iNs-0p$4+S`?Y8MwLn6le)MFrkOg-JtlTOO&(;LW<_uJBsL^zO|NVEr z7F$48?+X_M;n+haHzhbc&;T$Z$Ry4Tcs`a`ICuzQiX*n%W4x1V!!GuP3%4GShZiUb z_vY~o;`sqQNK`o!s8sv~b~0}8a!pBZ+^9%|9i{!!Cr_+kpH?(R*jKIH*m1?<7bZTl zI*<<4A+W)g0dw&4BBL#>OEunwh^&mPC3#_G$dE4wN>~z7P?#MotEi|4(GZM$jl^FD z_Yp|T29RO_)}3o>@4|j>frFsQ_s1X~+zTB1;V(VL7&hmIq1Gg=4@i<mM z{t}k-lX8M`^%BDZB$g614u~2FmELgdB`0deT_zC|)DW~K4Wc5z!cxf}b^8vWaO|<) z-*#M4*+>L0Ra{9Z0((=7XKH`|Pd?hngu;Qv(Hlbr30Ti+q-4I%&Ue_@a3yxY;jS?7vl=dpF7T4ILDxITz3 zkbX|0F1SysWA)xWS?ej*Qmqqwu1e?nVutpKdzPt`G ziZq-$V!&N4R7C95#IUsI;5Hac@{&UJIFxvrO44H_UY81Pdcc) zQCdh$48p02RS8;U$^NOVtYS 
zdfd+&6jxfq@d=XVn#boiKf0qo#@lyee+%YIO@JV?n4*EB+x>_mvJF3}q1hk6mwu#58m)Q=b?XlSj#jAO}%j3pyH_%sH>;9Q7i@3OmKgvSim$OjHEgeyj!d9MW8GLu z2qJ|P(5bF@E|OR;!pDmM$yR>gQsOYc&d=G`zyeX=Co3ZD)wly|YA?AGPI?3xkP2WT zhOx>MP|^&Ij$VUdAHAR)KmUA(SJrLLu2wYBnO zq7Xly0MWC!<&t&;T`6=gV!FwxQGf4{CcN-)TT$gC<y#IJ&P(zn{WU6J^c=(njna@pvWMUEOv)r0)tp&&jGhH>(;HC zfc!R!Q?BE&sBy{%3$AivhtQz67lBGt{nvBHZ_PaP$h&_QZm$N|GQ>b&0t4xxa|Ty- z4IhP)2OKlnSj>0Hp{@DrIYtYH7LYs&4j&HpgOCUibPj}VGxf#(JRxkhYx)AC{cS<8 z%a5&i&~*3>5M-db_lM7&_bAO;mtCj{~AK0kHU(aFP+gy}BW<2nhRnYznn*-&_fr{^6(;_$Yf@I4 zz}{srk4B2VJkZkvB?RPb743Jgr3 z1tc8?J&)qVrE=IF8jl**7EO;GroG6DCczAs=iorqSwl9Ym_Eel8Z;U@gG>V1Aqe9S zK3k2~yzhy7HuL!x+X6`ce}C=1VP}8p?|;R`HSPVcS||QA#o2g)NYWksTkxk|8tbmi zC)#ii#=o$s+2`(*MzIE)E+`7{8Ko z{&5AnZyW*H{^iT-8L7W0V~>UQfPB8?)+@l&fRlp4|G>+%Jzw;Dq7z61hGHDN?{i&p==d%_6l%Ho!FYA|-J5HW}ch1G*_ zs104SZ2MWfnt{UKtvc6VogJ=VZ?1)ig|I9LMi^iNBoYKD>QTVDo(my|DHBY-&nHY| zbo!0A!zEfNx@-T@wGzJ)=cy+Eyb{U*J+6dwg<=k>nkvO8aobh{AVA`GfWu!$Uj!<6 zee&BL!$*t<5VnAr+uN8PhoHEbiQ$~L;nhR(?E0`1`>z4o19fEKm8R}C^IPuqmUExr z7ZcOMZVKhuh3gTy>ErW+Z&&Dl99<6;=!1asBjK_76qxF{wy~0~j_fCo9vO^{CF5jc z4GobqviNI$*|lpUl|4lMr#>Fg0@R4*a05D&G1$m@WC)^kFtVL=Qxf1|Qc1)DOXb;sGXG)yg}K}KdOaFd z$Q4FV9-7P$1SIh1#3Et0J}vDWoc9#<*9fwVIchU3xuD|WVyxI$7)tp;lk9EAM)k5Tm2XGbuwSoo|+_P}9 z4!{XDw1|st(mKD26X`ppFZx!%&_21^-(|MVc0hA`0bLNX6d!qb2eMGK@e4gj-)DQReAqz=3lX%Yrb;pBvd>!J=2 z1IUtzVZId1GjO^=3oKT7;73HQBA=QN&Q7}L_|ocU>R|J7G(5mwK9_rTZ>0!Jj-#)A zKGm(Hj6%+I3pKEXy`3L!-9y*sS#$U#s`+-Rh5!C}96A$$N zJVAOAuyk)RsOjh5ntm8vKKuEYOh>-@6dZ=Y{H-M$3Abpsap zXbBIK@)cchZax!$(E;ILY^u|jb?cUhsHo^6;ShOuh!g-nT!#CK07#@xwxye0LDU3X zQqJI|SH@S4)|VvAHv;U*4|x-O`){QerbBbKoebG z{+7+_LFu9W`2V-h+o0i~-5%?G6oOD6YE8A{$D_CYV2fK)()I3&&Fj9t!~GHfRya^` zV|ay*dBzX;v^X*tUM9mzo3F38JJ4fCW_e?K^ zAHY)+BpYan#iJm)Q7?SDiy_@}UtUS`=i>D4pCP7+S=RBxi}d)YC$wOO09F9xhec7$}^03#RziC0$);&Z#9Afy$ZF0;8f7W80Ap1 zf#c=!5HKm4qnS*JMO~j@`gO^f2AL@s3NQw=<9VoJmFZyM`K7F#Ujq(82{mvs65vlH z*8|xH9yH&R``ApiafUif!* z!*_Sz`2X0CUrU7qU5k1Y{FCYN&K=Sm|G993-{;)Rx6j6yp0Mh?tf^nnbN%uyRf(nz zNekk2`}BL8gyrRKy+7dp@BjQs!y)0thG>5E3!$cEpm@@z@CA@r%Q z{!Vgzw9dul>%W0hwuhO~@+!~uJ<>4|LvU5 z5fCegBy3yn zg&Mgr0VSMkjM#RME0zEC;*6sNrywl0rN(P)eboX4XJE(TNou`W$h5f8*tu%se{M|5 zR+=e*Q=i?j=wLp)K#UQ>!+w^O%lms6nbM7@4*ChJ)oa_fZKQ+&-1NZiqjBWO?X+7U zhPo*+BEnPOuGs)PTX6U_=>s=nPyTCYxUPIpQ@m+QB6@>?3z{50+-r?YSML%zu2i;q z>(Tm3&E6KNnnOpwW-r9mb4h9&`0g1M>J#9GM^QcS7fSKeA1pWZVbA;@e%Q|f6Bh?$ z5794t(X!}*yFW0g(kgyE@8P8two)f1COg+z*CiTVC)?LE>`LvzErnvy0T#Im^KZeF z^z|!P(VJV~D1Z_fFi~SLn5bwlI3o5t@3UYhhQ`KpP>W#TIO=_y5pc9nxr5E~MGHQh zD;fJ6Y{#^4vhhoS*J(ORWlB&y?c6darIo#WgQK zMEM?@od8egxttu3-lR#e$exSSjsNPFh7E-}8<`r&D$cd4lB?XoY){}o9R?G@DV_TI z^anAtp6GW1QIL4HjS|g>+fCPlcSa<05cjtnCUi)QfmF5T#cyRV}qB#eRln?79!O&77#-ngUwXNcg za~0?qECv(Yj4n8$>TLKq&}9tude9qtV6{6EKd@&AqbKCBjXEmrfFfKrQ9!Kd)wTud zE-lWX7U3|!1)xMHry+QsM|IhNw?yf)M!-ym%PsF~NNqrJ;A4WlOU1RH7y(POAC+*Z z*5@6O@TU@m&mas#sMI2yOe%O}Xm7j&(0hFX5G)3S1$73(K^t3yIBkhSkrjtO_tk?mjJvrwS9pEuEGJN@N#Gb2+fZ=M$>18e-n!U|w)yFB zC=h|j*V$tJJm1B(R$};yBN#!lh-tQgu)l}HT$BW-CR=BAEL|<3f>^e70-9VL4tG{& z5sObwL3b2tbK(N6Ls+-2S`?1Ar9zYMZ7VKLumuiBI1G)9j3gtRW*d-y24qEM4dhqo zf~F&ITd8}_jK?uZrHFOphIU_#iP;P$P_-r=LI6mm<1}l86^)FVNE(Cn0k|L`Fpy+G z)RATzf|oN$8~E$`ZkXNLI!Xen^r`+phV0PO4Y8}`sg)dN24&X!ygrc@G*&3K0rH4r z(hgGt<}W7%Wo~!8V%V0W=NJh9(S$%O!ZDKT+SjyzHm&dpBEeBM?4Q$*a&xBK*H)6l z0%DI6i}&>Fd612`-KoUJr(`gxcL0j_@ez`Ga35GaYrDv7fk2w~=E7<*yVtul+fa{i za&dKi*dYp`3cKtogS18jFNq0iD}gC=KZ}YrlFr@n;p_2Sgqb9~kun!03jB*{>mdx7 za`#dph%MwflZ!x4j%}2nRdtex_%K71y|Fh!#fH9k5hhJ~!Jr!dA_zV`SWBS{Vg!lj zpP+Jp#>!~CX^+6b28>oxW?^e4fheubsKAQshUJV#7|^NQnr&FKcM(C9Sr~$UC^v6r 
z`tT^JJy;kF&Pr=6_Y53t$E>l=Ziz*w(n@!12%?Som{_q|Fkq-Q^ML9F#4l-=!E`V5 zFqVPI#XS}qMT7XG>r!LT{Q|FCxl)~textb8 zftwpkB^ClVKMpW~Ww-bFo!`lQ*Ir&(QK3rq2h{1Kn2uon`r&W%VbC%}E#(L^oD)lX zG$6~zL`NHpxxp_MkshA)E6dV*KKBL&2Qa>tknnSU44rv--f_`T=ee~QoZ{}_&q8Gm zr7SR7ULgytnxgTE6R`=POB%YBm6e4uTG4xhO6rE;uDQW?-BqF) zty6N7%a@E(Hiaz|;~V7Np?q(-br?qmg;j^%ylLH=zN>P+vGk;#pW6UT{3@Gzhxj&a z5?Z(Jk2hwuGnqm;qGL~T@%m44Ut5`ZMu-Ayn};K30WL^zCUz+^nd)e7>UnffZn8lH zAEr*XroF#=JFCsi^X(Sn8GDf&*-5=Mxph?+>n%5r?byV}Cjep*B9QmQ(5?+*n|Ow= zjqKn$JAm>*WPC2%V^&#c_~AFTJf(%F5S00(DofqDR(x{JD*pG8wN`kulGQx@yQ;oi z^ki#M@Zg{5>-A}?7=O}O{pmQfID4_jt${0!f9S;&8%yu&(ns=f!bx5DjnXw;Z{?~& zCp~Y~TWX^NYB553*SSg2Yg0S8J{jsYwMW1dgQQI_*4BJ^ku1GTLrP9ny8As`M3b@k zUpRMhCoF4HQ!Q=@kzH>u50@4#UcP)eim$^bZ8Ys}j`s<^&;NJmg}FsX3onF6XT1*} z{z<;L^Q5<+5>%}q$ROyPi9+7#g9ly{zET{!lYp4n2d6%{A_A74!q^A2ir$8;kh^{_ zi-VUp1puK|t3oGYs`vgm^cHMLs<%w^|G_IMAU|$rqx1?#Ba{zsJTC*{?5m2{p6R{d z$E_OQu^IdhjAZYk^~dsGFg{4p_>Z+8K~^e8My>URPeli1TFG*E$Ss&EvPHYLYj|{2 z{Ker99bxE&4NA+)&tL66|DbT=U{us{PV%+H#hKmdX&o|I!p?r`^xf1PJz1owqI0|a z2XL=bfAxrrYU2WdgaF2FGK#kRmYx$-^mhxLmamSEM#cl;I>iKJwj2zbEEzwW2GN|}F>?oY2rae6ig;X_n6iD8U(yw1El<_L|`>k%Hed3SXf{xDP-j!%|r<3iq z<)0%griN2= z{86_Qsbkc_OhYVycF37Aim3!UdBVP9OA#;BZ`d|HJrfbo;D4e!;02{`UaMF+%ctv8_k%OU0MkWj*M{FNL<{@HezlQ(dG#3PAKgYt>zgF z=SlcI|BG|dS#GoKXxj9UBb3)eUS0 z8wBh2FI>Vc7RXowNkGjQWkPgu*YMM_viuJrWDsm*jeP$w^U;%9L&LBOT$a05h29m< zKY)5HF#5-Q$>1wyZ^p0x=jFUui70uTzzL9TjZ{o(j#;^N-KI_ZAZTA>fX-H8k^0Q6 z0k9TF+Rx3Obw9cp9e8l2fE?92e!Ro96g*QbNG~N8s!us@?`he6lhCI|V9Zfs(Sr}S z$$QqVj44bA6&sj&I8iL1XVVY?ayUS=!K$E z=?rG}D+fO?^tK~#TlP|827{nj9z4>(<0zBJwTR_ChuMlSBd+`PAHUDp8k)pJ%W&&x z@&&U7{U%B9Dz9<}j;#S0;p3F61d^zQv1c=rCeo1owdoy%(^$AvK+2mFuq=XQK>%7G z#xwv*1t-{I*3x4wzALh0*TF6Z6oCb!wpms*x+=q4+sX8eJ{mOha|i#wQ9*2=lDGc9 zuY{O&|9KX5J{iP-obz2sbn`Ew<}VCNVnw5jBHwhZ^}DrQA3oSEStC_f_X&iX5W2;| zw^9|M;*K)27GQ#pNrPbE>hUjv{A*R}%gKsHMROS7OJagZXSfkjTESBQfXXi zf{F+XPUb@wh#h_BS*)n2NY>WaB4|MZ7=r*fL5;ccO&!3l z0F!uz_3LMNm%K%Wg-!fcbiu46{?}#%Xy=QM95GT0hKDWIj_{>o_|M!}EFABQ3KQB0 z>5H%OsWAY^lB3j3irOZKff4YIvDKGmriKq%zq&42 z`UIyj-p0;~qFDeWq%(RhOTSlxE6BeHmLJmmSncgo)C-5~FZs&ucXauf_l85fgj&SI z+XZN01$zK18q|UYt;p-@u`tj@+%S?Z)jW`uyJkM`Ni7)i{T8=%;}&TLt9=>ou^Szo_3(Ms z!HyJM<@_lt0}yu(FYtKfGQC|DO*H{ta!#+H_eX+{8hy|BRYZ+n4= z6(zc1L)24u6;)N|m3?SiQ{i%1{MgbnEsBwbMp|z7@hJU{bkBjIxW4k5FY;_nJz(V8y4!#K{2($IpMcP8W>bU*LRCDB*L1h^%bk-Y2;^gMR|ekCh6K4_!%* z`q}pXa#4^U9QO^-yj_*8VCbiz2!impNs{^?M2?_-_LjgCpQxh z2){a9oIT*zG=$r5F4fRJ(hPOeq)3KnNcq>g015#|Z+=~sqy3ZdV{P7;;*HF^St45? 
zr*g{qT1Wmjw%X1)QB7fCO`8iutp|IFBE!mnen*XD9C_jPV6sRDqu~;Gbb=_Eii(QJ zSxE$7?afM(B38P5&dm6%BFH zCv9SU{Dz{{{Vq6&MmP)jZ9O9v7o8eQTQS-M9l8n{r>e| zus@!YUl%qw&2%mCW1{Y!`^`7y^qccCqEW$_bsT$=8(($Ml(?`h*1J3BQkbo5+C5T zmF}yqM!6Snix#gbl%s-3PxwpafW^i&DRK{<>w7v@L6JdIGRe1D9$epsou(XE5gv8; zvHE(r51~_O!krSaW)T~QMQc+@Si$k^gycg5|DJZ-y;u`7*lS-#W?MW7I<0x8_UXS1 zC)K2V6gIKhiQ9=u&G-G^LiQr}HEedUS(H^T^<2TrcXW5Yly?*AL*Cx%{YLNKNk_Tx z1~k+pZz>Ze_r;elgM7hPBTLx4Scj3nD-E|)ReKBvXmhO6Io<8bT*UsNCh&l>xb?V) z=45c&sU+vZ#KeB%(_s){Ux2*m1>~xS7mcP+fICH1KqDp)^S3Lq4#5U02Tk5D#rDg1 z)Xfh(k$S;nI|ofeK&uC#q&czXo0_$Wl&chV(C`!0fABAyZf?=*AI=j`z2iK2%xTL+ zM0H45P}HTxF1__}f^gbkbVe1skUC$<^@vNdgy#mvK^7%wB5zqVpr8=P zxU)ByWa`r>z^60=_M!bzyta%`%!hF_ha8+{4>SQdxP-X!h3wh5lOEeWiGS2lF-PV+kG|_eI z%mI>1i#?{%SxEEQ;!(u1Hw**R_X4_-PT1M0{XpFeXvL`iBHU+lUur++?11Uy;kD3P zH=@?g%4o$GdQQIG$9&4g+aKqV{X(Q}ru5I7d4adw8$Zt5t?)BsE3}R((4EhmoS2|- zhN#goocVZK-$md|qKhF*CaA7fjQD!T?c9%2)AWMPFYWd|vv{Yz3`L@Lh2RXI(Y`i` zpr)11I&p7BS6W{ZxG~|@!#mxc@=jM!i!CRSW*sPuVHh8?Oh}Etat?HZPJF~ydL&aH zM2Qf_#wxqXef=OimP6OJ3t*N%eR0t9#y;l#{oK#9jz=*%937pjH)KoJ9NsQjcA{aX zIi$U}MoeCH!yb`cdTjBxsqYn9Fp0Om{{F%axqvMe(5AV!9PyHuSNc?#d$MsG%@Kh} zLDdgT&Say(=YCcibNu^Tb5Fb(^~X00gdhoK2CG1lRI<|@;FpAn;Gy`5nF||~1h$mQ z_ql0vs{5)h8*0(pykGWkT^;s`%0T4w1H<;nUcrMd4duR^Y=U=z$7rw(_B;1ZhiKsy z%0cBGm`F=n6P~eB;Z2$|ipEKt?nonr;Af9U-O&SBs<2{p@C6W4N=jv>sU%{>AjG!! zQ_Apipvf-=do;E`7#(gZiIwn+o*7V*R61Aq>zZEkzB_uQo6J^PS6@m9%XBE_ElU2# zm&%1}fdbtNC{+iDFmlCnY7eS4L5D`%wO*jUUeo{y2c$k>Mgb}tZM49txI8E2lO`Cx z_2Dx|!Ohl3pjx9GiP9oSyY9@bW#5N;C{&6pUqJI6Fz)#^CJH7dTv*j|u$E zUw;avTMQm6r{$*)AD$C03|+z!l|P@L6947WV?p7ocYV|Nf|FUQii_EDdJXrxY*9L= zke5FYbA=U{YH|ExlVsuv%XEF~iQ}!OI^XGwo|$H|%M6E@FE`o09gK-8Jg2P+ik`{ zp|M-4e;2$H3;|fTwEAGPocl7nLy>NtK+hp>-|tiE_!o{(xyy{~&R*c%WOnamYWMlY zx+kR(P%dFYD4ICxVUsExsZplvQ$38fHGU?0+|m0kP&?qaTIq=iw=^?z0tYSAC}Vwz zukCYaGbB|b#Ae^u_kGB`zIO%Qc=!dcj{b?u3CV^fmWev!W&W(d!50gT>ccvFif4RM z{*caP?Z}F@J1%Q2H~E!1C}jW0Rw?1!tA&|Ngo&kyi}>JN02JZA2~?oFz`5+q_nsiYC5RFA^3dZ3Bc>WLmD7?j~ojX8XWPZm}|K_ZReG=myX{v^q&v&n-#s(e~0uBADdD!Q1{pjh`jiy-@rp*V; zn}()iC5%N~RL{nS3r?Q7)_7{(!=C{f&3x3}1U>=q*rBWN^r)t$<|xGoz(KOSn>;$1 zsZVM@;y6vz(EZ{K7=c@v96lt%VpvSTD0Q-te~_9u;Uq9{kmZM!JF^=$sU5ENqk0e_p5NFxb3UshQy6_OQdsC#k8Ufg@Z zHIJMRHFlVT@HR`)!7=lC(`?fjMXOJWp*>l1`E7S2TGV~0)fPSzuHyF}1 zVKH!2bCPQ@1{EFW9CytRRp!Cp;hB|5qgDiiO*75FS zt6^U9T4eb?(kEE`Nbvc~0ZBxn#Q6nQJ$~r3tqjQ zyFfNw9Sj16?UtT6I*|0idqmXK*3ytXYL6qu75!K=3z@0$w=!XgMuNv~RJMScOFT7D z9%)%)nJLJ}*ZH6U+s>@?}&0=54Z=8*D8#|E60&aPxV)K=}bWM2mj>KMS%a)577iDh80C{jKJG9^O*Q_@(}XAfE- z5h&W*cL9x51h-7&3i|#;+vBkT?8NUP$p~Htbks8sK3}Q2fy3Z7|7{sp*PNa^w*nSr zR!#Q>BB|-l%$wUTUgbE{<9lWe+QS!*g4kfPu$=jr5HisZa@MZn*fcICCyTH#=YDyI zycJwLe1Fs{xU??oiSX(d3;0PrU$I`rS|>?0@~`HUR|ZK}?|vzHLhqt4254ylCP357 zndyAF|2-*w4PvGKK~N&N!yOpg`gDI+WHH`z_qfS-B5d)yJ@g#a4()umc*-#RLehic zVMXSB7thd%CTB4tfgaCDuN`OK6}(H9zW3XjUa76y=+?2Raf5fK&I)Qyq(_7J`f2PM+Bd5cNV6+#mZ+TQ=VhOZ;(ir@+^X(T7u48}ds;Si|@NaYA;7%GCsxe)NyXBWF* z{%ih)73;O04fd|&TW8#i6rTMFDLg>3>4C#N?p>Zft*-GmQT{ED`U2=;s^M#Q?tcE} zZaHk3@pv%M^$+jd3t)n-v^o2}h{*f1Z`7B`yh=#{6h0)pV##2mn$i1|*s(SH+D9j| zmLKr_R-zya>;`JpVso6?aXZ3bn15kQm;(RPhFa#&oT7|}hkuuTvAJ%ho+YcK^2|>D z%W|g2Vn+=wuLZfzh^Q)Oqe{JVFg(c?JD|lj<&ISS_o`oWN8Hu*-->3o-L5qJF~uQ$ zmfk5mA$i|0_o4zNltW#|(z=71xyvi+k6C`L$OfY0>22n|)Naw;O43u&Nc3pKHh`!A_5{x2}q|PjdV(PiG)Z$Miy+cSO9;{-DI#5hNXR!PYwf+)KIc2<`knV5*Yfgx=bX=cVvKv-_dQ6yRAgL1 znB9MUKAq((#A)@10WQVC@`_kn+fKJ-@7tGnY zls?+C+k9$7kAe_aZI5QpPP0PeD1Y}o>rIhSZ2Yr@iritAC^89kvmsK{Z)g!!3QP_u z^saofm>y6P00{&0*i84()r{CTn>w;WEOdWA6db~QX9NGUK1h?m!O=PBxbABP(ZJ&F zj?-Yvd%hb(;=f9zg<%;H{$t?po(()t{sO1fgU6Z#bz)Pl@`dE-L<7$Td3j{kuW8?` 
zyj#rg%i{>1H^u*)8xMj(@lgG#N1r$kJ%|B9F22Q8ZD+{;kX5_G$u-|HcXNr)@hkpy z*?;pj1Kvs@X^^a1d)|*2!+aI{X)1@B`Uh>)5Xv>iglue%WN94rJUen@;vW9za1@;l zN8BbwWPE?&-lfqVc)t_;SXvdGSG4X~xR;0@J*c(gzlZ<%KNCv!Ea75QYh_xE`I@h4 zFa83q)K+;@NlF}Wj5#s#Pu*XKhZ+{F>ilNhKPvvGlhCZv*@zHS=*R+ii&lScH2!^l z+C%gkX0&M2*QUxHviD1q)-+9(p8jJPo1V!b4|YB}Lv zhp-K0rWHoWO-<{aKCjf=uh3%~_c1VZ2=7;oqg9KzgaSh19v~k`U;)*yXW+06Ev69H z9AL6{nf`u+lM0+aqS_Mh`Mv{Orb*fN&!G(la09IY*+5uipcqBCNNDFU3nYUulk3(0 zp4}Qou#QmIAwTxdu4=HSTlAtt7Lmi*BGOWR|AMR1U)aC4JqAhTb+5}}&U*ik;G_PZ z>Cm8zE`kv3e>rP<0h~wt{SYY^QV>AYgAmjr9U35fdW}$cK_*^Jcl59P^+B$> z=6AS!9VANNT8{*E2wVj`I#!b8Ix-gEdgLOZwX5MNIQAh@qPKWtC>$YDU$xa=9Vrv& zKB7bG!sLw2(^+_K?-uVD5!~j9torffj&I`)rItthF5r$^q6_7kjQNFyJutcjVyBVZ zBRIzp$)z=@ZxF@q8QCi!0EM`CB-i!M_#LSN_kIYXA?i(flF}#XvaA z>GhX<5>8U18wQ#mf%~=xQcy&21ZUYZ5RZYJgA>VXpbal#Hpid{GQEEA?$h|@jTF@Y zD*+iqWu4RoRKi`>a8kxOVJiHl?iSU{dv*me{u359r`x5{EBy=x0y-C6ZzGQZZk~uI z;1K9}v}%#_4^lnEAr9DDuX3JrS3>tiIFuka97Wp2yo8FBEtGK)(;R3&;MG?3FGlu* zJSk#Euzgtpe_ljj`Lj=CvlDdJ@koIOi3NaeiLfUib_%bTwnqY8japjT9xc4AV@kn{ zmr)n`QYX-TMTd&-K1raf@#ZZw1oIl5!3kf>NdCg%SL*)bugYWGK$e4?u1@v9^LZ4$ z228JnMBOZnMBp}qMvN@i%TNUZ4g|{rOG02i6RkLc*a-R$zJUfoup9sNxq}7NH515) z5&003FcL8+#Df`U*ejdZ9W^R-cqIs%5&)gM!8oY4)MW_$MI56NJKSLJuRp3C87(mb z>CtEY2|U@%(KvEVVum48Tc=wQ!A6QOI)Ol4vd1rBDA`sBB(i6hxFNUwztgYtl;#`3 zy%@>#5!LKAr7g;dab4nhLGJ}KReQ;X7b{;9A-MQ`S066XUWjSs7sSUKk^|xo8({dS z7!Uqe(D(y%MI0gjC%8=un$aO+^S5m_P&1K&FTxZD=LwMF{eX5*>^m4G42XUWCKy5= z1QUwi8C6pzn6pZ$bMOYl?HytxWIuKFx=G3C=8x>X`B7TmJmJRHY}MVsv5>Kl?B%{ZA=p@UA@%cA;Y34J;gZ;ks;08(il! zu3gCaY&$t8sC6&4v{qjZ^TyuC^VenF_O1<8JFdABZ_loh;s`SrsDE|^ra zHn)vDo+IX-5JM-zSR?QSlmJNdUp?qf!pnRFw0A@S16flFg26*fD`>#sZw%NNxir$8zrxTmX26UvT2f+_)SKIuB7b3;Hg&xVzPD8oTA$@+5& zOpXw38p0jb(_7{+cMc&7U|md(dUUb!tCzC<`SX~9w5f%erJ0$SZUwn_3-MAWEQ^J5 z_63GD7#nlsJJ14HtwDp4%5C-(#O`N1Hq7ljBY7xkznsrEDgd_zh?rp^Q0_0iNBW8k zP5x^$w=95~-d{~Vv|!e&uES#hDyjbaQh}M0^>Uv=y+1m4 z`&HGaKQQgzJ)JU$pl(0%cE1-v{%*VJE@im5}mj@~gRBGd)z+k>IQ!7wf z-8;Yx0^YNa6W<#R-R2$j4dK)vfzv1UnjSB}^G64M7xDc3#BmG7+zu=%7(%zzC>M`H z4db@OJ_t4N-}5CEMzjt68GT_@Jn7L6VNkV^jI69RWF?4b5{$s}=q?g4ECYay0KOJI zDYO7yt=xmk4~!rJT!NRuxD|qdf~-xY8-~dc@*Xfm;i~L_W+iZ_BkialbpGpeBNE3g z!XHEtKsw_-V`e!^8W>Ddti!Po{K5lPAf%?RVVXtSpqg0RDWf=fm86^kl7;zSZ@dh> zkraAGVz0Kc%FpQ07}Q5fMTQ|pZ7|wU3&(A$bbyAHS?m{3Cz1ird8P8)^c~nW%OWm^Kv#GJwRD78 z0`>e+D7*vhZx3v-gxS9Q4fX~UY6jOx&A@SnpnXUZ3}%p{Q}CrR+jyG(@Dt!Y{%zX zyxKIN-5{IGa|nhI_B@av2TxCCP7&cb@>Gb6Jd}u$#zpXOwHpmb4uj0tvOw4$0Gpd4 zMD>Z7Tmy$?5gfVzghaN6Z}0x~IuidU1c@9tLWsPlpWpOqc_flLNdhi((e%#9MmThp zV73@9ii|Ovcj~JF@BDuI;pd{Lx70+w+uzMre4u0y&gLEM4th?oJz^Q&f!Yila_>R+ z>U($91G=MtP94;|S-0(}QK(~fdRBSs=R!N%t_jKV!NTW3R}F=_M4Vp{cF+qE=kn#z zct+))NPQ1P(Qr^8-N>K^Gk?hE58%Sb0MKo|sPuUh)VO;MAksbAsEUktVp-9-d^;x1 zcIB5nQtj!RDZ$RI`PA}tSTPPsx2mtb{J-hbR8G%sVzJ8<$loN!xY3|OkcSBlNBKi{ zW>m@TNxY7gO2^UX6H@2u_x{kYFMjfwO=b{dl`r+H(7|RQd$mtXxoYG2Sq@UCfSaJ% z5)^rY6C!#Q<;0F48=Jl3FLz(xmE|D(v6`(LQl*oiANdaRe=%oZHAQL20lmPYgjrPS zqoKX6R;&{lBx*l3K0ghf@IPLSjc!f%kdl#T-h+zbgukMU3rB zysJ2$a;fUQ{I4DJ-G3t%x^1YeNIMaVeBbMYs`lDJK0|OoUzc@Vx6-K%pWx$djQ$Jl zdG&9!XBHQfY2J=~Ob?b){4Wj_%$G(B$QFQ}O6!q(wDxA-^vicWYdg4zDj!vH?@S_E zA`8QHQ)i8H$j?JWPOA%zPuNFLLaw zI*>#lg3qKN%ROM43VYCM-JTA`E_8o!dvheN59cg|Eg?4`I~TxH2Ej8NYR@M=-uPW$ ze-61E1lK=rjUts7U=rL6d#x4=ZKz38V@&?v?hWuU^H`zN+e3Gw|K-c85uKT^89Avg*vu z7k~U1H(%JirhPp8)_dgiBC^@0A1Gfvbd$BB;k0f{D`h< zfBK@q2*GGht^<$Vmc2p{nc1`6-=T*pYF7U?6rfIEiS|6f@V0g8s7WpVcwD1y^j03m zR{McJC*(@-&p`#Ds4^Vi+wsnI)miJPJY^T5fL|npc^Cac$kJxgS5&e#=DI?Bo!Hl* zZ%^;@0U}s2K^tClt#eZlzK^O#KN~75={u1SqzY65kn%>=x1%Lw{?m_vx~C)SBDpP= zfBe?9ZocKS9^g2!wX#JbShz!_uC7WO_*#;kFc3c$C^#PSaLC@nd!b$E9W4K5(r2qk 
zt<9L(1NAR3u$+s`haNMerGPqQQ&cA2=WkPp#y6qw(m} zUEotNq3#1HctBR|ScFveONK4poqo8kzq+5HIr(%rofL_*0=n@5tZK9uD#^PS!^+LL zo73GWVVP@m?TZ{}1pl6Z-(dnar$^;YYy|(bgVRzDej<+H{BLh478u1z4NlO$F&a;% zJ3AMc(a!GVZlgrIYkmWR?307n+G28w%$MqUM037^@b ztB&HY*e`tkRI-1N(`VQ@RA^kP2|~p;#8(TRNRK?0}XDXht?y$#Py*78cw!50wh?QKDji-$xn9pS`0dn@a2N;U~cpFe`kJ zsLEd0NhhE1c2w1qL|oqr*U}doqdj3~`y+GM!8cd-uP#!`@2PaBe*cW%*qr;GQ+273 zg@^6dO(~7*;FrmGR*4~t2IIXl z!X^bcRzi&Bbb-|{>(NTZZ;{0K7lz+M&^2D4DJ_rBt7Ih|;{ z^ghnp3Pa^%=Hp6&O42H~Q%&R5td>UzYK zXW|pI^;M{46Al$qy`%y~&E}+9%7nzpgEMgA@)}K|Vkjb` z>VOrODWKJJb^?kc;9Bx>Y5jmOl`R#)&!-yxmDT0p33MN`q5+Q3C;~JzvT6|f^`V1X z;?rMN-&#B2_5)TC{!(A=HjH)(vbfWyA}&j76u&L4;fv6K0w$n=$H6|1mj&ExjikS( zj%ol-_ZPd1>9aDM+Wwx4ZiH~QjplVnqPlPI*dTqhB4SBbtn=iSZOFAuaQkWZ*6*&r z49C^wb{k2#IH`ry=AHd-yl2bRmax$s2K<}8{nb1S86j;FrK!ZK`Q|qq{q`B;) zHiOr9tE|_|GJu{o-}(a?Sldh(*cS_7$iV)YA}mffSe4Bry!Nfw0p^9-!phFCZRQYW zPIX2J&Pv-msxY;W{MWvEdEN=$6WH3!;7c%nyV>;wLjmjY74sQF^c?h@tia-XU*C+_ z6gv)&*s$vOkAzq06uQ0EmwAm98X6dmA%o6fhlb9;Miw8NvDRfia`XqE)a5224{f*k zZ@(8I7hE=YuJ?Si_&)vcGaBm7&^4!HDUx8b^V$UtTs7~g3sqaUKPIhgc%n4xxL1<`+#v?-l2h-#y;tN&d*n&9{F_=N)R3h325uCr$ zo8y;TzNU~tg&lvON!yW&R;>0)T1B5Qk+TSWGIq6%C)am|RW2T3)LXw}uNz~!L9;Iv zdX&PSFo!c2THBEAX=p^5onO+NS*hq+OkwAz7|@*$Z6L^e`QFu+l$bK$_5A|y=UXS8C}`PKtF zFo4US^U3c=wCr~;Xc9SZ_?RijNtqC<5$R-?kI#Q;h_qnEOD}19JV=a+ZqX`k`a%1c z^d>Kp zf50s5@?K~ExssijUO-{TC1*2tWE5pi24Bx#^GV3-h^Cjd;&=0*{W^0mYKU7?K`~q= zU?i7>XYI(G2aP*hJQRDc*^SWbe)&WeXJHFWH0R4+OK(ixju}ko);C8+nlW|?>Y$^x zc6H2D3h9F3MO|e%8@ag2GUmsj1ql7N#~?VK&j zqJ-Scw?K=8^CHpdE#KM1)UJ-YRyUw%SopE5yEGN3u3>yo8Cek8x5W8?+d_ZLCHnF5 zs5)lPRr0>Xk$F8y;%MSKbFPY)*mMT+e1~~Sw-@3b9&C3Cp>CCxVw!OfF-L}7k_yeV zRQF@$Z2~d030RzV_Vxz1nMx`sU_nA(2IASyiDQFFJAS+Ix934H$;`p=R4!N2&W;T# ziGk2;uVz_<1{rS;LK#ih^+8;r&aSS<%;BLiF&-W_WQHOdB=iFOeVjE2A|ke0Zc3ze z<~$Mau^`+|9)2!pc$+osGb!<73AVH!uZltoL<;WQ@W}7q-123^$9H?$P=uFO@pCN6 zFqA1buVv3yOk07BFf8#UGscmPH*HIEvsKl+xwC3_bx7ciZ^$l|aK zZTOxnimid*18Io{pn(Y#H8l!{$0}_}@TkQjL53*`<}CYevYLobb~I=_d!Q?sS$m|U zs_LbNKZmwM!^3mHSfK}e$+zvpmKfX-e%vFsMKLxJ#w+-4fo(P|r|E8rDcq6gv3=83 zm}aMK<>tnp)tXcq3OoPOQsZ`~=4>8{n{()iMkalMBX?@?0m~8vzhy?-_QxB^Rb+e& zDC;g^13orV9HCk|SAojg(F4-x+};Y^W4D630_$>YC|CZAA?H=(5kFR~NhRtCD4NpK zop9YInSB*ma5E>c&iUkAkGDP+8^Uk%Ym+f<|Ls6tfH7HZS@T>+oik z<)EeU?4qH{O~hfR9^ig_T*RvYUVi8H_Vzr%(UsAot;?K;&A4I85)0?a=D@{JaU0?EIqFS|HKR45&mL~kXbI5pC@x7j=SuSi*=%jq|_vV z+s?G8fEqda==V2nTiv_wFZ4Ult&_93<>}XaTEc)id_NL7F$}YzSd?dhr;;;QXJ6k_ z6((sHmu|}knnWi2X&F`P#l^+4etzNrTA>B!Ebf_+fgh7>cmWQH#K3El~Kdu>@U{Ry*wWTZv%lRq-AW9O!A<>!LcZ!#9EHgtku%f@6}eB@lk( z5~9ULX=u+xE(B`}8vS)(-5p)~;%7$}3ymDTmTb!Q^R+ZO&$r4S8Z+O<`lw)tx})R; zJI6OBaM;7b#g)<4CI+E#7!;{edM&If^}!wOoW#-i=h?YAG!!!%8wTo0NQfCI<$#2A z4%l`y;dDUYD~Ha(Dw*Q--9nJ14vmg_cu63;-jLUe7rN7w>Q`h3hMgY{$*HN~$!N-e z&wZ^HZ4Q-1)tJ-i>OD^`}sDKYpA z=WBELrde1hCTGO()B5X5+p0-aA$@nM-}P=`8NXy)4Ny6SLY(dRZv%# zhnoZ`&SNOX;JJLD0C$T6@b!HRpfPMFCeX_c>(V6vzqlI?<}bpNE;Jl%w1Cq@iC`wk za>nfVamo3M$QzZ!kIKddphiA@Wh-&QuWR7w&Cc&Thg6(@`E%-no-`=l_uUSC{~mgOSlzeEiU4RH{gW1+r%SBrGq(h!N9BDz%KhK+{mT)c8eX?b6V^}fsc z(m7O?j%2LjMK_t;^1Mr7tmZseHp{1X^B-!k(01m@KFI09(6cYG=ft`qI`n1Tp1*0# zIhj%<4_zuvlPYH%SFea;Q>x!nuvK`^=f<}yiS{qlBhLTmvHdi@J8p+zQ2$GdBPeU- z@zS6bx*dmjCJ$O*O~mK?!O=vrGNa$SEa7zhQ(@>r!RUc+*KI$cyIQnfjKOF3jI?St zletJm&oRfH6P&QLt8{jXpRYo}%Ue>O z0VH4ASMC$arWe{ZL2N-64BMp@PmMeJ`*HKO$6AXp5hzfzi`h*-QfMN>tXXc863m|? 
ziy%5Y52!pZAD;>6=i;kru%K}?KR~n2KYwVdr1AHPw}Ychvr(**=AAo4Q0a08IrERb zLk_o9f;3#Nx~-?(tF|w^FsvSGwv^2qCUe?!c0>gpzWN=Xve(a@GUDhgbRoug<~*&a z41ukm2Vd*$(VF2-Q@5?^O&!G^_kD`U)89Rd6i`6Nk~K|du+ZP-E9y}H@UDJ#>0B@2 zWd&1Hd$CmAx>XmvProa&)5RqTZu0s*IPq{{K9acP<+aRP1=-3 zMGr4(s>UNm4F6@J-0uS7fqGg?U-oz6TCy8_%NZZL5c>i;2q|S^7%kiNF=Vqvwol+`= zY!p!Q^e0~v@!gWYWjCh?^WfITBu|*>q=ouB2tLu>%)}PD7fpcd#u9h#+BSJy=$oHB zv7{957R=T8#0`=1mv}dbBbS45Uza9eqeBGyBd{_>zyZsSAK$WjbzK(6OLo|bzt%nf zo>|5DN9u{Dn_)?x4IvR=FeKfJr67M&K;sx9(qG_+g^m5#s5C_7`@#6wOygpDX!HHD z(u=4a_l=z0-Q9#Db!1}=x`^aE94+QmroAzJH4L~O()>e&4rv(y^oP4vR>P@3&7Chb zOg()1`6m82A3wh-_#Sum^vHO531LXyf*AvoDhP40AY2AFm^NKw-(WDO%c-im3OZR* zF>hW6XliwYJ=yIA&Nxm`I&@D~PvgG?I%hrIMs_;7YqyH{@h_-9Gussx<9VoTxbpq= z#r^hlbIMoIxFK@AS~5f8&sA>OrCc_Jq{rVBmraL!r+Z~>feK>ZCOhY1Mkm7r>{^%b za@KrO)a+)l+)n@f$PXF?SH5p{(FKds(bA%M*HeKT_x94WTn>SJrcY%3U zSbMvi-PamoIPzMcI^zsN)QM5!_YRbZMgTbrAtvp&sqF0TmiG1*2D0Y%7iCU2y%eW% z<~bH(M8Gomiamd;>}6+P`o2JiX0L5_LC%KrOKrv<&$i2dte=6E-)o;9=_NH2 z;#amn{Jxv97SBbhLQWR_wL{5C&wrZgW@@qLLoa0|V4TE#R~r7at!8YC>2D=O9i}P*trYVL&xOohTAm#|t2Vmp3t?eTqjJ4*Y%- zu$zkQUaSVnd(EHU`d8A8TvvuTYHI2Y@T}5fUORbF+jHz!GlpY_FJ|j>D7%@i1+ic0t1W3DbuzzmKP_(a$5jt&kIf}xct5K%)w zxik-rD}v>6n?bGa3`7DGh*TX8IgnGk^XmaeT4-k&n1Wdy2Lv)mNsYq9C|HF1Cj_wZFox4X)^5`M2i=^4Gq^37zlPTD+RNy$)}O6TR($Lv zcEu%Cwu~!iO2GjG+-RhfgOELCk4FONlg@1S`{9O9hb%t{@tvVu08P^Nze}U85PlEh z=6*~{NEq5mi?#(_TP_$S3E~FfjifNtvq+B|Pi#O7c0Q2n!v)u9{ zPZqW0p$W4~m^Y_?f5-A`bj^VTYefQc(9iH@QHXpN0FMWl2Ztx3nvyGIQ^mU6yf;|GXTT&_42|kKG4vT}_XTe;N5{?ks(rP?n zx02o!Vl;pZkQHsb(G{YCGL^1W82U}CxfF7MMP2YLy4X5~WOz18wL{a=DLt={v#_JF zQ7Bw}@Zt84k8uj2JF61{lhIX%mB?s>V!nOs`zj^$2M%usD9)SH)Bftm5uhb(8Cg@T(aeHSx7eJ{qAOnQIW3%tunRWcXvtb z_9)sTdG_p?C$tf!!9oXTx5s#sCZ)!WRG{0Hgkvf{oM3-*CIM?A)gq?tAy(wZXAJgrP}fNI55G z51NZxM6#_N9kn9FbSrYJe!{_*8@l6K+b>;iHxn>ggl*@2@r)Y|xqD_@D|#iB=TRK5 zrx$L~#X=cFH-<(6&f+d9d$S`ClSA*uYwXk}_Z?*5@W=hiRk;)5sc5#}oqzFJ_*H-5LgN)+NjTjasbS zi~goNKOW=JGSzIDUKzzCTHzvKRQ=E^GpBz9z>$6nGEsc6^sw>xdU)tv z2W@Bor)!4wz11&$S@N~(y1DzK>-x}%nET;(6=-50An0?jMN9&#Tyw^0$AFK2QzU{? zKD^)Vt$Vd!fX?4&=#$3x@|@-h-~0k;Xj(rL()zuC=@baK_iLMYA{Jg=k!TaAcDrX+ z=?}aLw}-wc0(OEUc?A;x{YuLF`r6@+Y&;bmT#RQchyA_m4NE(@zG5UK<07O2u*g2h`6FnZ`M zP8keulRiC4yPVLRPneG$OF3xr!SR#4zXy+-a)p{{{MVBMF3}IJ1TX&F&lmfnn)CvJ zrQu`Pgka}zR#_CyxP78+FrVNOFqhuAO0paxq`M3OMmEEr&zzU%dX0OZDzSKIS;+WG zd%MS(o44F*?6!krO%Ft@K%?9SA68>`3~56{N`ygsM@I*^urHdx3r2WPZ+LiE5LB-~ zzvY4+Bq58iRiFZ9XJ`9-2?1Hwu~f1}_HY2>xxnjhXjD0P=q2;Z$S|2$^^RQsP)ye6 zG&I6`5kKy;mrzPz@TXz zJ;(nr^PR2!+v;PVublQO?`UGsq(<8D?8uYkWu3jy?1gIX8V;jHszD!Q|{i3AP zw3o~kjMgW=m4yBI$nGZILf)fm9`cA-x3ntAR&TyEB)~vLj5C^O3>FX_v`xi%u15sL zAJk;3nSB4DfO~1z4c_C%u-#jl#-hGId+;XLz?(I&*P;apkHD8Se-*%L+8Z};pz~J~ zbbjFmyH!25tHA8(SU!XZ7D`;&+H>z4PuP)|5r(7`6#$ZAr0>W9SgCG@NF1X9o#}CA zCN)58TYyx3w5XT^$!h@EQ+)Pm0c7pLc>rnS^EyVPgRNXmM(TBPTZ>*QK!_n#kZBpR zN*K3F(#ijsAbr&IEg&`jtl2krTeHcD4ZWq?}GhrGqkJU0x7 z9e)xY#5)+1tc%=z3JdpW6aeRwl@r!8*wnbiV^D_o2Ooc0x%k36hIS6@p^0^CY~*KO zIxRwe%)cQN)Hdj45{pS)d7I<6ch<~<5lu8nThMg1fz;g4NCG6+Ur*Fid9i_1>*8e$ zi;W7V`h9@r#=Og#OeoTC8d2f&P!NYfX%4`im0;YkA35*LDgV^&RiCUTkDAYSz zvI?Yu({c^&vaz@;(N4hMRE5*>Y-#b9{VcutQAS2=w)<;7rT!Z=H7sd!npy^pi`44G z4ryZOXPN4$C)5bZ$odkxcncCodFLwkE--LUKNwLDqmz-YSY3ToK#8L+TnQP5WL~0! 
zVq3>o@}XEeO(3E2CTsjf-%*dljTh?xjbJ7cY@O<%&69>!AA z_vn!Ce+SqgTgrU3&|t#Xp!%{!OmtJLNwWrv{tnTkxO(7hN{}jsN`|Q1J73kiY#AbQ z?}XHZdiPw2TsjsKhbKOmJyAL@Ir5$F0k7Kz>A=3=g9^7l@F(8`9*?vCA0A&Cyo+a{ zycCUZ@4p#}iiDqHo@jVg1V2?UY;~`3;`L~v!G>r0%Fo@`AK$e)!Oo4dM}pI5do9xQ zFesz{ye&TL6Ta5PFIz}Z2i|XD2}p1UtXATF;h!y-Ahgd|gacV4_qohkHuzzya95;} zAC!Aazo3U5{e!b|p6}neuT5?7Y8YnxdPv09$SXTuevuae-wiEJV#dq^=7I$bjEr}C z=x?Q6%zR3`S>rROCGJ2pB+#4OS`-r9{LU|;TgdbHkOOOuHePSHwvaYSr}|wzo6d!p zBZp_Lc9x%;4>CUkUgkC#rNPh+SQwvii}!J&^5jK7@qW?m00yR!kzB8~T8^cxbP4iL z%Y+QvyB-oB zn);rLJT+SU@(Z-N@0K-GDBua)*B(EZ!B75`?X^N1qPM$BU0qjiDn*d^&_cl!Zh^~j za&bp|=<+mPvo0&#y570+l79D+VB{>0Yk zNnsAV`->vLYg8N=(X>Ya{B8KXr+-=(=drxLU=@7NS9~a#dSTaM)PJ*J(+EdZfrjdC zVx;Z}Rlo;rNfQ&#6ckacY-;H+wp+6pJOzX(5j+30|&ps5xR5A2Ja(*bOpDGGSFauV_Gx-q#RP zYk|+C(du9FJQ(X67nzw_gPUe1%Zd3l{XOpZQ3QQo#A9fbV{HGl9?b2-#=-+nbz~RxLxM5wB@gvol?mu&n&&LM>0I+thagi(7`@S0xICjc^pG0L zPwHRP@2~eIsL4CCnxiN6g?Geiuureea@;iLBz|GUj}N6gZw6iMw7b1S<&eFF@{`Z! zgj6>S(4>*fa9pp*l1;!as}VFdxI$t1XwojAJYjc!vd=~!O+ zL%FMiQ5~||j(zajwzsVG=-9R9b%VeAyU6k3A$(CZGl#iGlppO@cill%#YAsXUvC68 zO$S!m?XYwCpO7-y(Txpi&}H={VlHODA3Jd20O_86nZkI&_{bGCbyyTb1I zUGTiY!Ee`RQm{R~)fe%I)ObzPIZ^j2@>L%t{RCCMT~!}7{e*ZX)8Aju@D;yX~a;ORZ9t4^!KM!^=y9UACXh(e3DL(jvK1>_WGGLFTcrN`32n zW6bQMvA%^5ddL)4PF20iOUAx+hjli4B|{v;(Cc!y8ccfTVZP^O&M`nxA%BJUP)lye zxYHbcBO^J~Qeyg?ZDGZbNX%7wHx=wx3{PsgMRGQrOWvZfj;za1f4G+4O%K(?{*TtL>o`4(YJzKzi6f1@k~F!gu!NXeBHW_N`j5g5(O!OLpvIfawmive=e+u_NR zAvGBnFVsJmCgHq+#Y5IVe5#yBpy9s0RMh=BUkZ)6W*>iWJh)%YeulmDwfL6Ln?zK} z@oyrGY;q$loLl2 zUc&b{a@_YUb4!nXJ{>iEw$yFBtW6SA`E}dJeS1mk3W5L%W&N1n9UrEsS^RD1rTY53 z(9m>1$omTzmq#-fHU!m>&P zy?vVvX#yPAlr7sNhgTTh$>rUe(4&O z+&~OSvmuW<_;U-bYE2>{UT1T0A{Q`JYZ+#Qk0$!@*IPsUv+WCtj+1f6N}@;G4k_%( z)Qq~5Rv&M&dMpHsd?FOUM2stS;1C!IyNQF_bJ<$m=VF>|E7?vpT|bgBy^Pl=_OkZUC*J{TPDop?y0}mlKYjT4^F6=}M4~wVjXP`3{QBJ|8X~lOQ$Dz5=HB^;;orxuIo)?f zeVO+&ZzNBd_-**mi>pdJo*a~`=DwNJJ(xT^2lZ`Djk#lVRh5FPmkk82W&e3mHo-A- zwTLgG7boI;nI3yAHswt41Z8?X2?Jfu20 zhC@t5L<5yGZEbD64=#)#s|3bP_OK18oN;J)>-xpf*3Q^Q8@}ODgO81ZTZyRdKtlt&(#hU)juTF3; z)A^ci+X0R6*re)Rd?8ChSvw&oJX`lg5qm>+tft{E{1gds0+;8-gB=~OEN&Lz2oaUt z%8PZ=ok(6>?%EcSkvTUG%d_qsld_9tr|f@rBlJc?QM?DkQ3n8ZRV-5at0iNjmt3QY z6zURYz{3uR{MYjCi~`0c&W)8gV|_?IC;@zznt8)zOj6W#P_2J?KDbc-S&!9atWKX?vtL{o`{gXPqChadoTV;2cH0CbY_@ez%tmcXkwia9tqFgl_N z?GzKA@%oajECqoQhLzY_gvgvEjJ|0nek07kZXM%QB(A(?Kyn&SZgUAdN$m_p z+ph~3(>X>9h^ik=E2t=9Z7%kNT9rMlWIP@0W-@hEiA@mGFn&-G#&ypg%HCY}ib?)d zjh|L2G`Jf$RE^!Q(sM4#QrbBL^6kQFz*5Bjg{4T`eG2$W&a37hyd)Bwe78Gf=ondW z9*g^Io)IJfQE9Ot)YShPdi+-S(HsmC9;0^D*L_li+vxummvFv?BIz&KNIBQjti|V0 zB^e_tBpk8&i+%PicxT6D3R?C->~p@KXX8G2?dsOeKsWXud)}Sq(Ys+MCnsnq%%J85 z4pislhjSC3KdZkL$G>Fo%ncJ_mgH6d-%2tGR$n}^YD!HajRra*(9s=Z$a;0^K5&zB zPVrTKtx}Q^Ta+;&)+rL{-yomC*Yxwe??({m!&{$DhJVQ&*2QVhr>~xKeXrS6tU`vF zKH8N}aDKn1Z#?Db_IPI5r8_SWr97E~T3xJqXfW+9wz}x8>fWrSd|gqk4st^5Z4*zI zWd_lcX0@&wMefJQ!Mj#Q)0vSjeMa@AqpP_MwY{GYDmxpw+$D{5LMj&BtoZNN?m5?q zC2ox{!{C|6)YmZ)s&iyK!7j$x|CPbkGBaYXRFxsY{p#MsaiX%MW`0 zUZC%V*hT1SsL`o$_b$ub%uvjd&mJ#a1?R*bsW2SZd*u_kaY|m`;tEV6U|p^J$E2cgs8&p9|$pRN~%6^y!7F*YI; zQs1QDU({)<`4j0Q}BkTxwcYcTZ zJsqf`QX+uxdNpjlJD0VtAF< z?F135!>3Tp8laaryx^wDe7U4FziBTN4)j>Ir`c4xw}(TCSN9iWOezN#A%Hs=hBOM8 zPOy$c^^t?^K+H zsptLI5-q4QG4<%W5VYTaixBV5J_Zm7$u#(`NwM!JuWD#$&X^>3^zH24mL^$?g1W^S zovI#)I_HC@ypkoNlpf1pWn6exdEtJQ(Wkt!oat;*G*luE8D!rB>jLiYkTHeET}|NK z?F^mEa?3R*nDjW3&7f<5;G=yuaIGsXV5Vha!UWfqW@z+)M3FCFzRUu<4fxtENl7#q z#JJel`^Kl>TGtL;ySBl{E4Qrd3YeC(f!lT&w2^`{71<)}@>F@7eD}?$*mRaTBj1&4 z!d6wwLVpbx0GMllozJ@}7K;H`xRBu(J^M@fdJXr@Yn>1U0&wrSwA#Y;ZjgzMT?j`) znX);87pggce8-m>+IO99R}mg~PKHe@B^;6h7+FtmcJFWWe(-1Zhv$C<&qE%dMIq82 
zc~3;0h!nyf;7acWlhfBk+~$xrj~xyEcn)S^JwEZv;Fu+16~g}Wvtel+b5T*_`_b5a z2Y=OGeO)jt-uo$Wva+^-6NdM=8)&$L!DeDEoZ1XD;GvO`tGth9Dy+HxI@Z zZAf#qRc~+ac_d9jh7cO6`W&slhlKHrKUTBM1Weeq@LigmhSIuM*fW%|?ur^L=6ZrG zBdTNr_h}Qbt38b9UYvNwn7`TqnTrCYRv-&D!nDWCf$)0z;08Gm#4?xj~g|O=s+Jx@+Gcg4Fii z&6^K;EBvE-MMF=MQ^;|K5{z83F7C*=hF-17N}_wB;eY8H&(hb+B{Bexv2J9B0rRPh zy%y_yabI%l)8F_1w30$GX5}Fz0msTn4o2Q>?4Y@wo4hNltJ!zl*Cmix9^5ORDxZxZ zbZV&1>BE&7YApV}&45g#Bq1}mtv%CZj1@1YAF--dZWl*`6uxST}C`_pUGJ`|qkna?;fM z?-mFL5?GL*c~Xm+3kpL1NR2`WmW2MKhDob~+#^t*MgC5I+rY(X1VtzQ_SJUji0{UE z$nazNbwc2^+N7x9UYy+0YU=jDua2#)y)NKQW1<7;|CvO36A4*9jWEla z_NNC2cWp$!M-Dq15aaKWy|7u9@9(Snwuxtd5k<6a@bkR~D%Neo;k1h6*W}f6ot*B} zR+R?r3-7cB30lhiMp+)&eoDophjQ=re>zu&CWX9+1yu9f!AU0!{L{z00=B}&`?Y@qcwi*W+miG~vP zBsEI#EC(@SsJ4arxx%~e>#@;$Et9EM_eLz4ENfD3+K|xAU zx}>E;1wF(|Z6A_S-kPZn+X(R+Bq#IEhk*+f)zH5DJ?Y+-Fhu`~$uP8oq&U@bX z7}vN$%`zbk)nyh^@E3>8^hhRsPDYAzXHE`F)HV=veTJ5 zjOZC9c_e8fdtr6&ehTTqH`q*;dxyuvwmxw5W|Q<{ddOcO1SQT)L(l68IYKFoLb*y4 zu!KS8;Hr0FA|O&Q|b>!atlcz5#VX|k#XqgxDJHPwHk}zdUQtFl@1@Vr4!p>2b*cujo9y@F)iOhN6Lmpr-n<%_&W)n4TsIf@y zJlxpp7RGNO3ik1wwzmrudW!L2tN%Sel?WQlT=ecQRmX{w5;KQ!`xIdrwtz$^?iWMK z$)}G-(Pb&*nS*-BWq&DCX>O4-%-nvgFC4%AZO1=`l!jn}BW%Qc+2CmR#&?H%3HU9| zx1PV*4@uS^acN%gGJEHajViKVwzCvwJa!8uRCH}|CcW|T)Z$HBDCvY(WMzBh8l@(| zd}aAL0&byXuLs1$c-?1IeSYTtOfi1bDn^qN1G26$E+EkZjVY=y=H+yNje~v(A59O2 zYV)8Wo;6`^qWZg?)>~<4T&{NmY4eoyri3>|#0N8Ng9RXT?B8PzGr|Lb+q5=T0+y$v zJ&z}>sL_AU7U4xri zC^YPi9bZ$KF#mTzdNd*@{hv_{}?+x$8VkxBlNZP;gnrGr&GiWOt0gT z{=6@iL4Ae+^%Yg$rFWedge@!LTwww0gGCmIsHCC1x`=fS_`SEDv=ah(3F`u8u3Tug z>9I`+fctlf%a8pr(rBDJyZEU1bbu}`Po^G@d3gv2Z7=?(R`GdN?s-?!ZnPEA`@bAN zC7U$}mvcqly)LjrNc5=zq;%N6wU%M*`ya02RH5=JeXcpvvNn{jFpzw7_mVts@eI(Y z4`DMav47!&fi`Y(h}9m1_9xmCd|ta@PM4hk{qnYZ$ag)@aAUf99~#xmFng`)=}?|Xh(v+;M^99T}DZa zyXorFAn!qNQl?GLAe6$Tmx^gidX1QjhyaL<_7dx1~okKlg{|bTY4Kv(C~$IDR(f~;b9b+0PVo2 zARS;MRoiy+TxhV`hy%?rV9{Qvo&A5;elnmKo z1S_xwtV}%;1i3luR+;frUi4gFY^nI!>b6;!FFLLA()KveY+Q(`JPD7}&Q&qY*3A;# z_qE$*IL|2oDTK4M5pat% zJFF_#6mW?&+j{E`0RIZV>W#3LphNwZr2BMn{B8lwMv@WMLelqk47kn>v5NkPNc;JF zj|s&NKSDIwq|-Qt2*^y6Q?Db~&qpM3?5cdRTg>BApd^|dO<2+8lOCY#>dl_t*j1Ov zu(eOkK0hf_v~9Imyv_azZWMr65`fM7b&eG=i2?x$E86BK?HScC+a|~Aj<}w>OX`-` zKV1t!>5I)$otfnfamS(|5odZJU4dAbdA=l?m&O-sbq`C75SgN^j8)k5f(3=yr;UaA zM#n7YzQNT6`@8*R)e@t+D|YhuI!8CJwLiJ5amx8h?(Rq#L9&jw$mI(u5k^X*y4b6K zb|!wmT{>Re8sEoyZz_*VeCGy#L4wf5#>%88D#P{3&mRm_B7VFTOjH+=!|AHM+ovSUN6~{u=uO)bCuVN&i#PWTo zXV4m2sx_+TxM2-X{qSU2;BXFcQ0jT#w;Y9l*}9x5wZL= zq|HWUO{Wd0cR1Ds1zHWUaoIA)hlfXM^gon@1POD!QXa z_Bxa9^DmmmvKBsV%w5Kx{yDOx6c@7hbfedw@gnZ-@H%KNLv!1$E(@VeS@Kd+P~c400-@b~!^7AM=*Q>*L^>24jBc7iv{&3?h0={8)0k{B<%h|=Dqlv38oj@-e`VT zdjGc?Cc=9l^P6>ZrLEe)dQcVY%%m^9d-MsQQ2vK%Z}dyUnDebI?|Z6*JkM#h?!N!u zHN{GbGRN{xTHw)I0 zO8B|W4Kw*2@chHxPZmK~0w_PLc zWKmksfFM&BM5+Q*_30ooU0+C43yC=dAA)mm$H{rEsYuy0Q7^s(Lb%JUjn5$^vPmGr zm+SRbX&i#ODnULzUc>Yv^dcCz*W0S-|Kbu1X?5xU`*0uvnh%e0%i@`R<=b`^a#UZF zYy#^(;vf7LxRa1)uM@{efcA}&G9-mf7*@NszWoX=B!+!}=?PU9Krdu`y zOa&$Y?8uNRUJ382;gXflhiTc~#-R}$H&1v?q$dZ^D!@_$p5-t#cOXY7f%PCAa>>Z{4@oinPu&j*hJ z&?*zs249a;Y6P%*MU4h-6;$3#b7TzYH7T(bKHlNk)lSP-QsHLl10t)0OAKBpEI5!w z-3s?`B8fm=GGHKU%z@f)>w4?mrWX?fG0c~_&VT^TB&IN*7a1}zf{LUd{+Zd8EjY+S zu?Y-&gPt(8tWK}m2YU-n>CZ@-t}J_X1DZLyRs!=bR8fpkd$-koErc7b7Zr%7el9X- zvW47GNK1x6ecHtFkA(%X3^jm+TMb44d;%MAr9|5ge-)e*dPVwZ;^N{!SQi*SuWDLz zyiEj}n%B)3sQMaf5_GSDBF*ZGd`5u9Tq^etzZjN%HvE+&c<$kihlN7;fR;HSjuch6 zL6~u;Mjy%O$+fBSLCA0DnYe|&1;pl5+#xw>{*N;Bar8IZm&G^5IDWDm_Ui)1 z3m+GbgMe3H{PK`G=ysQumJndfj8H&FEdrcrp1-M(4*R5`b3!gTK-MC2U<4`#aF}2Y zJKgG(aGgti@O61n~P(W14D%D+*}Dw%{Tql7sz()HjAr(hn`wjH%IVMvOrE$&EIC1jIWtY 
zA+)hbpWqJ9ol1IExSP>e6iA`wVd^S+>%J&`URk+E*NqC{L%_;X@ktF3vziNHK$nm| z9}qEdacPW%qEbuWmI;QoW7P2=l!9MShZ935xl?eSOpp#7j>Ns=*AgYy2J+WSK-;3K zGVMy7Q`Xoh{2N%C(Rnc9+vDBUMD3910uXdJ zlv7RX_8Wtfo>`p_FI<6Iu~!(Q5>=sL?EniJ=lVJ1J| zL{WzB(yiD$RnNy9=2N%DXc8HI*ODCf;NZpWj55t)XVI^(1?zEzc?QlCuq%kasn-BV zUB~J7yll07$juTHpC|ax1LG8qIC^1w)GF1d+7Jh*ES9SB_ZaB#|K~Y8t&wOu= z_B2#AU0U|Fo@K+>`1p$BVZ;PpP0c8$^e;17GC>w?Yg)ZEV!6rUbH6M{*b&h zbX5<$ohH2xtso_o7(7YD1S@*B5uCAY_rf8vJDjKnjEsH;= zsjE9VBNFP~@9{W#T2t&56wIz>Xw?m8HDTZM(33k?g{!e)=GD%bTiRwM;bl9~Ox~pw zy~Jx2z(GnrP_d*o-TP^}F*Gm&^L$kIam8og-0+hSX&*esu~AGgJV(F;^-LCxk-8cq zqUUgz=yaR@UxE9G#tTo$)B)uB5GHh8^7wv5oDn7#)-0mGK|qkS)Ks+Y?rxY|)g(lO zRt1%E;Cxq(4yt-lzY%tDgO-!~Oow5OP#lh$?7*w76<5e>K$*YA)o70X4t&};W%-M3 zNe;vZ-RBjbkCqtYB3d;tbrw3=n}eUK^qrF5o)I+-;dA(%k?}Qc!#05=zjK>)NA|G?}0wm-fF!D6ZzuaeulZ5VLonKD9YKL*JyY3B|UiTa* z*tB}orLbt&JKg8p1Ee!PUyXRa*)e+VX2nvamZs{uN~G=Pq3aV>E4XUTH2?-I<^8+Up#}zua)Cc8yny|pb z0s}Fq`0X*k5&}3GWt?Mhm*G|d&5LdS3I{uT(4aXOaR&;En0}>#DYrgspL-MQ-llpL z#l=KZKBvMTwb;RT``fuG_GioX5jmTMh4_NaZJ<_7W09DMg+m&?KqLv$)E@5|N|T5= zvz{Tizpf?=)-|?^1FRehMOs#7W;_*+9A=ikR8(wkrkD3Iv#@1b49=v~laXt)G24z7 zfBrJ%wIAFHh6C>^-KZvq-&n1`nT`Z)7Tn)wDY!pdRzj;ZDoB~$C|s+7IN9N97fxND zV<&`m69?|u1})*2-*6dUc<1)xO7?@CHBU1J3JejaQ6>YktT6_nZ6|*^Fr~S)8A9OF zFtF6r+0cJ^Z$^dwhwkOo)|H=WbC?e`i>*cFauX6cltX{Fo!fdr_;aqWGhWr@07ChI z&~+2?)K{kJr+x{js;Y*Do3q412IAoc#6HeKqw?7w6PJ*RDsp^W+Y|CgLn9&(#|x&h zlLby6F<@fTOhHq7q)3gQX}Q&=H6Si@xg^d|J`wG z7)N?l>J`ot&P$}Ze=r_{7Y@yvWQ;2ji)RbjEY_Us?dG?)Y$5)EJ6gXJ{t7-_Pm4dP zuwFL1$+xa)YsZMxmrnUZPYW$feNtjO88I>AsAFtr*6XzOZ8L}+(KPK&6;?#l{jEl% zn9a5iLh*+fV48maQqu^%-$AXl7iQt3QY^@EZ-Jm(>d|k}l)U&A(O`0k`gNx81H)WyJZYeYT(UkLBYnfh z6B?cM_5d?T+ty9Ue@A@APmaY@W*8TyT7T!X=hH_n1bjh{lDkD0ano#rShDyK-wu?v z1PC!s@ATmNYQYxJB4$7Podfqhf~sYMiE9Xc%q-A#z?8PO+vRiHcEd4<(RncuWAc{L zxwoLD*jL`2Rd|}j2sjWUkYO7XAK_U8Aqy!;5>d??khlP+viYa6(bD9ph4DiOM zsIpG4ul89aSq}Tv-wl%YMAAfU-gC-(3IH(e7jnfKlle7DxFkC%8_opA* z=}deOtqtk!>})#NUVfI4fD7~(;B6t}2S~92YHi%a4)j9TkuV+j&1qmYtf|p>LeJs# zZ1CEwXA)Y*pdxdwb+gPAt%PE3T43B-d>GDVw2^S>Z@Eoir+VCe7{@a2U+X$;Z$zZ! z!lP39**E)pe3v>oJI|g+E}*|_uiC&JW!SnlDkCrNB4mwJt_VQB^l*u>{8{Z{+xGRH z3-dkf-kfL8NkAzd>K^is^Pn>Ax?kBK_5Qyxis?PSVE16=@L~6C!gWtCa;z7FzXTkf zr`uXAVXNP~)aSc>+<*=sYeW{Fu^2iyo1)5WrbO~Ii_ziz)-1QYGGNXP`G-LeLy%fo zNlpMrs&U2fudHPETaOvi*1$|4hcXhbR{W9Ti{tfgOVTeN@}{N@(ec}SfBTOtI8O15 zc*=TZq4&}}S3_#-wEY|rlrFACpM68!@4{vV%nQlx9-;+$LxPg@A zee^9I>Il=Dxi(xgQH3SIk{teJpPxm%t9P5i zCkk2 z>nJlXN~!B!k;|c2E$*l?@%*?=?p=0sBq|=yGek*ax{MQ_uS=<(nV$!HS4S}C`=}9! 
zIwTYWh&T(r296lMXl^ zoCeMB-ulqo9>d(n{sl1j6;-D8`#8_+P0pBx)@@ieQ)p45GLp&HDZ$hR-V&HmRgXp0 zfQO<=`#dQLFJ2*de*U$g>zO5^U?P(zBmpDfBz#nFzu4HXF|O_a3njIZv1$6kMxeXsn}B(z3@O#zxY_xd%)v}o$Dh0R2@ALYekW6qmIhuhwlmR!5d&_V0a zy0p=hDlU7JtVh#GNsu9)@v@qw)Nj$!jav6nH7)4bmEvSV%r;>J0HT<`FUq>;)Q{=V zlNyO5zkS(I>_7Grh?Vfb;|muq5R#LJf~ytMD1f-&WxU1> zpUbV?-`l9Bdczj|Y7yOc|B{hyf8-2cGkqy=r_n9P9a`zVn_i}pi`Hzp2qR1DR83Eh z0Nf$Lqm&9rIn72f!oc?Rg^I^=vgQidB{l%)e4T#^Tt-gFK$5c8pL~K5ztSPXTMMon zN^@YQ1sxFFX^oG13`^BODXVSpOf40=pBsIi-A7}9T)%wBIu6B2z~t{GOgNt|2Bil0 zxUl;iniFPk`>M?xk@GUN`qRd|N}cTGA_kswXqMbcWA3ako?bUYyQT|!0-pE5hs#iA}J#?Hg{e5T{BSrL!Un< zM)?*Na(eGpPZ>b+_rxah${HtsXznZhR{a}jOK;w_bKu-@eL8RfY#oSvPG?gXPximQ zKpd#SGugT$?=&~ZrWJvej?tASvapz$x`uiDDtz_CeO5V;ct!Z;3oQQ2_;M zBnytgutVAaP3fp)P2CKg`nV{ref)Im%0ZU!b-9g$Uuc%+<~*Q{wv@6(41xk90U(e)SemC1aiYzqvr-K^5r=hIBVQ_iWj;D|;2MTzV!L z&#wORJGqM1L?E1Xuj@HaK0Gzwt&vw&ZO8P83wW(`{#)ZYaP$=?@G6)6Z$79Qx13N3 z^A#L{Nel_eb9!+R#1JNUnwpu3g}8pg8D|`G3S2$7A&uPMwF6FfPfuu3DIY(QfG5{Y zh+6j5J6_Vz@q(r$1wX0abFu#oS+-{70Fdkf!8qII)BjAI3Ahn2T5#uMd)Hd6p#z-G z=~{M~V&kbtS)IZU?8muH88exJcZWkBz}TbJybR!$9i%P^%Y9`pT1_ zUtCYKHZ2R|({m~QIjt5F5J61<0!{kx+PD}x6xt+>QZ*H|CQib#rQu& z5~`8Ak^$0jG0u>kB@Ui;KfWfhhONWK51m>orOn5-#>Pe%s|5#~C-4t#zoJS~+#wqm z*m6Ha%sZ$QI}D5B{{SxpgReWU@sLlVkVcfo&|2H4tth3qHmSW?c)zfBwpNe{k6 zggtiXn3yo($=UW?QwVe1W`K;k)qjK}fa`91nt~sOB#e^x_Keu$8 z{>|n5caMNprna^=5E|;P1Iq=FAX{);lQmS&y$m}b(%=*sd=um`GBiX?Oq{m?x%M6P z$)UP_A(yEbRyvqk{D1=ZAoEw)Ogo>hfBM1J&YM2VmITVu%{alV-(OFhHLVzi)`Y@9 zbj^26$9@SkH&)Dm-*t*JGf+5|8+!ie(XZ$j##3DE%n43ov8=9{ehR4@#z*< z+_E|)j_$acHITz}|7_Mgz7F2+lY75oB|O0z|Fr_;(UQvoZ}>j4!O!e!B%zj_*-XZrwHXr z8mwoyARuqG8XUiUOA*3=#lV0Xb}6BP_nNVI1Es7Ws~?BKUJ3!*k<30%t_-%4?j0lOKORw768E0Qn(C4XMJP6$isF$iZl^S-_$`GiImQTCkf z1vH@0+vt~pz6AK^hG3ht2m26xI0KlV+kr2n;be{5no>+VJNPhxBmuDp0{z{Nt7SGp zK>~RIxU+0L*zIa@MrytYsq^{pc%jevn2E!U7fmhi{#^SS-QDHBwNOkjU8#11^WWE+ zSq^X}DQRge@K;Ai!C5|B<;058hu};6<@Kq0TIjEIyfhgo^hZ~p-?SoE4!oq(3UfNx z`!P)AIK8k7c;9S({c+WG={1gZSDpd#~N{{GETI( z1;>erp2=5fzl?qR7IFWi_BmeGyM;hQuyG;tLx`jY9Ek0a%UHP;GHmw-L9NT#%loB^ zy$McsSvH2xxG!c$nEZa)Lx{qz_U#KkC$0o}`691QT<57wV!|5V69w#={>-i;W*wrw zE0y2y>o@q~+nqY4BV9dGB5YIBdiz`)*?)z(WW10ZswM2FUO7Pje?r6%G&6hkbw?z z*IiZ=tHGY7+yH7u)F*PVkVBaH=KB z;0D6@#ns5yD=g+2H4~c`AWp)JuLDM}?qYF1W1u&w*_(|(s-)jrTQkec5d#Acz#Z$0 zJLE}{3pg?(sw>dyIU)}VHR6y)k|l4>RNQ`-Q-P}C#f)Tu)*U9&M{XHl>sHcs17Xh( z83B0(CeB0E;Ww56{ypY2DT)G`_N3%(O>wb-fvg3mV*G$V`Jbqgzp&lNM7GH2u&e<{> z1<%Gf&tqh4-D|#*_uo_Np*B(sy3b?j6GXV)ljiSam|)CR`2FBFpt6(h%nVC1&dJPT z-T-Kk0^bUhQb>VwN&!Iw(6?H4ay)O=JN->K>B@Zh#=_?sa=kILW>Fq0aTDtg{R=P- z7qZnMmTtSa(s+@9K|?d2r>4e)w!{4GKRW;wZlM^@?-1leW?zN9`FAHrQvCp?#VwJq zZXw@s>{3Bn!IVy=Ju~;g+@(f5W#}9zU*xCG!J$xXrwKIJC;;60|7f2XSZYqlI|jY( z4sU}KC%dlq1|cf^-`&wYc=We-7GBM)Q{s<4zFN*L!hx~dB_F8XGxy$~Z+$sdUl(O)_ISj%6?^Aby zQA=-}Y&D}~h}kV~J~#$-SfaH2^t6SEp`nQV&xT@6HZK9xk)9hm_&lrKE>+t3FXupk z-$g;)@F@&lp(EHtGe9es-1bgXa-I~1fP-U6;A_AWNk$k}^)ttS{>}K0+2;WIZR4FcC=0Xh$T>{p z_|Rf>I@xC}*4TTK7J&Z;hy{SFxTIr4{5P#5qb9MCJ&3odG?q@Jh`KX2BBwmYvzE!42Hn-)}Ktc4^#k6*An;$-K~ z`TiTcJeNB8>ik08li=$6SD}ZN&%|5Q>6V;mZj+Bn&T5^jW#@ms*FhU?)X{zuCh(tF zN?GrN?>TUi=>+S){rXKu3$IdaVCB8~NyW&}>=S9o500D z@dhT~pV#Ix5ZAY}6r;kZ5!aKiAC5Dz&ESu_Zsl73o!}`W6Fh10^tCt}=)2qpI`Z3W zr$5e_rX3bgvp*FBvn2RZgB^8arDG!7>P}g;(!Xq)7PyeEiQ;n4Lzrr!9TzbhOx1a5 z78_#1M=XMp1&CqD!3sIR;*c9b}g*ngM)D%?%LC6JYz8|n6L`>kc z4V|5=1cEW|n&6P)Z(dX;_h%7uK2vR#EyHQtDLN8yExpAcUX4EB)1cey@{bfuh?oUK z=dN-lIC2aW{sGV%cy2Yowd(!l=nAx}RJ9K%)PDo4aLx=@sOApBv?{zBK0c2#6^poP z2JrW96$2hKaOq!#-)JWJ&)RO8TM)i-R5NJZvGN-+MsBG(#$Rpar1-Ga##tyYo)RO2 
diff --git a/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/nemo_evaluator.yaml b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/nemo_evaluator.yaml
new file mode 100644
index 00000000000..256a4031be1
--- /dev/null
+++ b/examples/pruning/minitron/NVIDIA-Nemotron-Nano-9B-v2/nemo_evaluator.yaml
@@ -0,0 +1,194 @@
+# NeMo Evaluator Launcher config for Nemotron-Nano-9B-v2 and Pruned variants
+# --------------------------------------------------------------------------
+# Before running, update the following fields in the yaml:
+# - `execution.hostname` — your Slurm login node hostname
+# - `execution.account` — your Slurm account
+# - `deployment.checkpoint_path` — Hugging Face checkpoint path (original, pruned or quantized)
+# - `evaluation.nemo_evaluator_config.config.params.extra.tokenizer` — same path as `checkpoint_path`
+#
+# Usage:
+# pip install "nemo-evaluator-launcher[all]==0.1.90"
+#
+# # Set required environment variables:
+# export HF_TOKEN=
+# export SLURM_JOB_DIR=
+# export HF_HOME=
+# export VLLM_CACHE_ROOT=
+#
+# # Set additional unused but required environment variables:
+# export API_KEY=xxxxxx
+# export INFERENCE_API_KEY=xxxxxx
+# export OPENAI_CLIENT_ID=xxxxxx
+# export OPENAI_CLIENT_SECRET=xxxxxx
+#
+# nemo-evaluator-launcher run --config nemo_evaluator.yaml
+#
+
+defaults:
+  - execution: slurm/default
+  - deployment: vllm
+  - _self_
+
+execution:
+  type: slurm
+  hostname:
+  username: ${oc.env:USER}
+  account:
+  partition: batch
+  num_nodes: 1
+  ntasks_per_node: 1
+  gpus_per_node: 8
+  gres: "gpu:8"
+  walltime: 04:00:00
+  sbatch_comment: "{\"OccupiedIdleGPUsJobReaper\":{\"exemptIdleTimeMins\":\"1920\",\"reason\":\"benchmarking\",\"description\":\"Some evals need idle time\
+    \ else gets cancelled\"}}"
+  subproject: nel
+  output_dir: ${oc.env:SLURM_JOB_DIR}
+  mode: sequential
+
+  mounts:
+    mount_home: false
+  deployment:
+    n_tasks: 1
+    batch_comment: "{\"OccupiedIdleGPUsJobReaper\":{\"exemptIdleTimeMins\":\"1920\",\"reason\":\"benchmarking\",\"description\":\"Required data validation\
+      \ and evaluation\"}}"
+
+# Note: Only tp=1 works for Nano (Mamba-based architecture)
+deployment:
+  # Update this to your Hugging Face checkpoint path (original, pruned or quantized)
+  checkpoint_path:
+  served_model_name: Nemotron-Nano-9B-v2
+  port: 8000
+  tensor_parallel_size: 1
+  pipeline_parallel_size: 1
+  data_parallel_size: 8
+  gpu_memory_utilization: 0.8
+  extra_args: "--trust-remote-code --no-enable-prefix-caching --mamba_ssm_cache_dtype float32 --model-loader-extra-config '{\"enable_multithread_load\"\
+    : true, \"num_threads\": 96}' --kv-cache-dtype fp8 "
+  env_vars:
+    VLLM_ATTENTION_BACKEND: FLASH_ATTN
+  endpoints:
+    chat: /v1/chat/completions
+    completions: /v1/completions
+    health: /health
+  multiple_instances: true
+
+evaluation:
+  nemo_evaluator_config:
+    target:
+      api_endpoint:
+        adapter_config:
+          use_system_prompt: true
+          use_reasoning: false
+          params_to_add:
+            chat_template_kwargs:
+              enable_thinking: true
+            skip_special_tokens: false
+          use_caching: true
+          tracking_requests_stats: true
+          log_failed_requests: true
+          use_request_logging: true
+          max_logged_requests: 10
+          use_response_logging: true
+          max_logged_responses: 10
+    config:
+      params:
+        parallelism: 64
+        max_new_tokens: 32768
+        temperature: 0.6
+        top_p: 0.95
+        request_timeout: 3600
+        max_retries: 10
+        extra:
+          tokenizer_backend: huggingface
+          # Update tokenizer path to match checkpoint_path above
+          tokenizer:
+  env_vars:
+    HF_TOKEN: HF_TOKEN
+    HF_HOME: HF_HOME
+    VLLM_CACHE_ROOT: VLLM_CACHE_ROOT
+    API_KEY: API_KEY
+    INFERENCE_API_KEY: INFERENCE_API_KEY
+    OPENAI_CLIENT_ID: OPENAI_CLIENT_ID
+    OPENAI_CLIENT_SECRET: OPENAI_CLIENT_SECRET
+
+  tasks:
+    # 1. MMLU Pro
+    - name: ns_mmlu_pro
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      nemo_evaluator_config:
+        config:
+          params:
+            # limit_samples: 8
+            extra:
+              num_repeats: 1
+              args: "++prompt_config=eval/aai/mcq-10choices-boxed"
+
+    # 2. GPQA Diamond
+    - name: ns_gpqa
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      nemo_evaluator_config:
+        config:
+          params:
+            # limit_samples: 8
+            extra:
+              num_repeats: 8
+              args: "++prompt_config=eval/aai/mcq-4choices"
+
+    # 3. LiveCodeBench
+    - name: ns_livecodebench
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      nemo_evaluator_config:
+        config:
+          params:
+            # limit_samples: 8
+            extra:
+              num_repeats: 8
+              dataset_split: test_v6_2408_2505
+
+    # 4. AIME 2025
+    - name: ns_aime2025
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      nemo_evaluator_config:
+        config:
+          params:
+            # limit_samples: 8
+            extra:
+              num_repeats: 64
+
+    # 5. MATH500 (Requires JUDGE_API_KEY)
+    # - name: AA_math_test_500
+    #   env_vars:
+    #     HF_TOKEN: HF_TOKEN
+    #     JUDGE_API_KEY: JUDGE_API_KEY
+    #   nemo_evaluator_config:
+    #     config:
+    #       params:
+    #         # limit_samples: 8
+    #         extra:
+    #           n_samples: 5
+
+    # 6. IFEval
+    - name: ns_ifeval
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      # nemo_evaluator_config:
+      #   config:
+      #     params:
+      #       limit_samples: 8
+
+    # 7. SciCode
+    - name: ns_scicode
+      env_vars:
+        HF_TOKEN: HF_TOKEN
+      nemo_evaluator_config:
+        config:
+          params:
+            # limit_samples: 8
+            max_new_tokens: 8192
+            extra:
+              num_repeats: 8
diff --git a/examples/pruning/minitron/README.md b/examples/pruning/minitron/README.md
new file mode 100644
index 00000000000..8749c366a70
--- /dev/null
+++ b/examples/pruning/minitron/README.md
@@ -0,0 +1,11 @@
+# Minitron Pruning — End-to-End Tutorials
+
+End-to-end tutorials for [Minitron](https://arxiv.org/abs/2407.14679) structured pruning followed by knowledge distillation, quantization, evaluation, and vLLM deployment.
+
+Each subdirectory covers a specific source model and target size, including the full data blend, pruning config, distillation hyperparameters, evaluation results, and throughput benchmarks.
+ +## Related + +- [Minitron pruning instructions](../../megatron_bridge/README.md#pruning) and [Megatron-Bridge distillation instructions](../../megatron_bridge/README.md#distillation) +- [Megatron dataset tokenization](../../dataset/MEGATRON_DATA_PREP.md) +- [Puzzletron pruning algorithm](../../puzzletron/README.md) diff --git a/examples/megatron_bridge/results/puzzletron.md b/examples/pruning/puzzletron/Llama-3.1-8B-Instruct.md similarity index 100% rename from examples/megatron_bridge/results/puzzletron.md rename to examples/pruning/puzzletron/Llama-3.1-8B-Instruct.md diff --git a/examples/pruning/puzzletron/README.md b/examples/pruning/puzzletron/README.md new file mode 100644 index 00000000000..426ced00c4c --- /dev/null +++ b/examples/pruning/puzzletron/README.md @@ -0,0 +1,16 @@ +# Puzzletron Pruning — Distillation Results + +Distillation results for models compressed with [Puzzletron](../../puzzletron/README.md) MIP-based heterogeneous pruning, followed by Megatron-Bridge knowledge distillation. + +## Results + +| Model | File | +| --- | --- | +| Llama-3.1-8B-Instruct and Qwen3-8B | [Llama-3.1-8B-Instruct.md](Llama-3.1-8B-Instruct.md) | + +## Related + +- [Puzzletron pruning example](../../puzzletron/README.md) +- [Megatron-Bridge distillation instructions](../../megatron_bridge/README.md#distillation) +- [Megatron dataset tokenization](../../dataset/MEGATRON_DATA_PREP.md) +- [Minitron pruning instructions](../../pruning/README.md#minitron) diff --git a/examples/puzzletron/README.md b/examples/puzzletron/README.md index 89183073399..571b40ca499 100644 --- a/examples/puzzletron/README.md +++ b/examples/puzzletron/README.md @@ -341,6 +341,8 @@ To recover degradation in the quality of the compressed model, we can use knowle See [Megatron-Bridge distillation](../megatron_bridge/README.md#distillation) for instructions on using Megatron-Bridge for knowledge distillation. The distillation script supports both standard HuggingFace and Puzzletron AnyModel checkpoints. +For distillation results on Puzzletron-compressed models, see [examples/pruning/puzzletron/](../pruning/puzzletron/README.md). + ## Advanced Usage Modify `llama-3_1-8B_pruneffn_memory.yaml` file for advanced compression scenarios. diff --git a/modelopt/torch/utils/plugins/megatron_preprocess_data.py b/modelopt/torch/utils/plugins/megatron_preprocess_data.py index 0c9a121f696..81dac1580b0 100644 --- a/modelopt/torch/utils/plugins/megatron_preprocess_data.py +++ b/modelopt/torch/utils/plugins/megatron_preprocess_data.py @@ -78,8 +78,9 @@ --strip_newlines ``` -Note: ``--hf_streaming`` without ``--hf_max_samples_per_split`` falls back to non-streaming, -since streaming the full dataset is slower than the cached non-streaming path. +Note: streaming does not cache to disk, so re-runs re-download. For full-dataset streaming +without a sample cap this is slower than non-streaming mode, but it avoids Arrow schema +compatibility issues with complex nested message types. 
""" import argparse @@ -191,7 +192,14 @@ def encode(self, json_line: str): if tools: kwargs["tools"] = tools value = self._process_messages(value) - text = _Encoder.tokenizer.apply_chat_template(value, tokenize=False, **kwargs) + try: + text = _Encoder.tokenizer.apply_chat_template(value, tokenize=False, **kwargs) + except Exception as e: + print( + f"apply_chat_template failed: {e}\nData:\n{json.dumps(data, indent=2, default=str)}", + flush=True, + ) + raise # chat template already embeds all special tokens; don't add BOS again add_special_tokens = False else: @@ -452,8 +460,9 @@ def megatron_preprocess_data( hf_split: Hugging Face Hub dataset split. Defaults to None (all splits). hf_max_samples_per_split: Maximum number of rows to consume per split. hf_streaming: Load HuggingFace datasets in streaming mode. Only consumed rows are - downloaded — useful for very large pretraining datasets. Note: streaming does not - cache to disk, so re-runs re-download. Defaults to False. + downloaded — useful for very large pretraining datasets or datasets with complex + nested message schemas that cause Arrow type-cast errors in non-streaming mode. + Note: streaming does not cache to disk, so re-runs re-download. Defaults to False. output_dir: Path to directory to save binary output files. tokenizer_name_or_path: Name or path of the Hugging Face tokenizer to use. json_keys: Key or list of keys to extract from json. Defaults to ["text"]. @@ -485,10 +494,9 @@ def megatron_preprocess_data( warnings.warn( "--hf_streaming is set but --hf_max_samples_per_split is not. " "Streaming without a sample cap re-downloads the full dataset on every run with no " - "disk cache, which is slower than non-streaming mode. Falling back to streaming=False.", + "disk cache, which is slower than the cached non-streaming path.", stacklevel=2, ) - hf_streaming = False Path(output_dir).mkdir(parents=True, exist_ok=True) vocab_size = AutoTokenizer.from_pretrained(tokenizer_name_or_path).vocab_size From 0f9ef85310a6ec4077c8c1885c64ae03a208b5a7 Mon Sep 17 00:00:00 2001 From: Hrishith Thadicherla <99313418+hthadicherla@users.noreply.github.com> Date: Mon, 4 May 2026 15:15:53 +0530 Subject: [PATCH 15/24] Added fallback to load extra cudnn dlls in the site packages (#1369) ### What does this PR do? Type of change: Bug fix CUDNN 9.21 added a new dll dependency called cudnn_engines_tensor_ir64_9.dll that ort.preload_dlls() is not updated on for windows to load this dll hence fails trying to load cudnn when just nvidia-cudnn-cu12>9.20 package is used. So added code to add any extra dlls from the site-packages folder that the preload function misses. ## Summary by CodeRabbit * **Bug Fixes** * Improved Windows cuDNN detection and loading for ONNX Runtime with CUDA by scanning installed cuDNN packages and attempting to load any missing DLLs to reduce startup failures. * Enhanced logging and diagnostics: preload output is now surfaced as warnings and individual DLL load successes/failures are logged to aid troubleshooting. 
--------- Signed-off-by: Hrishith Thadicherla Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/onnx/quantization/ort_utils.py | 87 +++++++++++++++++++++++-- 1 file changed, 80 insertions(+), 7 deletions(-) diff --git a/modelopt/onnx/quantization/ort_utils.py b/modelopt/onnx/quantization/ort_utils.py index 0ea465487a3..2c5a0b7d2da 100755 --- a/modelopt/onnx/quantization/ort_utils.py +++ b/modelopt/onnx/quantization/ort_utils.py @@ -18,6 +18,7 @@ import glob import io import os +import pathlib import platform import re import shutil @@ -25,6 +26,7 @@ import sys from collections.abc import Sequence from contextlib import redirect_stderr, redirect_stdout +from importlib.metadata import PackageNotFoundError, distribution import onnxruntime as ort from onnxruntime.quantization.operators.qdq_base_operator import QDQOperatorBase @@ -126,6 +128,78 @@ def _check_for_tensorrt(min_version: str = "10.0"): ) +def _find_cudnn_bin_dir(): + """Locate the nvidia cudnn bin directory inside site-packages.""" + for pkg_name in ("nvidia-cudnn-cu12", "nvidia-cudnn-cu13"): + try: + dist = distribution(pkg_name) + except PackageNotFoundError: + continue + for f in dist.files or []: + if f.name.startswith("cudnn64_") and f.name.endswith(".dll"): + bin_dir = str(pathlib.Path(f.locate()).parent) + if os.path.isdir(bin_dir): + return bin_dir + return None + + +def _load_extra_cudnn_dlls(): + """Load any cuDNN DLLs from site-packages that ORT's preload_dlls() missed. + + TEMPORARY WORKAROUND: This function exists because ort.preload_dlls() has a + hardcoded list of cuDNN sub-libraries which may be incomplete for newer cuDNN + versions (e.g. cuDNN 9.21 added cudnn_engines_tensor_ir64_9.dll, cuDNN 9.20 + added cudnn_cnn64_9.dll). Once ort.preload_dlls() is fixed upstream to + dynamically discover all cuDNN DLLs, this function and its helper + (_find_cudnn_bin_dir) should be removed. + + This scans the nvidia-cudnn bin directory and loads any cudnn*.dll not already + loaded in the process. + """ + import ctypes + import ctypes.wintypes + + cudnn_bin_dir = _find_cudnn_bin_dir() + if not cudnn_bin_dir: + logger.debug( + "nvidia-cudnn bin directory not found in site-packages, skipping extra DLL load" + ) + return + + dll_files = sorted(glob.glob(os.path.join(cudnn_bin_dir, "cudnn*.dll"))) + if not dll_files: + logger.debug("No cudnn*.dll files found in %s", cudnn_bin_dir) + return + + get_module_handle_w = ctypes.windll.kernel32.GetModuleHandleW # type: ignore[attr-defined] + get_module_handle_w.argtypes = [ctypes.wintypes.LPCWSTR] + get_module_handle_w.restype = ctypes.wintypes.HMODULE + + loaded = [] + skipped = [] + failed = [] + for dll_path in dll_files: + dll_name = os.path.basename(dll_path) + if get_module_handle_w(dll_name): + skipped.append(dll_name) + continue + try: + ctypes.CDLL(dll_path) + loaded.append(dll_name) + except OSError as e: + failed.append(dll_name) + logger.warning(f"Failed to load {dll_name} from site-packages: {e}") + + if skipped: + logger.debug(f"Already loaded (skipped): {skipped}") + if loaded: + logger.info( + f"Loaded {len(loaded)} extra cuDNN DLLs that ort.preload_dlls() missed: {loaded}" + ) + if failed: + logger.warning(f"Failed to load {len(failed)} cuDNN DLLs: {failed}") + + def _check_for_libcudnn(): # TODO: handle multiple calls to this function logger.info("Checking for cuDNN library") @@ -150,10 +224,6 @@ def _check_for_libcudnn(): f"cuDNN not found in {env_variable}. " "Attempting onnxruntime.preload_dlls() to load from site-packages..." 
) - # preload_dlls() does not raise on failure — it silently prints - # "Failed to load ..." messages. Capture its output and check - # whether the key cuDNN DLL actually loaded. - cudnn_dll = "cudnn" if platform.system() == "Windows" else "libcudnn_adv" captured = io.StringIO() try: with redirect_stdout(captured), redirect_stderr(captured): @@ -163,14 +233,17 @@ def _check_for_libcudnn(): preload_output = captured.getvalue() if preload_output: - logger.debug(f"preload_dlls() output:\n{preload_output}") + logger.warning(f"preload_dlls() output:\n{preload_output}") - if f"Failed to load {cudnn_dll}" in preload_output: + core_cudnn_dll = "cudnn64_9" if platform.system() == "Windows" else "libcudnn_adv" + if f"Failed to load {core_cudnn_dll}" in preload_output: logger.error( - f"onnxruntime.preload_dlls() was called but {cudnn_dll} failed to load. " + f"onnxruntime.preload_dlls() was called but {core_cudnn_dll} failed to load. " "cuDNN DLLs were NOT successfully loaded from site-packages." ) else: + if platform.system() == "Windows": + _load_extra_cudnn_dlls() logger.info( "onnxruntime.preload_dlls() succeeded — CUDA/cuDNN DLLs loaded" " from site-packages. Verify version compatibility at" From d3d519dfefca283440e4347aadeaf2fd79c3c10b Mon Sep 17 00:00:00 2001 From: yeyu-nvidia Date: Mon, 4 May 2026 02:47:50 -0700 Subject: [PATCH 16/24] fix: include medusa in data_module assignment in main.py (#1370) ## Problem When `training.mode == "medusa"` is used in `main.py`, the `data_module` variable is never assigned because line 344 only covered `eagle3` and `dflash` modes. This causes an `UnboundLocalError` when the trainer is constructed with `**data_module`. Fixes OMNIML-4147 ## Fix Add `"medusa"` to the `training_args.mode in ("eagle3", "dflash")` condition so `data_module` is correctly populated for medusa training. ## Summary by CodeRabbit * **Bug Fixes** * Fixed speculative decoding example to properly handle "medusa" mode alongside existing "eagle3" and "dflash" modes. Signed-off-by: Ye Yu Co-authored-by: Claude Sonnet 4.6 Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- examples/speculative_decoding/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/speculative_decoding/main.py b/examples/speculative_decoding/main.py index 31c73d04275..6a8855930b8 100644 --- a/examples/speculative_decoding/main.py +++ b/examples/speculative_decoding/main.py @@ -350,7 +350,7 @@ def train(): print_rank_0("Loading dataset...") is_dflash = training_args.mode == "dflash" - if training_args.mode in ("eagle3", "dflash"): + if training_args.mode in ("eagle3", "medusa", "dflash"): data_module = make_speculative_data_module( tokenizer, data_args, From 1b2f029c112ff2a86f019ee9e4b570cfe5e33bb1 Mon Sep 17 00:00:00 2001 From: yeyu-nvidia Date: Mon, 4 May 2026 02:47:56 -0700 Subject: [PATCH 17/24] fix: guard against None chat_template in _post_process_chat_template (#1371) ## Problem When training with a model that has no `chat_template` in its tokenizer (e.g. base Llama-3.2 models), `_post_process_chat_template()` crashes: ``` AttributeError: 'NoneType' object has no attribute 'replace' ``` The DeepSeek WAR at the top of `_post_process_chat_template` called `.replace()` directly on `self.tokenizer.chat_template` without checking for `None` first. Fixes NVBug 6120958 ## Fix Add an early return when `chat_template is None`. 
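A minimal repro sketch (hedged: the model name and the replaced substring are illustrative; any tokenizer that ships without a chat template triggers the same crash):

```python
# Base checkpoints often ship without a chat template, leaving
# tokenizer.chat_template as None.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("meta-llama/Llama-3.2-1B")  # base model, no chat template
assert tok.chat_template is None

# The old code called .replace() on it unconditionally, producing the
# AttributeError above; the new early return skips the WAR instead.
tok.chat_template.replace("<think>", "")  # AttributeError: 'NoneType' has no attribute 'replace'
```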
The existing check at line 164 (`if self.tokenizer.chat_template is None: raise ValueError`) still provides a clear error message if no valid template is available after post-processing. ## Summary by CodeRabbit * **Bug Fixes** * Fixed a crash in chat template processing that occurred when a chat template configuration was not set, improving system stability and reliability during initialization. Signed-off-by: Ye Yu Co-authored-by: Claude Sonnet 4.6 Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/torch/utils/plugins/transformers_dataset.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modelopt/torch/utils/plugins/transformers_dataset.py b/modelopt/torch/utils/plugins/transformers_dataset.py index 56b1e4f07b1..162bdbd8cf9 100644 --- a/modelopt/torch/utils/plugins/transformers_dataset.py +++ b/modelopt/torch/utils/plugins/transformers_dataset.py @@ -181,6 +181,8 @@ def _post_process_tokenizer(self): def _post_process_chat_template(self): # [WAR]: For DeepSeek-V3/R1 tokenizer, we modify the chat_template such that the # tokens are preserved for supervised learning. + if self.tokenizer.chat_template is None: + return self.tokenizer.chat_template = self.tokenizer.chat_template.replace( REMOVE_THINK_CHAT_TEMPLATE, "" ) From 3720a7ac6369734c7f6244c3c876684e8ce4eed1 Mon Sep 17 00:00:00 2001 From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> Date: Mon, 4 May 2026 15:17:07 +0530 Subject: [PATCH 18/24] Increase gpu_tests CI timeout from 60 to 75 mins Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- .github/workflows/gpu_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/gpu_tests.yml b/.github/workflows/gpu_tests.yml index 628aead7ee8..ec378ff3ea9 100644 --- a/.github/workflows/gpu_tests.yml +++ b/.github/workflows/gpu_tests.yml @@ -39,7 +39,7 @@ jobs: matrix: include: - example: gpu - timeout: 60 + timeout: 75 container_image: pytorch:26.01-py3 # tests/gpu/_extensions/test_onnx_extensions.py fails for newer containers until https://github.com/tbenthompson/cppimport/pull/98 - example: gpu_megatron From 1f619ce6a10439cfe3238a9cf6ef6dfc89011f78 Mon Sep 17 00:00:00 2001 From: kaix-nv Date: Mon, 4 May 2026 22:10:55 +0100 Subject: [PATCH 19/24] Fix sparsity-only export emitting empty hf_quant_config.json (#1375) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: ? Bug fix. Fix sparsity-only export writing `hf_quant_config.json` with null `quant_algo`. ### Testing ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). 
- Is this change backward compatible?: ✅ / ❌ / N/A - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: ✅ / ❌ / N/A - Did you write any new necessary tests?: ✅ / ❌ / N/A - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ / ❌ / N/A ### Additional Information ## Summary by CodeRabbit * **Bug Fixes** * Improved quantization metadata handling in model export to correctly identify quantized checkpoints based on algorithm presence. * **Style** * Reorganized imports across example files for consistency. Signed-off-by: Kai Xu Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/torch/export/unified_export_hf.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/modelopt/torch/export/unified_export_hf.py b/modelopt/torch/export/unified_export_hf.py index a76783ac172..ed6ed2fcf21 100644 --- a/modelopt/torch/export/unified_export_hf.py +++ b/modelopt/torch/export/unified_export_hf.py @@ -1189,12 +1189,25 @@ def export_hf_checkpoint( try: post_state_dict, hf_quant_config = _export_transformers_checkpoint(model, dtype) - if hf_quant_config is not None: + # Only treat the export as quantized when at least one quant_algo field is set. + # get_quant_config always returns a dict (even for sparsity-only or unmodified models), + # so emitting hf_quant_config.json unconditionally produces a file with + # "quant_algo": null that downstream loaders (e.g. TensorRT-LLM) reject as a + # malformed pre-quantized checkpoint. + quantization_details = (hf_quant_config or {}).get("quantization", {}) + is_quantized_export = ( + quantization_details.get("quant_algo") is not None + or quantization_details.get("kv_cache_quant_algo") is not None + ) + + if is_quantized_export: # Save hf_quant_config.json for backward compatibility with open(f"{export_dir}/hf_quant_config.json", "w") as file: json.dump(hf_quant_config, file, indent=4) hf_quant_config = convert_hf_quant_config_format(hf_quant_config) + else: + hf_quant_config = None # Remove hf_quantizer from model so post_state_dict can be exported. 
         if getattr(model, "hf_quantizer", None) is not None:

From c07841a54558cd585f51c1fa7ce296793f31aeb5 Mon Sep 17 00:00:00 2001
From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
Date: Tue, 5 May 2026 03:26:04 +0530
Subject: [PATCH 20/24] Enable Python 3.14 wheel support to unblock NGC PyTorch
 container testing on Ubuntu 26.04 + Python 3.14 (#1386)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Ubuntu 26.04 is here, and NVIDIA PyTorch containers will soon ship with Python 3.14, so we need to enable (still untested) Python 3.14 support now to unblock them.

* **New Features**
* DFlash offline speculative decoding training
* MXFP4→NVFP4 weight conversion support
* Shared hidden-state dump utilities
* Updated DeepSeek PTQ calibration defaults
* **Chores**
* Added Python 3.14 support; updated Python requirement to <3.15
* **Documentation**
* Updated installation documentation for Python version compatibility

---------

Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
Co-authored-by: coderabbitai[bot] <136622811+coderabbitai[bot]@users.noreply.github.com>
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 .github/workflows/unit_tests.yml                        | 1 +
 CHANGELOG.rst                                           | 1 +
 docs/source/getting_started/_installation_for_Linux.rst | 2 +-
 noxfile.py                                              | 2 +-
 pyproject.toml                                          | 2 +-
 5 files changed, 5 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/unit_tests.yml b/.github/workflows/unit_tests.yml
index 9832f0cc605..e0933babf66 100644
--- a/.github/workflows/unit_tests.yml
+++ b/.github/workflows/unit_tests.yml
@@ -99,6 +99,7 @@ jobs:
           - {nox_session: "unit-3.10(torch_211, tf_latest)", python_version: "3.10"}
           - {nox_session: "unit-3.11(torch_211, tf_latest)", python_version: "3.11"}
           - {nox_session: "unit-3.13(torch_211, tf_latest)", python_version: "3.13"}
+          - {nox_session: "unit-3.14(torch_211, tf_latest)", python_version: "3.14"}
           - {nox_session: "unit-3.12(torch_28, tf_latest)", python_version: "3.12"}
           - {nox_session: "unit-3.12(torch_29, tf_latest)", python_version: "3.12"}
           - {nox_session: "unit-3.12(torch_210, tf_latest)", python_version: "3.12"}
diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 3444a043a43..7c08a9e0d30 100755
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -39,6 +39,7 @@ Changelog
 - Bump minimum required PyTorch version to 2.8.
 - [Experimental] Add support for transformers>=5.0, including generic PTQ and unified HF checkpoint export for fused MoE expert modules (Mixtral, Qwen2-MoE, Qwen3-MoE, Qwen3.5-MoE, DeepSeek-V3, Jamba, OLMoE, etc.).
 - Improve ``megatron_preprocess_data``: add ``--reasoning_content`` support for Nemotron v3 datasets, eliminate intermediate JSONL for HuggingFace datasets, return output file prefixes from the Python API, add gzip input support (``.jsonl.gz``), add ``--strip_newlines`` flag for plain-text pretraining data, add ``--hf_streaming`` for very large datasets (only consumed rows downloaded), and auto-shuffle when ``--hf_max_samples_per_split`` is set to avoid biased sampling.
+- Add installation support for Python 3.14. Only basic unit tests are verified for now. Production usage still defaults to Python 3.12. Python 3.10 support will be dropped in the next release.
0.43 (2026-04-16) ^^^^^^^^^^^^^^^^^ diff --git a/docs/source/getting_started/_installation_for_Linux.rst b/docs/source/getting_started/_installation_for_Linux.rst index a18b45ee7c4..1c3f17fc0fa 100644 --- a/docs/source/getting_started/_installation_for_Linux.rst +++ b/docs/source/getting_started/_installation_for_Linux.rst @@ -12,7 +12,7 @@ Latest Model Optimizer (``nvidia-modelopt``) currently has the following system +-------------------------+-----------------------------+ | Architecture | x86_64, aarch64 (SBSA) | +-------------------------+-----------------------------+ -| Python | >=3.10,<3.14 | +| Python | >=3.10,<3.15 | +-------------------------+-----------------------------+ | CUDA | 12.x, 13.x | +-------------------------+-----------------------------+ diff --git a/noxfile.py b/noxfile.py index fcef3d30875..4b012c9bcc3 100644 --- a/noxfile.py +++ b/noxfile.py @@ -52,7 +52,7 @@ def _cov_args(): # ─── CPU unit tests ─────────────────────────────────────────────────────────── -@nox.session(python=["3.10", "3.11", "3.12", "3.13"]) +@nox.session(python=["3.10", "3.11", "3.12", "3.13", "3.14"]) @nox.parametrize("tf_ver", [nox.param(k, id=k) for k in TRANSFORMERS_VERSIONS]) @nox.parametrize("torch_ver", [nox.param(k, id=k) for k in TORCH_VERSIONS]) def unit(session, torch_ver, tf_ver): diff --git a/pyproject.toml b/pyproject.toml index b129ae67093..a174c6218d8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,7 +19,7 @@ description = "Nvidia Model Optimizer: a unified model optimization and deployme readme = { text = "Checkout https://github.com/nvidia/Model-Optimizer for more information.", content-type = "text/markdown" } license = "Apache-2.0" license-files = ["LICENSE_HEADER"] -requires-python = ">=3.10,<3.14" +requires-python = ">=3.10,<3.15" authors = [{ name = "NVIDIA Corporation" }] classifiers = [ "Programming Language :: Python :: 3", From 55c338f6169d1c24b28fe6dc668343d487c4be6b Mon Sep 17 00:00:00 2001 From: Ajinkya Rasane <131806219+ajrasane@users.noreply.github.com> Date: Tue, 5 May 2026 00:20:30 -0400 Subject: [PATCH 21/24] [6110209] Patch zero FP16 scales in INT4_AWQ ONNX export (#1353) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ### What does this PR do? Type of change: Bug fix Fix `replace_zero_scale_with_smallest_nonzero()` in `modelopt/onnx/quantization/qdq_utils.py` so the FP16-scale sanitizer actually runs for INT4_AWQ ONNX exports. The function is supposed to ensure all FP16 scales are strictly positive before the model reaches TensorRT, since `trtexec --stronglyTyped` asserts `scaleAllPositive`. It had two latent bugs that made it a complete no-op for INT4_AWQ: 1. It only collected scales from `QuantizeLinear` consumers — but INT4_AWQ exports use `DequantizeLinear` (default domain) and `TRT_INT4DequantizeLinear` (trt:: domain). There are zero `QuantizeLinear` nodes in such graphs, so the collected set was empty. 2. It only patched scales emitted by `Constant` nodes — but INT4_AWQ stores scales as graph initializers. When the FP32→FP16 cast in `_convert_fp32_init_to_fp16()` underflowed small amax values to `0.0` (FP16 min subnormal is 5.96e-8), those zeros sailed through into the exported ONNX. TRT then rejected the model with: ``` Assertion failed: (scaleAllPositive || allowNegativeScale): Scale coefficients must all be positive ``` This PR extends the sanitizer to: - Walk `QuantizeLinear` / `DequantizeLinear` / `TRT_INT4QuantizeLinear` / `TRT_INT4DequantizeLinear` nodes when collecting scale tensor names. 
- Patch zero entries in float-typed graph initializers in addition to `Constant`-node values, preserving the original dtype. Files modified: - `modelopt/onnx/quantization/qdq_utils.py` — fix. - `tests/unit/onnx/quantization/test_qdq_utils.py` — `TestReplaceZeroScaleWithSmallestNonzero` regression tests. - `CHANGELOG.rst` — bug-fix entry under 0.45. ### Usage ```bash # Repro that previously failed and now succeeds: python torch_quant_to_onnx.py \ --quantize_mode=int4_awq \ --timm_model_name=vit_base_patch16_224 \ --onnx_save_path=/tmp/vit_base_patch16_224.int4_awq.onnx \ --calibration_data_size=32 trtexec --onnx=/tmp/vit_base_patch16_224.int4_awq.onnx --stronglyTyped --skipInference ``` ### Testing - New tests pass: `pytest tests/unit/onnx/quantization/test_qdq_utils.py::TestReplaceZeroScaleWithSmallestNonzero -v` (3 passed). - Full file: `pytest tests/unit/onnx/quantization/test_qdq_utils.py` (25 passed). - Broader sanity: `pytest tests/unit/onnx/quantization/` (288 passed). - Smoke test on a synthetic model with explicit zero scales: 3 zeros → 0 zeros, all positive, FP16 dtype preserved. - `pre-commit run --files ` clean. ### Before your PR is "*Ready for review*" Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`). Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.). - Is this change backward compatible?: ✅ - If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A - Did you write any new necessary tests?: ✅ - Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ✅ ### Additional Information - NVBug: 6110209 - Reproduces with `nvidia-modelopt==0.44.0rc1`, TensorRT 10.15.1.29, on B200 / H20. - Root cause introduced when the `TRT_INT4DequantizeLinear` export path was added (PR #575, commit `0a4f0a8b`); that PR didn't update the sanitizer to handle the new node type or scale storage. ## Summary by CodeRabbit * **Bug Fixes** * Zero scale values in quantization/dequantization ops (including additional operator variants) are now replaced with the smallest nonzero fp16 scale for matching tensors; replacements preserve the original tensor data type and handle scales provided via initializers or constant nodes. * **Tests** * Added regression tests covering initializer- and constant-backed scale tensors across multiple operator configurations to ensure zeros are eliminated and dtype is preserved. 
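For intuition, here is a small standalone illustration of the underflow path described above (the amax value is illustrative, not from a real calibration run):

```python
import numpy as np

amax = np.float32(1e-8)           # tiny per-channel amax from calibration
scale = amax.astype(np.float16)   # fp16 min subnormal is ~5.96e-8, so this rounds to 0.0
print(scale)                      # 0.0, exactly what trips TRT's scaleAllPositive assertion

# The sanitizer's remedy: replace exact zeros with the smallest safe nonzero fp16 value.
patched = np.where(scale == 0, np.float16(6e-8), scale)
print(patched, patched.dtype)     # ~6e-08 float16, strictly positive, dtype preserved
```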
Signed-off-by: ajrasane <131806219+ajrasane@users.noreply.github.com> Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com> --- modelopt/onnx/quantization/qdq_utils.py | 31 ++++++- .../unit/onnx/quantization/test_qdq_utils.py | 87 +++++++++++++++++++ 2 files changed, 114 insertions(+), 4 deletions(-) diff --git a/modelopt/onnx/quantization/qdq_utils.py b/modelopt/onnx/quantization/qdq_utils.py index 0cb1a45f681..265bcf36b2a 100644 --- a/modelopt/onnx/quantization/qdq_utils.py +++ b/modelopt/onnx/quantization/qdq_utils.py @@ -1011,14 +1011,37 @@ def replace_zero_scale_with_smallest_nonzero(onnx_model: onnx.ModelProto) -> onn """Replace zero scale values with smallest nonzero fp16 value in the ONNX model.""" graph = onnx_model.graph fp16_smallest_nonzero = np.float16(6e-08) - scale_nodes = [node.input[1] for node in graph.node if node.op_type == "QuantizeLinear"] + qdq_op_types = { + "QuantizeLinear", + "DequantizeLinear", + "TRT_INT4QuantizeLinear", + "TRT_INT4DequantizeLinear", + } + scale_tensor_names = { + node.input[1] + for node in graph.node + if node.op_type in qdq_op_types and len(node.input) >= 2 + } + # Scales stored as graph initializers (e.g. INT4_AWQ / TRT_INT4DequantizeLinear exports). + for init in graph.initializer: + if init.name in scale_tensor_names: + tensor = numpy_helper.to_array(init) + if tensor.dtype.kind == "f": + new_tensor = np.where(tensor == 0, fp16_smallest_nonzero, tensor).astype( + tensor.dtype + ) + init.CopyFrom(numpy_helper.from_array(new_tensor, init.name)) + # Scales emitted by Constant nodes (legacy QDQ export path). for node in graph.node: - if node.op_type == "Constant" and node.output[0] in scale_nodes: + if node.op_type == "Constant" and node.output[0] in scale_tensor_names: for attr in node.attribute: if attr.name == "value": tensor = numpy_helper.to_array(attr.t) - new_tensor = np.where(tensor == 0, fp16_smallest_nonzero, tensor) - attr.t.CopyFrom(numpy_helper.from_array(new_tensor, attr.t.name)) + if tensor.dtype.kind == "f": + new_tensor = np.where(tensor == 0, fp16_smallest_nonzero, tensor).astype( + tensor.dtype + ) + attr.t.CopyFrom(numpy_helper.from_array(new_tensor, attr.t.name)) return onnx_model diff --git a/tests/unit/onnx/quantization/test_qdq_utils.py b/tests/unit/onnx/quantization/test_qdq_utils.py index 42aa317119f..8af5f560dd0 100644 --- a/tests/unit/onnx/quantization/test_qdq_utils.py +++ b/tests/unit/onnx/quantization/test_qdq_utils.py @@ -1021,3 +1021,90 @@ def test_column_major_gemm_trans_b_flip(self): print(f"transB flipped: 1 -> {trans_b_value}") print(f"Transpose nodes: {len(transpose_nodes)}") + + +def _build_model_with_zero_scale_initializer(dq_op_type: str): + """Build an ONNX model whose scale initializer feeds a (Quantize|Dequantize)Linear node. + + Mirrors the INT4_AWQ failure mode from NVBug 6110209: scales live in graph initializers + (not Constant nodes) and feed DequantizeLinear (default or trt:: domain) consumers. 
+ """ + weight_data = np.random.randint(-8, 8, size=(6, 8), dtype=np.int8) + weight_tensor = numpy_helper.from_array(weight_data, "weight") + + scale_data = np.array([1e-3, 0.0, 5e-4, 0.0, 0.0, 2e-3], dtype=np.float16).reshape(6, 1) + scale_tensor = numpy_helper.from_array(scale_data, "scale") + + input_tensor = helper.make_tensor_value_info("input", TensorProto.FLOAT16, [None, 6]) + dq_node = helper.make_node( + dq_op_type, inputs=["weight", "scale"], outputs=["dq_output"], name="weight_dq" + ) + matmul_node = helper.make_node( + "MatMul", inputs=["input", "dq_output"], outputs=["output"], name="matmul" + ) + graph = helper.make_graph( + nodes=[dq_node, matmul_node], + name="test_graph", + inputs=[input_tensor], + outputs=[helper.make_tensor_value_info("output", TensorProto.FLOAT16, [None, 8])], + initializer=[weight_tensor, scale_tensor], + ) + return helper.make_model(graph) + + +class TestReplaceZeroScaleWithSmallestNonzero: + """Regression tests for ``replace_zero_scale_with_smallest_nonzero`` (NVBug 6110209).""" + + @pytest.mark.parametrize("dq_op_type", ["DequantizeLinear", "TRT_INT4DequantizeLinear"]) + def test_zero_scale_initializer_fed_to_dq_is_patched(self, dq_op_type): + from modelopt.onnx.quantization.qdq_utils import replace_zero_scale_with_smallest_nonzero + + model = _build_model_with_zero_scale_initializer(dq_op_type) + scale_before = numpy_helper.to_array( + next(init for init in model.graph.initializer if init.name == "scale") + ) + assert (scale_before == 0).any(), "fixture must contain zeros to exercise the fix" + + patched = replace_zero_scale_with_smallest_nonzero(model) + + scale_after_init = next(init for init in patched.graph.initializer if init.name == "scale") + scale_after = numpy_helper.to_array(scale_after_init) + assert not (scale_after == 0).any() + assert (scale_after > 0).all() + assert scale_after_init.data_type == TensorProto.FLOAT16 + + def test_constant_node_scale_path_still_patched(self): + """Legacy Constant-node QDQ path must continue to be patched.""" + from modelopt.onnx.quantization.qdq_utils import replace_zero_scale_with_smallest_nonzero + + scale_data = np.array([1e-3, 0.0, 2e-3], dtype=np.float16) + scale_const = helper.make_node( + "Constant", + inputs=[], + outputs=["scale_out"], + value=numpy_helper.from_array(scale_data), + name="scale_constant", + ) + input_tensor = helper.make_tensor_value_info("input", TensorProto.FLOAT, [3]) + q_node = helper.make_node( + "QuantizeLinear", + inputs=["input", "scale_out"], + outputs=["q_output"], + name="q", + ) + graph = helper.make_graph( + nodes=[scale_const, q_node], + name="test_graph", + inputs=[input_tensor], + outputs=[helper.make_tensor_value_info("q_output", TensorProto.INT8, [3])], + initializer=[], + ) + model = helper.make_model(graph) + + patched = replace_zero_scale_with_smallest_nonzero(model) + + const = next(n for n in patched.graph.node if n.op_type == "Constant") + value_attr = next(a for a in const.attribute if a.name == "value") + scale_arr = numpy_helper.to_array(value_attr.t) + assert not (scale_arr == 0).any() + assert (scale_arr > 0).all() From fc248132d785f54adad20754bcc16536b6d45c6b Mon Sep 17 00:00:00 2001 From: Ajinkya Rasane <131806219+ajrasane@users.noreply.github.com> Date: Tue, 5 May 2026 00:20:43 -0400 Subject: [PATCH 22/24] [6106576] Restore llm_export_utils as deprecated shim for edgellm 0.6.1 compat (#1356) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What does this PR do? 
**Type of change:** Bug fix (backward-compat restoration)

#1210 (b3feebfe, "Replace in-repo LLM ONNX export with TensorRT-Edge-LLM") removed `modelopt/onnx/llm_export_utils/` from 0.44.0rc1 and pointed users at [TensorRT-Edge-LLM](https://github.com/NVIDIA/TensorRT-Edge-LLM) as the migration target. The PR description openly flagged the change as not backward compatible.

The catch: **TensorRT-Edge-LLM 0.6.1 itself imports the deleted symbol**:

```python
# tensorrt_edgellm/onnx_export/onnx_utils.py:24
from modelopt.onnx.llm_export_utils.surgeon_utils import fold_fp8_qdq_to_dq
```

Because that import is at module load time, every `tensorrt-edgellm-*` CLI — including `tensorrt-edgellm-quantize-llm --help` — fails immediately with `ModuleNotFoundError`. QA reports a 5/5 (100%) failure rate on `tests/test_onnx_ptq/test_onnx_ptq_edge_llm.py`, reproducible without a GPU. The "unused" framing in the original removal commit (`d89138a6`) only held inside this repo; the public API surface had an external consumer.

## What this PR does

- Restores the four original submodules under `modelopt/onnx/llm_export_utils/` verbatim from `d89138a6^`:
  - `__init__.py`
  - `surgeon_utils.py` (contains the missing `fold_fp8_qdq_to_dq`, plus `clear_inputs`, `clear_outputs`, `extract_layer_id`, `no_none_elements`)
  - `export_utils.py` (`ModelLoader`, `WrapperModelForCausalLM`, `RopeType`, `llm_to_onnx`, `torch_to_onnx`)
  - `quantization_utils.py` (`get_quant_config`, `quantize`)
- `__init__.py` emits a `DeprecationWarning` on import directing users to `modelopt.onnx.export`, `modelopt.onnx.graph_surgery`, or TensorRT-Edge-LLM.

The new `modelopt.onnx.export` and `modelopt.onnx.graph_surgery` packages do **not** expose `fold_fp8_qdq_to_dq` (verified by `grep`), so a pure import-redirect shim wouldn't have worked — the function itself has to come back.

## Why a shim instead of fixing edgellm

We should still ship a `tensorrt-edgellm` 0.6.2 that inlines the helper and drops the import, but every existing 0.6.1 install (and the version pin in its wheel — `nvidia-modelopt[torch,onnx]==0.39.0`) is broken in the meantime, and pip doesn't downgrade modelopt when 0.44.0rc1 is already installed. The shim unblocks them on the modelopt side immediately.

## Usage

No new usage surface. External consumers continue to import the same paths as before; they get a `DeprecationWarning` pointing them at the successor APIs.

## Testing

Verified the failing import succeeds and the warning fires:

```bash
$ python -W default::DeprecationWarning -c \
    "from modelopt.onnx.llm_export_utils.surgeon_utils import fold_fp8_qdq_to_dq; print('OK')"
<string>:1: DeprecationWarning: modelopt.onnx.llm_export_utils is deprecated and will be removed in a future release. Use modelopt.onnx.export and modelopt.onnx.graph_surgery, or migrate to TensorRT-Edge-LLM (https://github.com/NVIDIA/TensorRT-Edge-LLM).
OK
```

The four submodules `import` cleanly (`surgeon_utils`, `export_utils`, `quantization_utils`). All pre-commit hooks pass (ruff, mypy, license headers, bandit, RST formatting).

## Before your PR is "*Ready for review*"

- [x] Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
- [x] Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors).
- Is this change backward compatible?: ✅ — restores a previously public API; new code should still migrate.
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A (restoration of code from this repo's own history).
- Did you write any new necessary tests?: ❌ — restoration of removed code; the failing edgellm test path is the integration test.
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: ❌ — intentionally omitted; this restores a previously public API as a deprecated shim, and the prior removal wasn't itself called out in the changelog.

## Additional Information

- Original removal: #1210 (`b3feebfe`) and prep commit `d89138a6` "Remove unused llm_export_utils package".
- Followup: track edgellm-side fix (drop the modelopt import / inline `fold_fp8_qdq_to_dq`) so the shim can be removed in a future major.
- Suggested cherry-pick to `release/0.44.0` so 0.44.0 GA ships without the regression.

## Summary by CodeRabbit

* **Deprecated**
* Legacy LLM ONNX export pipeline is now deprecated; users are directed to newer alternatives for model export.
* **New Features**
* Added support for exporting HuggingFace causal language models to ONNX format with dynamic shape support.
* Added quantization utilities supporting FP8, INT4-AWQ, and NVFP4 precisions for LLM models.
* Added ONNX graph optimization utilities for quantized operations.

Signed-off-by: ajrasane <131806219+ajrasane@users.noreply.github.com>
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 modelopt/onnx/llm_export_utils/__init__.py    |  42 +++++
 .../onnx/llm_export_utils/export_utils.py     | 162 ++++++++++++++++++
 .../llm_export_utils/quantization_utils.py    | 146 ++++++++++++++++
 .../onnx/llm_export_utils/surgeon_utils.py    | 120 +++++++++++++
 4 files changed, 470 insertions(+)
 create mode 100644 modelopt/onnx/llm_export_utils/__init__.py
 create mode 100644 modelopt/onnx/llm_export_utils/export_utils.py
 create mode 100644 modelopt/onnx/llm_export_utils/quantization_utils.py
 create mode 100644 modelopt/onnx/llm_export_utils/surgeon_utils.py

diff --git a/modelopt/onnx/llm_export_utils/__init__.py b/modelopt/onnx/llm_export_utils/__init__.py
new file mode 100644
index 00000000000..8ea066d8658
--- /dev/null
+++ b/modelopt/onnx/llm_export_utils/__init__.py
@@ -0,0 +1,42 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Deprecated shim for the legacy ``modelopt.onnx.llm_export_utils`` package.
+
+The in-repo LLM ONNX export pipeline (formerly ``examples/torch_onnx/llm_export.py``
+plus this package) was removed in 0.44.0rc1 in favor of
+`TensorRT-Edge-LLM <https://github.com/NVIDIA/TensorRT-Edge-LLM>`_, which provides
+a more complete and actively maintained pipeline.
+
+This package is preserved only as a compatibility shim so external consumers that
+still import ``modelopt.onnx.llm_export_utils`` (notably TensorRT-Edge-LLM 0.6.1
+and earlier) continue to work. It will be removed in a future release.
+
+New code should migrate to:
+
+* ``modelopt.onnx.export`` — quant exporters (``FP8QuantExporter``, ``NVFP4QuantExporter``, etc.)
+* ``modelopt.onnx.graph_surgery`` — graph transforms (GQA replacement, BF16 conversion, etc.)
+* `TensorRT-Edge-LLM <https://github.com/NVIDIA/TensorRT-Edge-LLM>`_ — end-to-end LLM export.
+"""
+
+import warnings
+
+warnings.warn(
+    "modelopt.onnx.llm_export_utils is deprecated and will be removed in a future "
+    "release. Use modelopt.onnx.export and modelopt.onnx.graph_surgery, or migrate "
+    "to TensorRT-Edge-LLM (https://github.com/NVIDIA/TensorRT-Edge-LLM).",
+    DeprecationWarning,
+    stacklevel=2,
+)
diff --git a/modelopt/onnx/llm_export_utils/export_utils.py b/modelopt/onnx/llm_export_utils/export_utils.py
new file mode 100644
index 00000000000..2016e872e28
--- /dev/null
+++ b/modelopt/onnx/llm_export_utils/export_utils.py
@@ -0,0 +1,162 @@
+# SPDX-FileCopyrightText: Copyright (c) 2023-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+ +"""Utilities for exporting LLM models to ONNX.""" + +import json +import os +import time +from enum import Enum + +import torch +from transformers import AutoModelForCausalLM, DynamicCache + + +class RopeType(Enum): + """Rope type enum.""" + + K_NONE = 0 + K_ROPE_ROTATE_GPTJ = 1 + K_ROPE_ROTATE_NEOX = 2 + K_MROPE = 3 + + +class ModelLoader: + """A class to handle HuggingFace model loading and configuration.""" + + def __init__(self, hf_model_path: str, config_path: str): + """Initialize the ModelLoader.""" + self.config_path = config_path + self.hf_model_path = hf_model_path + self.model_type = self.get_model_type() + self.hf_model = None + self.rope_type = RopeType.K_ROPE_ROTATE_NEOX + + def get_model_type(self): + """Get model type from config file.""" + with open(self.config_path) as f: + return json.load(f).get("model_type") + + def load_model(self, trust_remote_code: bool = False) -> AutoModelForCausalLM: + """Load HuggingFace model based on model type.""" + print(f"Loading HF model from {self.hf_model_path} with model type {self.model_type}") + self.hf_model = AutoModelForCausalLM.from_pretrained( + self.hf_model_path, torch_dtype=torch.float16, trust_remote_code=trust_remote_code + ) + + return self.hf_model.eval().cuda() # type: ignore[attr-defined] + + def get_rope_type(self): + """Get rope type.""" + return self.rope_type + + +class WrapperModelForCausalLM(torch.nn.Module): + """Wrapper Model to ensure all models have the same I/O.""" + + def __init__(self, model): + """Initialize the WrapperModelForCausalLM.""" + super().__init__() + try: + self.model = model.model + except Exception: + self.model = model + self.lm_head = model.lm_head + self.config = model.config + + def forward(self, input_ids: torch.Tensor | None, past_key_values: tuple): + """Forward pass.""" + # Convert tuple cache to DynamicCache for models that require it (e.g., Qwen3) + cache = DynamicCache(config=self.config) + cache.key_cache = [kv[0] for kv in past_key_values] + cache.value_cache = [kv[1] for kv in past_key_values] + past_key_values = cache + + outputs = self.model(input_ids=input_ids, past_key_values=past_key_values, use_cache=True) + hidden_states = outputs[0] + past_key_values = outputs.past_key_values.to_legacy_cache() + logits = self.lm_head(hidden_states) + return logits, past_key_values + + +def llm_to_onnx(model, output_dir, extra_inputs={}, extra_dyn_axes={}): + """Export the WrapperModelForCausalLM to ONNX with fixed I/O names and shape definitions and save to `output_dir`. + + Parameters: + model: torch.Module + output_dir: str, the output_dir of the original ONNX. + extra_inputs: dict, append additional inputs after kv_cache. Usually for VL models + extra_dyn_axes: dict. 
Usually for VL models + """ + start_time = time.time() + config = model.config + num_layers = config.num_hidden_layers + num_attention_heads = config.num_attention_heads + num_key_value_heads = config.num_key_value_heads + hidden_size = config.hidden_size + hidden_size_per_layer = hidden_size // num_attention_heads + + dummy_bs = 1 + dummy_len = 10 + dummy_input_ids = torch.randint(100, (dummy_bs, dummy_len), dtype=torch.int64).cuda() + input_names = ["input_ids"] + output_names = ["logits"] + dynamic_axes = {"input_ids": {0: "batch_size", 1: "seq_len"}} + dummy_kv_cache = () + for i in range(num_layers): + dummy_k = torch.rand( + (dummy_bs, num_key_value_heads, dummy_len, hidden_size_per_layer), dtype=torch.float16 + ).cuda() + dummy_v = torch.rand( + (dummy_bs, num_key_value_heads, dummy_len, hidden_size_per_layer), dtype=torch.float16 + ).cuda() + dummy_kv_cache = (*dummy_kv_cache, (dummy_k, dummy_v)) + input_names.extend([f"past_key_values.{i}.key", f"past_key_values.{i}.value"]) + output_names.extend([f"present_key_values.{i}.key", f"present_key_values.{i}.value"]) + input_dynamic_axes = {0: "batch_size", 2: "past_len"} + dynamic_axes[f"past_key_values.{i}.key"] = input_dynamic_axes + dynamic_axes[f"past_key_values.{i}.value"] = input_dynamic_axes + + torch_to_onnx( + model, + (dummy_input_ids, {"past_key_values": dummy_kv_cache, **extra_inputs}), + output_dir, + "model.onnx", + input_names=input_names + list(extra_inputs.keys()), + output_names=output_names, + dynamic_axes=dynamic_axes | extra_dyn_axes, + ) + + end_time = time.time() + print( + f"Native ONNX Export from torch completed in {end_time - start_time}s. ONNX file is saved to {output_dir}." + ) + + +def torch_to_onnx(model, inputs, onnx_dir, onnx_name, input_names, output_names, dynamic_axes): + """Export the model to ONNX.""" + os.makedirs(onnx_dir, exist_ok=True) + with torch.inference_mode(): + torch.onnx.export( + model, + inputs, + f"{onnx_dir}/{onnx_name}", + input_names=input_names, + output_names=output_names, + dynamic_axes=dynamic_axes, + opset_version=19, + do_constant_folding=True, + dynamo=False, + ) diff --git a/modelopt/onnx/llm_export_utils/quantization_utils.py b/modelopt/onnx/llm_export_utils/quantization_utils.py new file mode 100644 index 00000000000..ac24c24a53c --- /dev/null +++ b/modelopt/onnx/llm_export_utils/quantization_utils.py @@ -0,0 +1,146 @@ +# SPDX-FileCopyrightText: Copyright (c) 2023-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Quantization utilities for LLM models.""" + +import copy +import time + +import modelopt.torch.quantization as mtq +from modelopt.torch.utils.dataset_utils import get_dataset_dataloader + + +def _quantize_model(model, quant_config, calib_dataloader=None): + """The calibration loop for the model can be setup using the modelopt API. + + Example usage: + from modelopt.torch.utils.dataset_utils import create_forward_loop + model = ... 
# Initialize the model + tokenizer = ... # Initialize the tokenizer + quant_cfg = ... # Setup quantization configuration + forward_loop = create_forward_loop(model=model, dataset_name="cnn_dailymail", tokenizer=tokenizer) + mtq.quantize(model, quant_cfg, forward_loop=forward_loop) + """ + + def calibrate_loop(model): + """Adjusts weights and scaling factors based on selected algorithms.""" + for idx, data in enumerate(calib_dataloader): + if idx % 10 == 0: + print(f"Calibrating batch {idx}...") + if isinstance(data, dict): + data = {k: v.to(model.device) for k, v in data.items()} + model(**data) + else: + data = data.to(model.device) + model(data) + + print("Starting quantization...") + start_time = time.time() + mtq.quantize(model, quant_config, forward_loop=calibrate_loop) + end_time = time.time() + print(f"Quantization finishes in {end_time - start_time}s.") + + return model + + +def get_quant_config(precision, lm_head_precision="fp16"): + """Get the quantization configuration.""" + if precision == "fp8": + quant_cfg = copy.deepcopy(mtq.FP8_DEFAULT_CFG) + + elif precision == "nvfp4": + quant_cfg = copy.deepcopy(mtq.NVFP4_DEFAULT_CFG) + + elif precision == "int4_awq": + quant_cfg = copy.deepcopy(mtq.INT4_AWQ_CFG) # type: ignore[arg-type] + + else: + raise ValueError(f"Unsupported precision: {precision}") + + quant_cfg_list: list = [ + e for e in quant_cfg["quant_cfg"] if isinstance(e, dict) and "quantizer_name" in e + ] + + if lm_head_precision == "fp8": + quant_cfg_list.append( + { + "quantizer_name": "*lm_head.input_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + } + ) + quant_cfg_list.append( + { + "quantizer_name": "*lm_head.weight_quantizer", + "cfg": {"num_bits": (4, 3), "axis": None}, + } + ) + elif lm_head_precision == "nvfp4": + quant_cfg_list.append( + { + "quantizer_name": "*lm_head.input_quantizer", + "cfg": { + "num_bits": (2, 1), + "block_sizes": {-1: 16, "type": "dynamic", "scale_bits": (4, 3)}, + "axis": None, + }, + "enable": True, + } + ) + quant_cfg_list.append( + { + "quantizer_name": "*lm_head.weight_quantizer", + "cfg": { + "num_bits": (2, 1), + "block_sizes": {-1: 16, "type": "dynamic", "scale_bits": (4, 3)}, + "axis": None, + }, + "enable": True, + } + ) + quant_cfg["quant_cfg"] = quant_cfg_list + return quant_cfg + + +def quantize( + model, tokenizer, precision, lm_head_precision="fp16", dataset_dir=None, calib_size=512 +): + """Quantize the PyTorch model to fp8 or int4_awq.""" + assert precision in [ + "fp8", + "int4_awq", + "nvfp4", + ], ( + f"Only fp8(W8A8), int4_awq(W4A16), nvfp4(W4A4) is supported. You passed an unsupported precision: {precision}." + ) + + assert lm_head_precision in ["fp16"], ( + f"Only fp16(unquantized) is supported for lm_head. You passed an unsupported precision: {lm_head_precision}." 
+ ) + + if tokenizer.pad_token != "": # nosec B105 + tokenizer.pad_token = tokenizer.eos_token + if tokenizer.pad_token is None: + tokenizer.pad_token = tokenizer.eos_token + if not dataset_dir: + dataset_dir = "cnn_dailymail" + + batch_size = 1 + data_loader = get_dataset_dataloader( + dataset_name=dataset_dir, tokenizer=tokenizer, batch_size=batch_size, num_samples=calib_size + ) + quant_config = get_quant_config(precision, lm_head_precision) + quantized_model = _quantize_model(model, quant_config, data_loader) + mtq.print_quant_summary(quantized_model) + return quantized_model diff --git a/modelopt/onnx/llm_export_utils/surgeon_utils.py b/modelopt/onnx/llm_export_utils/surgeon_utils.py new file mode 100644 index 00000000000..2937f6ad0cc --- /dev/null +++ b/modelopt/onnx/llm_export_utils/surgeon_utils.py @@ -0,0 +1,120 @@ +# SPDX-FileCopyrightText: Copyright (c) 2023-2026 NVIDIA CORPORATION & AFFILIATES. All rights reserved. +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Utilities to surgeon ONNX graph after export.""" + +import re +import time + +import onnx +import onnx_graphsurgeon as gs +import torch +from onnx_graphsurgeon.ir.tensor import LazyValues + + +def clear_inputs(node: gs.Node | gs.Tensor): + """Clear all inputs for a node or tensor in ONNX.""" + for i in node.inputs: + i.outputs.clear() + node.inputs.clear() + return node + + +def clear_outputs(node: gs.Node | gs.Tensor): + """Clear all outputs for a node or tensor in ONNX.""" + for o in node.outputs: + o.inputs.clear() + node.outputs.clear() + return node + + +def extract_layer_id(name: str): + """Extract layer id from certain ONNX layer name. + + Parameters: + name: str + The name of ONNX layer. e.g. /model/layer.0/q_proj/... + + Returns: + The layer id for the layer as int. In the example above, it returns 0 + """ + match = re.search(r"layers\.(\d+)", name) + if match: + return int(match.group(1)) + raise Exception(f"{name} does not contain layer info!") + + +def no_none_elements(elements: list): + """Check if all elements in the list are not None.""" + return all(i is not None for i in elements) + + +def fold_fp8_qdq_to_dq(graph: gs.Graph): + """Convert FP32/FP16 weights of the given ONNX model to FP8 weights. + + Even though modelopt supports FP8 onnx export, the weights are represented in fp32 + QDQ. + The storage is therefore very bad. In this function, + Q nodes will get removed from the weights and have only DQ nodes with those converted FP8 + weights in the output model. + + Parameters: + graph: gs.Graph. + + Returns: + gs.Graph with only DQ nodes for weights and same QDQ nodes for activations. + """ + start_time = time.time() + print("Replacing all (fp32 weights + fp8 QDQ) with (fp8 weights + DQ)...") + # Fold constants is required since the scale is not constant yet. 
+    graph.cleanup().toposort().fold_constants().cleanup()
+
+    for node in graph.nodes:
+        if node.op == "TRT_FP8QuantizeLinear":
+            # Should not remove input QDQ
+            if not isinstance(node.inputs[0], gs.Constant):
+                continue
+
+            weights = node.inputs[0]
+            scale = node.inputs[1]
+            torch_weights = torch.from_numpy(weights.values)
+            torch_scale = torch.from_numpy(scale.values)
+            quantizer_name = scale.name.rsplit("/", 1)[0]
+            dq_op = node.outputs[0].outputs[0]
+            assert dq_op.op == "TRT_FP8DequantizeLinear", (
+                f"QDQ does not occur in pairs. You reached {dq_op.op}"
+            )
+
+            # Replace it with Dequantize with FP8 weights. This is a WAR because numpy does not support fp8.
+            numpy_weights = (
+                (torch_weights / torch_scale).to(torch.float8_e4m3fn).view(torch.uint8).numpy()
+            )
+            tensor = onnx.TensorProto()
+            tensor.data_type = onnx.TensorProto.FLOAT8E4M3FN
+            tensor.dims.extend(numpy_weights.shape)
+            tensor.raw_data = numpy_weights.tobytes()
+            values = LazyValues(tensor)
+            onnx_weights_fp8 = gs.Constant(quantizer_name + "/fp8_weights", values)
+
+            node.outputs.clear()
+            # DQ Op is separated out
+            dq_op.inputs[0] = onnx_weights_fp8
+            dq_op.op = "DequantizeLinear"
+            dq_op.outputs[0].dtype = dq_op.inputs[1].dtype
+
+    graph.cleanup().toposort()
+    end_time = time.time()
+    print(f"fp8 qdq replaced with only dq completed in {end_time - start_time}s.")
+
+    return graph

From e3d332119cafef8053a0634d439b4349fd3fb119 Mon Sep 17 00:00:00 2001
From: sugunav14 <178320438+sugunav14@users.noreply.github.com>
Date: Mon, 4 May 2026 21:21:30 -0700
Subject: [PATCH 23/24] Fix gpt-oss examples trl import error (#1390)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

### What does this PR do?

Type of change: Bug fix

Cap `kernels<0.13` and `trackio<0.21` in `examples/gpt-oss/requirements.txt`. Both newer versions require `huggingface_hub>=1.x`, but the example's transformers install pins `huggingface_hub<1.0`, so a fresh install breaks on import (`Unsupported type for field 'import_name': str | None` from kernels; `cannot import name 'Volume'` from trackio).

### Usage

No API change. On transformers<5.0, override the config's `warmup_steps` with `--warmup_ratio 0.03 --warmup_steps 0` (or edit the YAML), as already noted by the comment in `configs/sft_*.yaml`.

```bash
accelerate launch --config_file configs/zero3.yaml sft.py \
    --config configs/sft_full.yaml \
    --model_name_or_path openai/gpt-oss-20b \
    --quant_cfg MXFP4_MLP_WEIGHT_ONLY_CFG \
    --output_dir gpt-oss-20b-qat \
    --warmup_steps 0 --warmup_ratio 0.03
```

### Testing

1. With transformers pinned:

   ```bash
   pip install -r examples/gpt-oss/requirements.txt
   pip install transformers==4.57.3
   accelerate launch --config_file configs/zero3.yaml sft.py \
       --config configs/sft_full.yaml \
       --model_name_or_path openai/gpt-oss-20b \
       --quant_cfg MXFP4_MLP_WEIGHT_ONLY_CFG \
       --output_dir gpt-oss-20b-qat \
       --warmup_steps 0 --warmup_ratio 0.03
   ```

2. With the latest transformers:

   ```bash
   pip install -r examples/gpt-oss/requirements.txt
   pip install --upgrade transformers
   accelerate launch --config_file configs/zero3.yaml sft.py \
       --config configs/sft_full.yaml \
       --model_name_or_path openai/gpt-oss-20b \
       --quant_cfg MXFP4_MLP_WEIGHT_ONLY_CFG \
       --output_dir gpt-oss-20b-qat
   ```

### Before your PR is "*Ready for review*"

- [x] Make sure you read and follow [Contributor guidelines](https://github.com/NVIDIA/Model-Optimizer/blob/main/CONTRIBUTING.md) and your commits are signed (`git commit -s -S`).
- [x] Make sure you read and follow the [Security Best Practices](https://github.com/NVIDIA/Model-Optimizer/blob/main/SECURITY.md#security-coding-practices-for-contributors) (e.g. avoiding hardcoded `trust_remote_code=True`, `torch.load(..., weights_only=False)`, `pickle`, etc.).
- Is this change backward compatible?: ✅
- If you copied code from any other sources or added a new PIP dependency, did you follow guidance in `CONTRIBUTING.md`: N/A
- Did you write any new necessary tests?: N/A
- Did you update [Changelog](https://github.com/NVIDIA/Model-Optimizer/blob/main/CHANGELOG.rst)?: N/A

### Additional Information

Signed-off-by: Suguna Velury <178320438+sugunav14@users.noreply.github.com>
Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 examples/gpt-oss/requirements.txt | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/gpt-oss/requirements.txt b/examples/gpt-oss/requirements.txt
index d18f9eb539d..f063bfb0570 100644
--- a/examples/gpt-oss/requirements.txt
+++ b/examples/gpt-oss/requirements.txt
@@ -1,3 +1,3 @@
-kernels>=0.9.0
-trackio
+kernels>=0.9.0,<0.13
+trackio<0.21
 trl>=0.21.0

From 6b9f3701a868fa29ee081abe769fecf53b786f80 Mon Sep 17 00:00:00 2001
From: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
Date: Mon, 4 May 2026 21:26:29 -0700
Subject: [PATCH 24/24] Remove test_dflash_offline.py regression test

Signed-off-by: Keval Morabia <28916987+kevalmorabia97@users.noreply.github.com>
---
 .../torch/speculative/test_dflash_offline.py | 162 ------------------
 1 file changed, 162 deletions(-)
 delete mode 100644 tests/regression/torch/speculative/test_dflash_offline.py

diff --git a/tests/regression/torch/speculative/test_dflash_offline.py b/tests/regression/torch/speculative/test_dflash_offline.py
deleted file mode 100644
index da951fdcda6..00000000000
--- a/tests/regression/torch/speculative/test_dflash_offline.py
+++ /dev/null
@@ -1,162 +0,0 @@
-# SPDX-FileCopyrightText: Copyright (c) 2024 NVIDIA CORPORATION & AFFILIATES. All rights reserved.
-# SPDX-License-Identifier: Apache-2.0
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-"""DFlash offline E2E regression tests.
-
-Mirrors test_dflash.py but exercises the offline pipeline:
-    1. Dump base-model hidden states from a slice of synthetic_conversations_1k.jsonl
-       via examples/speculative_decoding/collect_hidden_states/compute_hidden_states_hf.py.
-    2. Train DFlash with data.offline_data_path set (triggers _derive_dflash_offline,
-       which deletes base-model layers post-convert to save memory).
-    3. Verify loss decreases on the offline path.
-
-Aux-layer ids 1,25 match build_target_layer_ids(num_orig_hidden_layers=28,
-num_draft_layers=2) for Qwen3-0.6B (28 hidden layers); changing the base model
-or draft layer count requires updating --aux-layers accordingly so the dumped
-aux_hidden_states dim matches the draft module input.
-""" - -import json -import os - -import pytest -from _test_utils.examples.run_command import MODELOPT_ROOT, run_example_command - -DFLASH_YAML = str( - MODELOPT_ROOT / "modelopt_recipes" / "general" / "speculative_decoding" / "dflash.yaml" -) - -CHAT_TEMPLATE = str( - MODELOPT_ROOT - / "tools" - / "launcher" - / "examples" - / "Qwen" - / "Qwen3-0.6B" - / "chat_template_train.jinja" -) - -SYNTH_DATA = str(MODELOPT_ROOT / "examples" / "dataset" / "synthetic_conversations_1k.jsonl") - -# Match _DFLASH_OVERRIDES in test_dflash.py so the offline run is comparable to online. -_DFLASH_OVERRIDES = [ - f"data.chat_template={CHAT_TEMPLATE}", - "training.training_seq_len=512", - "training.per_device_train_batch_size=2", - "training.logging_steps=50", - "training.answer_only_loss=true", - "dflash.dflash_block_size=8", - "dflash.dflash_mask_token_id=151669", - "dflash.dflash_use_torch_compile=False", - "dflash.dflash_architecture_config.num_hidden_layers=2", -] - -# Number of conversations to dump. Smaller than the full 1K to keep dump time -# bounded; large enough that loss-decrease becomes visible across logging_steps. -_DUMP_NUM_CONVERSATIONS = 200 - - -@pytest.fixture(scope="session") -def qwen3_model_name(): - """Qwen3-0.6B model name (downloaded from HF on first use).""" - return "Qwen/Qwen3-0.6B" - - -@pytest.fixture(scope="session") -def dflash_offline_output_dir(tmp_path_factory): - return tmp_path_factory.mktemp("dflash_offline_output") - - -@pytest.fixture(scope="session") -def tagged_synth_data_path(dflash_offline_output_dir): - """Tag each row of synthetic_conversations_1k.jsonl with a stable conversation_id. - - compute_hidden_states_hf.py uses conversation_id as the dump filename and - resume/skip key, asserting it is non-null. The shared synthetic dataset - only ships a `messages` field, so we materialize a tagged copy here. - """ - tagged_path = dflash_offline_output_dir / "tagged_synth.jsonl" - with open(SYNTH_DATA) as src, open(tagged_path, "w") as dst: - for i, line in enumerate(src): - entry = json.loads(line) - entry.setdefault("conversation_id", f"{i:04d}") - dst.write(json.dumps(entry) + "\n") - return tagged_path - - -@pytest.fixture(scope="session") -def offline_hidden_states_dir(qwen3_model_name, dflash_offline_output_dir, tagged_synth_data_path): - """Dump base-model hidden states once for the whole test module.""" - dump_dir = dflash_offline_output_dir / "hidden_states" - run_example_command( - [ - "python", - "collect_hidden_states/compute_hidden_states_hf.py", - "--model", - qwen3_model_name, - "--input-data", - str(tagged_synth_data_path), - "--output-dir", - str(dump_dir), - "--debug-max-num-conversations", - str(_DUMP_NUM_CONVERSATIONS), - # Two draft layers — matches build_target_layer_ids(28, 2) for Qwen3-0.6B. 
- "--aux-layers", - "1,25", - "--answer-only-loss", - "--chat-template", - CHAT_TEMPLATE, - ], - "speculative_decoding", - ) - pt_files = list(dump_dir.rglob("*.pt")) - assert pt_files, f"No .pt files dumped under {dump_dir}" - return dump_dir - - -def test_dflash_offline_training( - qwen3_model_name, dflash_offline_output_dir, offline_hidden_states_dir -): - """Train DFlash from dumped hidden states and validate loss decreases.""" - output_dir = str(dflash_offline_output_dir / "dflash-qwen3-0.6b-offline") - overrides = [ - f"model.model_name_or_path={qwen3_model_name}", - f"data.offline_data_path={offline_hidden_states_dir}", - f"training.output_dir={output_dir}", - # Two epochs over the dumped slice gives enough steps for two log entries - # at logging_steps=50 with batch=2 (200/2 * 2 = 200 steps → 4 entries). - "training.num_train_epochs=2", - "training.save_steps=500", - *_DFLASH_OVERRIDES, - ] - - run_example_command( - ["./launch_train.sh", "--config", DFLASH_YAML, *overrides], - "speculative_decoding", - ) - - trainer_state = os.path.join(output_dir, "trainer_state.json") - assert os.path.exists(trainer_state), "trainer_state.json not found" - with open(trainer_state) as f: - state = json.load(f) - logs = [h for h in state.get("log_history", []) if "loss" in h] - assert len(logs) >= 2, f"Expected at least 2 log entries, got {len(logs)}" - - first_loss = float(logs[0]["loss"]) - final_loss = float(logs[-1]["loss"]) - assert final_loss < first_loss, f"Loss did not decrease: {first_loss:.3f} -> {final_loss:.3f}" - # Sanity ceiling — same threshold as the online regression. Offline trains - # on fewer samples so we don't tighten it further here. - assert final_loss < 5.0, f"Final loss {final_loss:.3f} too high (expected < 5.0)"