From 166b158e322c9b6462cefe29098b622ff1e435e1 Mon Sep 17 00:00:00 2001
From: Scott Roy <161522778+metascroy@users.noreply.github.com>
Date: Tue, 1 Jul 2025 10:37:11 -0700
Subject: [PATCH] Revert "[Example] Yolo12 Detection sample with OpenVINO/XNNPACK backend (#10156)"

This reverts commit 00df64c7dd8b66971f3b4b29ada7113eba98df8e.
---
 .ci/scripts/test_yolo12.sh                    | 197 ---------
 backends/openvino/README.md                   |   2 +-
 examples/models/yolo12/CMakeLists.txt         |  85 ----
 examples/models/yolo12/README.md              | 121 ------
 examples/models/yolo12/export_and_validate.py | 397 ------------------
 examples/models/yolo12/inference.h            | 151 -------
 examples/models/yolo12/main.cpp               | 168 --------
 examples/models/yolo12/requirements.txt       |   1 -
 examples/models/yolo12/yolo12s_demo.gif       | Bin 13117571 -> 0 bytes
 9 files changed, 1 insertion(+), 1121 deletions(-)
 delete mode 100755 .ci/scripts/test_yolo12.sh
 delete mode 100644 examples/models/yolo12/CMakeLists.txt
 delete mode 100644 examples/models/yolo12/README.md
 delete mode 100644 examples/models/yolo12/export_and_validate.py
 delete mode 100644 examples/models/yolo12/inference.h
 delete mode 100644 examples/models/yolo12/main.cpp
 delete mode 100644 examples/models/yolo12/requirements.txt
 delete mode 100644 examples/models/yolo12/yolo12s_demo.gif

diff --git a/.ci/scripts/test_yolo12.sh b/.ci/scripts/test_yolo12.sh
deleted file mode 100755
index e3f20d5f970..00000000000
--- a/.ci/scripts/test_yolo12.sh
+++ /dev/null
@@ -1,197 +0,0 @@
-#!/bin/bash
-# Copyright (c) Meta Platforms, Inc. and affiliates.
-# All rights reserved.
-#
-# This source code is licensed under the BSD-style license found in the
-# LICENSE file in the root directory of this source tree.
-
-set -ex
-# shellcheck source=/dev/null
-source "$(dirname "${BASH_SOURCE[0]}")/utils.sh"
-
-while [[ $# -gt 0 ]]; do
-  case "$1" in
-    -model)
-      MODEL_NAME="$2" # e.g., yolo12s
-      shift 2
-      ;;
-    -mode)
-      MODE="$2" # openvino or xnnpack
-      shift 2
-      ;;
-    -pt2e_quantize)
-      PT2E_QUANTIZE="$2"
-      shift 2
-      ;;
-    -upload)
-      UPLOAD_DIR="$2"
-      shift 2
-      ;;
-    -video_path)
-      VIDEO_PATH="$2" # path to the input video file
-      shift 2
-      ;;
-    *)
-      echo "Unknown option: $1"
-      usage
-      ;;
-  esac
-done
-
-# Default mode to openvino if not set
-MODE=${MODE:-"openvino"}
-
-# Default UPLOAD_DIR to empty string if not set
-UPLOAD_DIR="${UPLOAD_DIR:-}"
-
-# Default PT2E_QUANTIZE to empty string if not set
-PT2E_QUANTIZE="${PT2E_QUANTIZE:-}"
-
-# Default CMake Build Type to release mode
-CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE:-Release}
-
-if [[ $# -lt 5 ]]; then # Assuming 5 mandatory args
-  echo "Expecting at least 5 positional arguments"
-  echo "Usage: [...]"
-fi
-if [[ -z "${MODEL_NAME:-}" ]]; then
-  echo "Missing model name, exiting..."
-  exit 1
-fi
-
-
-if [[ -z "${MODE:-}" ]]; then
-  echo "Missing mode, choose openvino or xnnpack, exiting..."
-  exit 1
-fi
-
-if [[ -z "${PYTHON_EXECUTABLE:-}" ]]; then
-  PYTHON_EXECUTABLE=python3
-fi
-
-TARGET_LIBS=""
-
-if [[ "${MODE}" =~ .*openvino.* ]]; then
-  OPENVINO=ON
-  TARGET_LIBS="$TARGET_LIBS openvino_backend "
-
-  git clone https://github.com/openvinotoolkit/openvino.git
-  cd openvino && git checkout b16b776ac119dafda51f69a80f1e6b7376d02c3b
-  git submodule update --init --recursive
-  sudo ./install_build_dependencies.sh
-  mkdir build && cd build
-  cmake .. -DCMAKE_BUILD_TYPE=Release -DENABLE_PYTHON=ON
-  make -j$(nproc)
-
-  cd ..
-  cmake --install build --prefix dist
-
-  source dist/setupvars.sh
-  cd ../backends/openvino
-  pip install -r requirements.txt
-  cd ../../
-else
-  OPENVINO=OFF
-fi
-
-if [[ "${MODE}" =~ .*xnnpack.* ]]; then
-  XNNPACK=ON
-  TARGET_LIBS="$TARGET_LIBS xnnpack_backend "
-else
-  XNNPACK=OFF
-fi
-
-which "${PYTHON_EXECUTABLE}"
-
-
-DIR="examples/models/yolo12"
-$PYTHON_EXECUTABLE -m pip install -r ${DIR}/requirements.txt
-
-cmake_install_executorch_libraries() {
-  rm -rf cmake-out
-  build_dir=cmake-out
-  mkdir $build_dir
-
-
-  retry cmake -DCMAKE_INSTALL_PREFIX="${build_dir}" \
-    -DCMAKE_BUILD_TYPE="${CMAKE_BUILD_TYPE}" \
-    -DEXECUTORCH_BUILD_OPENVINO="$OPENVINO" \
-    -DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \
-    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-    -B"${build_dir}"
-
-  # Build the project
-  cmake --build ${build_dir} --target install --config ${CMAKE_BUILD_TYPE} -j$(nproc)
-
-  export CMAKE_ARGS="
-    -DEXECUTORCH_BUILD_OPENVINO="$OPENVINO" \
-    -DEXECUTORCH_BUILD_XNNPACK="$XNNPACK" \
-    -DEXECUTORCH_BUILD_EXTENSION_DATA_LOADER=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_MODULE=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_RUNNER_UTIL=ON \
-    -DEXECUTORCH_ENABLE_LOGGING=ON \
-    -DEXECUTORCH_BUILD_EXTENSION_TENSOR=ON \
-    -DEXECUTORCH_BUILD_PYBIND=ON"
-
-  echo $TARGET_LIBS
-  export CMAKE_BUILD_ARGS="--target $TARGET_LIBS"
-  pip install . --no-build-isolation
-}
-
-cmake_build_demo() {
-  echo "Building yolo12 runner"
-  retry cmake \
-    -DCMAKE_BUILD_TYPE="$CMAKE_BUILD_TYPE" \
-    -DUSE_OPENVINO_BACKEND="$OPENVINO" \
-    -DUSE_XNNPACK_BACKEND="$XNNPACK" \
-    -Bcmake-out/${DIR} \
-    ${DIR}
-  cmake --build cmake-out/${DIR} -j9 --config "$CMAKE_BUILD_TYPE"
-
-}
-
-cleanup_files() {
-  rm $EXPORTED_MODEL_NAME
-}
-
-prepare_artifacts_upload() {
-  if [ -n "${UPLOAD_DIR}" ]; then
-    echo "Preparing to upload the generated artifacts"
-    zip -j model.zip "${EXPORTED_MODEL_NAME}"
-    mkdir -p "${UPLOAD_DIR}"
-    mv model.zip "${UPLOAD_DIR}"
-    mv result.txt "${UPLOAD_DIR}"
-
-  fi
-}
-
-
-# Export model.
-EXPORTED_MODEL_NAME="${MODEL_NAME}_fp32_${MODE}.pte"
-echo "Exporting ${EXPORTED_MODEL_NAME}"
-EXPORT_ARGS="--model_name=${MODEL_NAME} --backend=${MODE}"
-
-# Build and install the ExecuTorch libraries
-cmake_install_executorch_libraries
-
-$PYTHON_EXECUTABLE -m examples.models.yolo12.export_and_validate ${EXPORT_ARGS}
-
-
-RUNTIME_ARGS="--model_path=${EXPORTED_MODEL_NAME} --input_path=${VIDEO_PATH}"
-
-# Build the demo runner.
-cmake_build_demo
-# Run the yolo12 runner
-NOW=$(date +"%H:%M:%S")
-echo "Starting to run yolo12 runner at ${NOW}"
-# shellcheck source=/dev/null
-cmake-out/examples/models/yolo12/Yolo12DetectionDemo ${RUNTIME_ARGS} > result.txt
-NOW=$(date +"%H:%M:%S")
-echo "Finished at ${NOW}"
-
-RESULT=$(cat result.txt)
-
-prepare_artifacts_upload
-cleanup_files
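The C++ runner invoked above is the CI's end-to-end check; the same exported `.pte` can also be smoke-tested from Python with the `executorch.runtime` API that `export_and_validate.py` uses for validation. A minimal sketch (the file name and input shape below are illustrative placeholders, not part of the CI script):

```python
# Minimal smoke test of an exported YOLO12 .pte via executorch.runtime.
# The file name and input shape are placeholders.
import torch
from executorch.runtime import Runtime

runtime = Runtime.get()
with open("yolo12s_fp32_openvino.pte", "rb") as f:  # hypothetical export name
    program = runtime.load_program(f.read())
method = program.load_method("forward")

# The default export uses a [1, 3, 640, 640] FP32 input (see --input_dims).
dummy = torch.ones((1, 3, 640, 640), dtype=torch.float32)
outputs = method.execute((dummy,))
print(outputs[0].shape)  # detection head output, (1, 84, 8400) for this model family
```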
diff --git a/backends/openvino/README.md b/backends/openvino/README.md
index a67cf12eca2..8adc19f828a 100644
--- a/backends/openvino/README.md
+++ b/backends/openvino/README.md
@@ -46,7 +46,7 @@ Before you begin, ensure you have openvino installed and configured on your syst
 
 ```bash
 git clone https://github.com/openvinotoolkit/openvino.git
-cd openvino && git checkout b16b776ac119dafda51f69a80f1e6b7376d02c3b
+cd openvino && git checkout releases/2025/1
 git submodule update --init --recursive
 sudo ./install_build_dependencies.sh
 mkdir build && cd build
diff --git a/examples/models/yolo12/CMakeLists.txt b/examples/models/yolo12/CMakeLists.txt
deleted file mode 100644
index dd0fcf4f5ef..00000000000
--- a/examples/models/yolo12/CMakeLists.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-cmake_minimum_required(VERSION 3.5)
-
-project(Yolo12DetectionDemo VERSION 0.1)
-
-option(USE_OPENVINO_BACKEND "Build the tutorial with the OPENVINO backend" ON)
-option(USE_XNNPACK_BACKEND "Build the tutorial with the XNNPACK backend" OFF)
-
-set(CMAKE_INCLUDE_CURRENT_DIR ON)
-
-set(CMAKE_CXX_STANDARD 17)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
-
-# OpenCV
-find_package(OpenCV REQUIRED)
-include_directories(${OpenCV_INCLUDE_DIRS})
-# !OpenCV
-
-if(NOT PYTHON_EXECUTABLE)
-  set(PYTHON_EXECUTABLE python3)
-endif()
-
-set(EXECUTORCH_ROOT ${CMAKE_CURRENT_SOURCE_DIR}/../../..)
-set(TORCH_ROOT ${EXECUTORCH_ROOT}/third-party/pytorch)
-
-include(${EXECUTORCH_ROOT}/tools/cmake/Utils.cmake)
-
-# Let files say "include <executorch/path/to/header.h>".
-set(_common_include_directories ${EXECUTORCH_ROOT}/..)
- -# find `executorch` libraries Same as for gflags -find_package(executorch CONFIG REQUIRED PATHS ${EXECUTORCH_ROOT}/cmake-out) -target_link_options_shared_lib(executorch) - -add_subdirectory(${EXECUTORCH_ROOT}/third-party/gflags gflags) -set(link_libraries gflags) -list(APPEND link_libraries portable_ops_lib portable_kernels) -target_link_options_shared_lib(portable_ops_lib) - - -if(USE_XNNPACK_BACKEND) - set(xnnpack_backend_libs xnnpack_backend XNNPACK microkernels-prod) - list(APPEND link_libraries ${xnnpack_backend_libs}) - target_link_options_shared_lib(xnnpack_backend) -endif() - -if(USE_OPENVINO_BACKEND) - add_subdirectory(${EXECUTORCH_ROOT}/backends/openvino openvino_backend) - - target_include_directories( - openvino_backend - INTERFACE ${CMAKE_CURRENT_BINARY_DIR}/../../include - ${CMAKE_CURRENT_BINARY_DIR}/../../include/executorch/runtime/core/portable_type/c10 - ${CMAKE_CURRENT_BINARY_DIR}/../../lib - ) - list(APPEND link_libraries openvino_backend) - target_link_options_shared_lib(openvino_backend) -endif() - -list(APPEND link_libraries extension_threadpool pthreadpool) -list(APPEND _common_include_directories - ${XNNPACK_ROOT}/third-party/pthreadpool/include -) - -set(PROJECT_SOURCES - main.cpp - inference.h - ${EXECUTORCH_ROOT}/extension/data_loader/file_data_loader.cpp - ${EXECUTORCH_ROOT}/extension/evalue_util/print_evalue.cpp - ${EXECUTORCH_ROOT}/extension/runner_util/inputs.cpp - ${EXECUTORCH_ROOT}/extension/runner_util/inputs_portable.cpp -) - -add_executable(Yolo12DetectionDemo ${PROJECT_SOURCES}) -target_link_libraries(Yolo12DetectionDemo PUBLIC - ${link_libraries} - ${OpenCV_LIBS} - executorch_core - extension_module - extension_tensor -) - -find_package(Threads REQUIRED) -target_link_libraries(Yolo12DetectionDemo PRIVATE Threads::Threads) -target_include_directories(Yolo12DetectionDemo PUBLIC ${_common_include_directories}) \ No newline at end of file diff --git a/examples/models/yolo12/README.md b/examples/models/yolo12/README.md deleted file mode 100644 index 0e1cbce21c2..00000000000 --- a/examples/models/yolo12/README.md +++ /dev/null @@ -1,121 +0,0 @@ -# YOLO12 Detection C++ Inference with ExecuTorch - -
-![yolo12s demo](./yolo12s_demo.gif)
-
-This example demonstrates how to perform inference of [Ultralytics YOLO12 family](https://docs.ultralytics.com/models/yolo12/) detection models in C++ leveraging the ExecuTorch backends:
-- [OpenVINO](../../../backends/openvino/README.md)
-- [XNNPACK](../../../backends/xnnpack/README.md)
-
-# Performance Evaluation
-
-| CPU                            | Model   | Backend  | Device | Precision | Average Latency, ms |
-|--------------------------------|---------|----------|--------|-----------|---------------------|
-| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | CPU    | FP32      | 88.3549             |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | CPU    | INT8      | 53.066              |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | CPU    | FP32      | 317.953             |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | CPU    | INT8      | 150.846             |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12s | openvino | GPU    | FP32      | 32.71               |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12l | openvino | GPU    | FP32      | 70.885              |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12s | xnnpack  | CPU    | FP32      | 169.36              |
-| Intel(R) Core(TM) Ultra 7 155H | yolo12l | xnnpack  | CPU    | FP32      | 436.876             |
-
-
-# Instructions
-
-### Step 1: Install ExecuTorch
-
-To install ExecuTorch, follow this [guide](https://pytorch.org/executorch/stable/getting-started-setup.html).
-
-### Step 2: Install the backend of your choice
-
-- [OpenVINO backend installation guide](../../../backends/openvino/README.md#build-instructions)
-- [XNNPACK backend installation guide](https://pytorch.org/executorch/stable/tutorial-xnnpack-delegate-lowering.html#running-the-xnnpack-model-with-cmake)
-
-### Step 3: Install the demo requirements
-
-
-Python demo requirements:
-```bash
-python -m pip install -r examples/models/yolo12/requirements.txt
-```
-
-The demo inference also depends on the OpenCV library:
-https://opencv.org/get-started/
-
-
-### Step 4: Export the YOLO12 model to ExecuTorch
-
-
-OpenVINO:
-```bash
-python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --device CPU
-```
-
-OpenVINO quantized model:
-```bash
-python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend openvino --quantize --video_path /path/to/calibration/video --device CPU
-```
-
-XNNPACK:
-```bash
-python export_and_validate.py --model_name yolo12s --input_dims=[1920,1080] --backend xnnpack
-```
-
-> **_NOTE:_** Quantization for the XNNPACK backend is WIP. Please refer to https://github.com/pytorch/executorch/issues/11523 for more details.
-
-The exported model can be validated using the `--validate` flag:
-
-```bash
-python export_and_validate.py --model_name yolo12s --backend ... --validate dataset_name.yaml
-```
-
-A list of available datasets and instructions on how to use a custom dataset can be found [here](https://docs.ultralytics.com/datasets/detect/).
-Validation only supports the default `--input_dims`; please do not specify this parameter when using the `--validate` flag.
-
-
-For a full description of all parameters, use the following command:
-```bash
-python export_and_validate.py --help
-```
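Under the hood, `export_and_validate.py` follows the standard ExecuTorch export flow; a condensed sketch of its FP32 XNNPACK path (error handling, quantization, and the OpenVINO branch omitted — see the full script in this patch):

```python
# Condensed sketch of the FP32/XNNPACK export path in export_and_validate.py.
import numpy as np
import torch
from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
from executorch.exir import to_edge_transform_and_lower
from ultralytics import YOLO

model = YOLO("yolo12s")
# One dummy predict() call initializes the Ultralytics pre-processing pipeline.
dummy = np.ones((640, 640, 3))
model.predict(dummy, imgsz=(640, 640), device="cpu")
pt_model = model.model.to(torch.device("cpu"))

example_args = (model.predictor.preprocess([dummy]),)
with torch.no_grad():
    aten_dialect = torch.export.export(pt_model, args=example_args)

edge = to_edge_transform_and_lower(aten_dialect, partitioner=[XnnpackPartitioner()])
with open("yolo12s_fp32_xnnpack.pte", "wb") as f:
    edge.to_executorch().write_to_file(f)
```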
-
-### Step 5: Build the demo project
-
-OpenVINO:
-
-```bash
-cd examples/models/yolo12
-mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DUSE_OPENVINO_BACKEND=ON ..
-make -j$(nproc)
-```
-
-XNNPACK:
-
-```bash
-cd examples/models/yolo12
-mkdir build && cd build
-cmake -DCMAKE_BUILD_TYPE=Release -DUSE_XNNPACK_BACKEND=ON ..
-make -j$(nproc)
-```
-
-### Step 6: Run the demo
-
-```bash
-./build/Yolo12DetectionDemo -model_path /path/to/exported/model -input_path /path/to/video/file -output_path /path/to/output/annotated/video
-```
-
-For a full description of all parameters, use the following command:
-```
-./build/Yolo12DetectionDemo --help
-```
-
-
-# Credits:
-
-Ultralytics examples: https://github.com/ultralytics/ultralytics/tree/main/examples
-
-Sample video: https://www.pexels.com/@shanu-1040189/
diff --git a/examples/models/yolo12/export_and_validate.py b/examples/models/yolo12/export_and_validate.py
deleted file mode 100644
index e2349fb6434..00000000000
--- a/examples/models/yolo12/export_and_validate.py
+++ /dev/null
@@ -1,397 +0,0 @@
-# Copyright (c) Intel Corporation
-#
-# Licensed under the BSD License (the "License"); you may not use this file
-# except in compliance with the License. See the license file found in the
-# LICENSE file in the root directory of this source tree.
-
-# mypy: disable-error-code="import-untyped,import-not-found"
-
-
-import argparse
-from itertools import islice
-from typing import Any, Dict, Iterator, Optional, Tuple
-
-import cv2
-import executorch
-import numpy as np
-import torch
-from executorch.backends.xnnpack.partition.xnnpack_partitioner import XnnpackPartitioner
-from executorch.backends.xnnpack.quantizer.xnnpack_quantizer import (
-    get_symmetric_quantization_config,
-    XNNPACKQuantizer,
-)
-from executorch.exir import (
-    EdgeCompileConfig,
-    EdgeProgramManager,
-    ExecutorchBackendConfig,
-    ExecutorchProgramManager,
-    to_edge_transform_and_lower,
-)
-from executorch.exir.backend.backend_details import CompileSpec
-from executorch.runtime import Runtime
-from torch.export.exported_program import ExportedProgram
-from torchao.quantization.pt2e.quantize_pt2e import convert_pt2e, prepare_pt2e
-from ultralytics import YOLO
-
-from ultralytics.data.utils import check_det_dataset
-from ultralytics.engine.validator import BaseValidator as Validator
-from ultralytics.utils.torch_utils import de_parallel
-
-
-class CV2VideoIter:
-    def __init__(self, cap) -> None:
-        self._cap = cap
-
-    def __iter__(self):
-        return self
-
-    def __next__(self):
-        success, frame = self._cap.read()
-        if not success:
-            raise StopIteration()
-        return frame
-
-    def __len__(self):
-        return int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
-
-
-class CV2VideoDataset(torch.utils.data.IterableDataset):
-    def __init__(self, cap) -> None:
-        super().__init__()
-        self._iter = CV2VideoIter(cap)
-
-    def __iter__(self) -> Iterator:
-        return self._iter
-
-    def __len__(self):
-        return len(self._iter)
-
-
-def lower_to_openvino(
-    aten_dialect: ExportedProgram,
-    example_args: Tuple[Any, ...],
-    transform_fn: callable,
-    device: str,
-    calibration_dataset: CV2VideoDataset,
-    subset_size: int,
-    quantize: bool,
-) -> ExecutorchProgramManager:
-    # Import openvino locally to avoid nncf side-effects
-    import nncf.torch
-    from executorch.backends.openvino.partitioner import OpenvinoPartitioner
-    from executorch.backends.openvino.quantizer import OpenVINOQuantizer
-    from executorch.backends.openvino.quantizer.quantizer import QuantizationMode
-    from nncf.experimental.torch.fx import quantize_pt2e
-
-    with nncf.torch.disable_patching():
-        if quantize:
-            target_input_dims = tuple(example_args[0].shape[2:])
-
-            def ext_transform_fn(sample):
-                sample = transform_fn(sample)
-                return pad_to_target(sample, target_input_dims)
-
-            quantizer = OpenVINOQuantizer(mode=QuantizationMode.INT8_TRANSFORMER)
-            quantizer.set_ignored_scope(
-                types=["mul", "sub", "sigmoid", "__getitem__"],
-            )
-            quantized_model = quantize_pt2e(
-                aten_dialect.module(),
-                quantizer,
-                nncf.Dataset(calibration_dataset, ext_transform_fn),
-                subset_size=subset_size,
-                smooth_quant=True,
-                fold_quantize=False,
-            )
-
-            aten_dialect = torch.export.export(quantized_model, example_args)
-        # Convert to edge dialect and lower the module to the backend with a custom partitioner
-        compile_spec = [CompileSpec("device", device.encode())]
-        lowered_module: EdgeProgramManager = to_edge_transform_and_lower(
-            aten_dialect,
-            partitioner=[
-                OpenvinoPartitioner(compile_spec),
-            ],
-            compile_config=EdgeCompileConfig(
-                _skip_dim_order=True,
-            ),
-        )
-
-        # Apply backend-specific passes
-        return lowered_module.to_executorch(
-            config=executorch.exir.ExecutorchBackendConfig()
-        )
-
-
-def lower_to_xnnpack(
-    aten_dialect: ExportedProgram,
-    example_args: Tuple[Any, ...],
-    transform_fn: callable,
-    device: str,
-    calibration_dataset: CV2VideoDataset,
-    subset_size: int,
-    quantize: bool,
-) -> ExecutorchProgramManager:
-    if quantize:
-        quantizer = XNNPACKQuantizer()
-        operator_config = get_symmetric_quantization_config(
-            is_per_channel=False,
-            is_dynamic=False,
-        )
-        quantizer.set_global(operator_config)
-        m = prepare_pt2e(aten_dialect.module(), quantizer)
-        # calibration
-        target_input_dims = tuple(example_args[0].shape[2:])
-        print("Start quantization...")
-        for sample in islice(calibration_dataset, subset_size):
-            sample = transform_fn(sample)
-            sample = pad_to_target(sample, target_input_dims)
-            m(sample)
-        m = convert_pt2e(m)
-        print("Quantized successfully!")
-        aten_dialect = torch.export.export(m, example_args)
-
-    edge = to_edge_transform_and_lower(
-        aten_dialect,
-        partitioner=[XnnpackPartitioner()],
-        compile_config=EdgeCompileConfig(
-            _check_ir_validity=not quantize,
-            _skip_dim_order=True,  # TODO(T182187531): enable dim order in xnnpack
-        ),
-    )
-
-    return edge.to_executorch(
-        config=ExecutorchBackendConfig(extract_delegate_segments=False)
-    )
-
-
-def pad_to_target(
-    image: torch.Tensor,
-    target_size: Tuple[int, int],
-):
-    if image.shape[2:] == target_size:
-        return image
-    img_h, img_w = image.shape[2:]
-    target_h, target_w = target_size
-
-    diff_h = target_h - img_h
-    pad_h_from = diff_h // 2
-    pad_h_to = -(pad_h_from + diff_h % 2) or None
-    diff_w = target_w - img_w
-    pad_w_from = diff_w // 2
-    pad_w_to = -(pad_w_from + diff_w % 2) or None
-
-    result = torch.zeros(
-        (
-            1,
-            3,
-        )
-        + target_size,
-        device=image.device,
-        dtype=image.dtype,
-    )
-    result[:, :, pad_h_from:pad_h_to, pad_w_from:pad_w_to] = image
-    return result
-
-
-def main(
-    model_name: str,
-    input_dims: Tuple[int, int],
-    quantize: bool,
-    video_path: str,
-    subset_size: int,
-    backend: str,
-    device: str,
-    val_dataset_yaml_path: Optional[str],
-):
-    """
-    Main function to load, quantize, and export a YOLO model.
-
-    :param model_name: The name of the YOLO model to load.
-    :param input_dims: Input dims to use for the export of a YOLO12 model.
-    :param quantize: Whether to quantize the model.
-    :param video_path: Path to the video to use for the calibration.
-    :param subset_size: Subset size for the quantized model calibration. The default value is 300.
-    :param backend: The ExecuTorch inference backend (e.g., "openvino", "xnnpack").
-    :param device: The device to run the model on (e.g., "cpu", "gpu").
-    :param val_dataset_yaml_path: Path to the validation dataset file in Ultralytics .yaml format.
-        Performs validation if the path is not None, skips validation otherwise.
-    """
-    # Load the selected model
-    model = YOLO(model_name)
-
-    if quantize:
-        if video_path is None:
-            raise RuntimeError(
-                "Cannot quantize the model without a calibration video;"
-                " the --video_path parameter is required."
-            )
-        cap = cv2.VideoCapture(video_path, cv2.CAP_FFMPEG)
-        height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
-        width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
-        print(f"Calibration video dims: h: {height} w: {width}")
-        calibration_dataset = CV2VideoDataset(cap)
-    else:
-        calibration_dataset = None
-
-    # Set up pre-processing
-    np_dummy_tensor = np.ones((input_dims[0], input_dims[1], 3))
-    model.predict(np_dummy_tensor, imgsz=((input_dims[0], input_dims[1])), device="cpu")
-
-    pt_model = model.model.to(torch.device("cpu"))
-
-    def transform_fn(frame):
-        input_tensor = model.predictor.preprocess([frame])
-        return input_tensor
-
-    example_args = (transform_fn(np_dummy_tensor),)
-    with torch.no_grad():
-        aten_dialect = torch.export.export(pt_model, args=example_args)
-
-    if backend == "openvino":
-        lower_fn = lower_to_openvino
-    elif backend == "xnnpack":
-        lower_fn = lower_to_xnnpack
-
-    exec_prog = lower_fn(
-        aten_dialect=aten_dialect,
-        example_args=example_args,
-        transform_fn=transform_fn,
-        device=device,
-        calibration_dataset=calibration_dataset,
-        subset_size=subset_size,
-        quantize=quantize,
-    )
-
-    model_file_name = f"{model_name}_{'int8' if quantize else 'fp32'}_{backend}.pte"
-    with open(model_file_name, "wb") as file:
-        exec_prog.write_to_file(file)
-    print(f"Model exported and saved as {model_file_name} on {device}.")
-
-    if val_dataset_yaml_path is not None:
-        if input_dims != [640, 640]:
-            raise NotImplementedError(
-                f"Validation with the custom input shape {input_dims} is not implemented."
-                " Please use the default --input_dims=[640, 640] for the validation."
-            )
-        stats = validate_yolo(model, exec_prog, val_dataset_yaml_path)
-        for stat, value in stats.items():
-            print(f"{stat}: {value}")
-
-
-def _prepare_validation(
-    model: YOLO, dataset_yaml_path: str
-) -> Tuple[Validator, torch.utils.data.DataLoader]:
-    custom = {"rect": False, "batch": 1}  # method defaults
-    args = {
-        **model.overrides,
-        **custom,
-        "mode": "val",
-    }  # highest priority args on the right
-
-    validator = model._smart_load("validator")(args=args, _callbacks=model.callbacks)
-    stride = 32  # default stride
-    validator.stride = stride  # used in get_dataloader() for padding
-    validator.data = check_det_dataset(dataset_yaml_path)
-    validator.init_metrics(de_parallel(model))
-
-    data_loader = validator.get_dataloader(
-        validator.data.get(validator.args.split), validator.args.batch
-    )
-
-    return validator, data_loader
-
-
-def validate_yolo(
-    model: YOLO, exec_prog: ExecutorchProgramManager, dataset_yaml_path: str
-) -> Dict[str, float]:
-    """
-    Runs validation on a YOLO model using an ExecuTorch program and a dataset in Ultralytics format.
-
-    :param model: The YOLO model instance to validate.
-    :param exec_prog: The ExecuTorch program manager containing the compiled model.
-    :param dataset_yaml_path: Path to the validation dataset file in Ultralytics .yaml format.
-    :return: Dictionary of validation statistics computed over the dataset.
-    """
-    # Load model from buffer
-    runtime = Runtime.get()
-    program = runtime.load_program(exec_prog.buffer)
-    method = program.load_method("forward")
-    if method is None:
-        raise ValueError("Load method failed")
-    validator, data_loader = _prepare_validation(model, dataset_yaml_path)
-    print(f"Start validation on {dataset_yaml_path} dataset ...")
-    for batch in data_loader:
-        batch = validator.preprocess(batch)
-        preds = method.execute((batch["img"],))
-        preds = validator.postprocess(preds)
-        validator.update_metrics(preds, batch)
-    stats = validator.get_stats()
-    return stats
-
-
-if __name__ == "__main__":
-    parser = argparse.ArgumentParser(
-        description="Export FP32 and INT8 Ultralytics YOLO models with ExecuTorch."
-    )
-    parser.add_argument(
-        "--model_name",
-        type=str,
-        default="yolo12s",
-        choices=["yolo12n", "yolo12s", "yolo12m", "yolo12l", "yolo12x"],
-        help="Ultralytics yolo12 model name.",
-    )
-    parser.add_argument(
-        "--input_dims",
-        type=eval,
-        default=[640, 640],
-        help="Input model dimensions in the format [height, width] or (height, width)."
-        " The default model dimensions are [640, 640].",
-    )
-    parser.add_argument(
-        "--video_path",
-        type=str,
-        help="Path to the input video file to use for the quantization calibration.",
-    )
-    parser.add_argument(
-        "--quantize", action="store_true", help="Enable model quantization."
-    )
-    parser.add_argument(
-        "--subset_size",
-        type=int,
-        default=300,
-        help="Subset size for the quantized model calibration. The default value is 300.",
-    )
-    parser.add_argument(
-        "--backend",
-        type=str,
-        default="openvino",
-        choices=["openvino", "xnnpack"],
-        help="Select the ExecuTorch inference backend (openvino, xnnpack). openvino by default.",
-    )
-    parser.add_argument(
-        "--device",
-        type=str,
-        default="CPU",
-        help="Target device for compiling the model (e.g., CPU, GPU). Default is CPU.",
-    )
-    parser.add_argument(
-        "--validate",
-        nargs="?",
-        const="coco128.yaml",
-        help="Validate the exported model using the Ultralytics validation pipeline."
-        " The default validation dataset is coco128.yaml.",
-    )
-
-    args = parser.parse_args()
-
-    # Run the main function with parsed arguments
-    main(
-        model_name=args.model_name,
-        input_dims=args.input_dims,
-        quantize=args.quantize,
-        val_dataset_yaml_path=args.validate,
-        video_path=args.video_path,
-        subset_size=args.subset_size,
-        backend=args.backend,
-        device=args.device,
-    )
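The exporter's `pad_to_target` above and the runner's `scale_with_padding` in `inference.h` below share the same centering arithmetic; a small worked example with an arbitrary 1920x1080 frame and the default 640x640 model input:

```python
# Worked example of the letterbox arithmetic used by scale_with_padding:
# fit a 1920x1080 frame into a 640x640 model input.
img_w, img_h = 1920, 1080
target_w, target_h = 640, 640

scale = min(target_w / img_w, target_h / img_h)  # min(1/3, 0.59...) = 1/3
resized_w, resized_h = int(img_w * scale), int(img_h * scale)
pad_x = (target_w - resized_w) // 2  # horizontal border on each side
pad_y = (target_h - resized_h) // 2  # vertical border on each side

assert (resized_w, resized_h) == (640, 360)
assert (pad_x, pad_y) == (0, 140)  # 140 zero rows above and below the frame
```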
- " Default validateion dataset is coco128.yaml.", - ) - - args = parser.parse_args() - - # Run the main function with parsed arguments - main( - model_name=args.model_name, - input_dims=args.input_dims, - quantize=args.quantize, - val_dataset_yaml_path=args.validate, - video_path=args.video_path, - subset_size=args.subset_size, - backend=args.backend, - device=args.device, - ) diff --git a/examples/models/yolo12/inference.h b/examples/models/yolo12/inference.h deleted file mode 100644 index 467ef5ce0ca..00000000000 --- a/examples/models/yolo12/inference.h +++ /dev/null @@ -1,151 +0,0 @@ -#ifndef INFERENCE_H -#define INFERENCE_H - -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -using executorch::aten::ScalarType; -using executorch::extension::from_blob; -using executorch::extension::Module; -using executorch::runtime::Error; -using executorch::runtime::Result; - -struct Detection { - int class_id{0}; - std::string className{}; - float confidence{0.0}; - cv::Rect box{}; -}; - -struct DetectionConfig { - std::vector classes; - float modelScoreThreshold; - float modelNMSThreshold; -}; - -cv::Mat scale_with_padding( - cv::Mat& source, - int* pad_x, - int* pad_y, - float* scale, - cv::Size img_dims) { - int col = source.cols; - int row = source.rows; - int m_inputWidth = img_dims.width; - int m_inputHeight = img_dims.height; - if (col == m_inputWidth and row == m_inputHeight) { - return source; - } - - *scale = std::min(m_inputWidth / (float)col, m_inputHeight / (float)row); - int resized_w = col * *scale; - int resized_h = row * *scale; - *pad_x = (m_inputWidth - resized_w) / 2; - *pad_y = (m_inputHeight - resized_h) / 2; - - cv::Mat resized; - cv::resize(source, resized, cv::Size(resized_w, resized_h)); - cv::Mat result = cv::Mat::zeros(m_inputHeight, m_inputWidth, source.type()); - resized.copyTo(result(cv::Rect(*pad_x, *pad_y, resized_w, resized_h))); - resized.release(); - return result; -} - -std::vector infer_yolo_once( - Module& module, - cv::Mat input, - cv::Size img_dims, - const DetectionConfig yolo_config) { - int pad_x, pad_y; - float scale; - input = scale_with_padding(input, &pad_x, &pad_y, &scale, img_dims); - - cv::Mat blob; - cv::dnn::blobFromImage( - input, blob, 1.0 / 255.0, img_dims, cv::Scalar(), true, false); - const auto t_input = from_blob( - (void*)blob.data, - std::vector(blob.size.p, blob.size.p + blob.dims), - ScalarType::Float); - const auto result = module.forward(t_input); - - ET_CHECK_MSG( - result.ok(), - "Execution of method forward failed with status 0x%" PRIx32, - (uint32_t)result.error()); - - const auto t = result->at(0).toTensor(); // Using only the 0 output - // yolov8 has an output of shape (batchSize, 84, 8400) (Num classes + - // box[x,y,w,h]) - cv::Mat mat_output(t.dim() - 1, t.sizes().data() + 1, CV_32FC1, t.data_ptr()); - - std::vector class_ids; - std::vector confidences; - std::vector boxes; - - // Iterate over detections and collect class IDs, confidence scores, and - // bounding boxes - for (int i = 0; i < mat_output.cols; ++i) { - const cv::Mat classes_scores = - mat_output.col(i).rowRange(4, mat_output.rows); - - cv::Point class_id; - double score; - cv::minMaxLoc( - classes_scores, - nullptr, - &score, - nullptr, - &class_id); // Find the class with the highest score - - // Check if the detection meets the confidence threshold - if (score <= yolo_config.modelScoreThreshold) - continue; - - class_ids.push_back(class_id.y); - confidences.push_back(score); - - const float x = mat_output.at(0, i); - 
diff --git a/examples/models/yolo12/main.cpp b/examples/models/yolo12/main.cpp
deleted file mode 100644
index 95ea98d6634..00000000000
--- a/examples/models/yolo12/main.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-#include "inference.h"
-
-#include <gflags/gflags.h>
-
-void draw_detection(
-    cv::Mat& frame,
-    const Detection detection,
-    const cv::Scalar color);
-
-DetectionConfig DEFAULT_YOLO_CONFIG = {
-    {"person", "bicycle", "car",
-     "motorcycle", "airplane", "bus",
-     "train", "truck", "boat",
-     "traffic light", "fire hydrant", "stop sign",
-     "parking meter", "bench", "bird",
-     "cat", "dog", "horse",
-     "sheep", "cow", "elephant",
-     "bear", "zebra", "giraffe",
-     "backpack", "umbrella", "handbag",
-     "tie", "suitcase", "frisbee",
-     "skis", "snowboard", "sports ball",
-     "kite", "baseball bat", "baseball glove",
-     "skateboard", "surfboard", "tennis racket",
-     "bottle", "wine glass", "cup",
-     "fork", "knife", "spoon",
-     "bowl", "banana", "apple",
-     "sandwich", "orange", "broccoli",
-     "carrot", "hot dog", "pizza",
-     "donut", "cake", "chair",
-     "couch", "potted plant", "bed",
-     "dining table", "toilet", "tv",
-     "laptop", "mouse", "remote",
-     "keyboard", "cell phone", "microwave",
-     "oven", "toaster", "sink",
-     "refrigerator", "book", "clock",
-     "vase", "scissors", "teddy bear",
-     "hair drier", "toothbrush"},
-    0.45,
-    0.50};
-
-DEFINE_string(
-    model_path,
-    "model.pte",
-    "Model serialized in flatbuffer format.");
-
-DEFINE_string(input_path, "input.mp4", "Path to the mp4 input video");
-
-DEFINE_string(output_path, "output.mp4", "Path to the mp4 output video");
-
-int main(int argc, char** argv) {
-  executorch::runtime::runtime_init();
-  gflags::ParseCommandLineFlags(&argc, &argv, true);
-
-  // Use the Mmap load mode to enable loading of big YOLO models in OpenVINO
-  Module yolo_module(FLAGS_model_path, Module::LoadMode::Mmap);
-
-  auto error = yolo_module.load();
-
-  ET_CHECK_MSG(
-      error == Error::Ok,
-      "Loading of the model failed with status 0x%" PRIx32,
-      (uint32_t)error);
-  error = yolo_module.load_forward();
-  ET_CHECK_MSG(
-      error == Error::Ok,
-      "Loading of the forward method failed with status 0x%" PRIx32,
-      (uint32_t)error);
-
-  const auto model_input_shape =
-      yolo_module.method_meta("forward")->input_tensor_meta(0)->sizes();
-  std::cout << "Model input shape: [";
-  for (auto& dim : model_input_shape) {
-    std::cout << dim << ", ";
-  }
-  std::cout << "]" << std::endl;
-  const cv::Size img_dims = {model_input_shape[3], model_input_shape[2]};
-
-  cv::VideoCapture cap(FLAGS_input_path.c_str());
-  if (!cap.isOpened()) {
-    std::cout << "Error opening video stream or file" << std::endl;
-    return -1;
-  }
-  const auto frame_width = cap.get(cv::CAP_PROP_FRAME_WIDTH);
-  const auto frame_height = cap.get(cv::CAP_PROP_FRAME_HEIGHT);
-  const auto video_length = cap.get(cv::CAP_PROP_FRAME_COUNT);
-  std::cout << "Input video shape: [3, " << frame_width << ", "
-            << frame_height << "]" << std::endl;
-
-  cv::VideoWriter video(
-      FLAGS_output_path.c_str(),
-      cv::VideoWriter::fourcc('m', 'p', '4', 'v'),
-      30,
-      cv::Size(frame_width, frame_height));
-
-  std::cout << "Start the detection..." << std::endl;
-  et_timestamp_t time_spent_executing = 0;
-  unsigned long long iters = 0;
-  // Show progress every 10%
-  unsigned long long progress_bar_tick = std::round(video_length / 10);
-  while (true) {
-    cv::Mat frame;
-    cap >> frame;
-
-    if (frame.empty())
-      break;
-
-    const et_timestamp_t before_execute = et_pal_current_ticks();
-    std::vector<Detection> output =
-        infer_yolo_once(yolo_module, frame, img_dims, DEFAULT_YOLO_CONFIG);
-
-    for (auto& detection : output) {
-      draw_detection(frame, detection, cv::Scalar(0, 0, 255));
-    }
-    const et_timestamp_t after_execute = et_pal_current_ticks();
-    time_spent_executing += after_execute - before_execute;
-    iters++;
-
-    if (!(iters % progress_bar_tick)) {
-      const int percent_ready = (100 * iters) / video_length;
-      std::cout << iters << " out of " << video_length
-                << " frames are processed (" << percent_ready << "%)"
-                << std::endl;
-    }
-    video.write(frame);
-  }
-
-  const auto tick_ratio = et_pal_ticks_to_ns_multiplier();
-  constexpr auto NANOSECONDS_PER_MILLISECOND = 1000000;
-
-  double elapsed_ms = static_cast<double>(time_spent_executing) *
-      tick_ratio.numerator / tick_ratio.denominator /
-      NANOSECONDS_PER_MILLISECOND;
-  std::cout << "Model executed successfully " << iters << " times in "
-            << elapsed_ms << " ms." << std::endl;
-  std::cout << "Average detection time: " << elapsed_ms / iters << " ms."
-            << std::endl;
-  cap.release();
-  video.release();
-}
-
-void draw_detection(
-    cv::Mat& frame,
-    const Detection detection,
-    const cv::Scalar color) {
-  cv::Rect box = detection.box;
-
-  // Detection box
-  cv::rectangle(frame, box, color, 2);
-
-  // Detection box text
-  std::string classString = detection.className + ' ' +
-      std::to_string(detection.confidence).substr(0, 4);
-  cv::Size textSize =
-      cv::getTextSize(classString, cv::FONT_HERSHEY_DUPLEX, 1, 2, 0);
-  cv::Rect textBox(
-      box.x, box.y - 40, textSize.width + 10, textSize.height + 20);
-
-  cv::rectangle(frame, textBox, color, cv::FILLED);
-  cv::putText(
-      frame,
-      classString,
-      cv::Point(box.x + 5, box.y - 10),
-      cv::FONT_HERSHEY_DUPLEX,
-      1,
-      cv::Scalar(0, 0, 0),
-      2,
-      0);
-}
\ No newline at end of file
diff --git a/examples/models/yolo12/requirements.txt b/examples/models/yolo12/requirements.txt
deleted file mode 100644
index de537f46170..00000000000
--- a/examples/models/yolo12/requirements.txt
+++ /dev/null
@@ -1 +0,0 @@
-ultralytics==8.3.97
\ No newline at end of file
diff --git a/examples/models/yolo12/yolo12s_demo.gif b/examples/models/yolo12/yolo12s_demo.gif
deleted file mode 100644
index be029bf416ca8d95356b7a98bf0ba9a70266084a..0000000000000000000000000000000000000000
Binary files a/examples/models/yolo12/yolo12s_demo.gif and /dev/null differ