diff --git a/pretrained_model/resnet18-f37072fd.pth b/pretrained_model/checkpoints/resnet18-f37072fd.pth
similarity index 100%
rename from pretrained_model/resnet18-f37072fd.pth
rename to pretrained_model/checkpoints/resnet18-f37072fd.pth
diff --git a/pretrained_model/resnet50-0676ba61.pth b/pretrained_model/checkpoints/resnet50-0676ba61.pth
similarity index 100%
rename from pretrained_model/resnet50-0676ba61.pth
rename to pretrained_model/checkpoints/resnet50-0676ba61.pth
diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/.gitignore b/pretrained_model/pytorch_vision_v0.10.0/.circleci/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..485dee64bcfb48793379b200a1afd14e85a8aaf4
--- /dev/null
+++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/.gitignore
@@ -0,0 +1 @@
+.idea
diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/build_docs/commit_docs.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/build_docs/commit_docs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..b923b0edbc42195c260fda6da46a741971957324
--- /dev/null
+++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/build_docs/commit_docs.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+
+set -ex
+
+
+if [ "$2" == "" ]; then
+  echo call as "$0" "<src>" "<target branch>"
+  echo where src is the root of the built documentation git checkout and
+  echo branch should be "master" or "1.7" or so
+  exit 1
+fi
+
+src=$1
+target=$2
+
+echo "committing docs from ${src} to ${target}"
+
+pushd "${src}"
+git checkout gh-pages
+mkdir -p ./"${target}"
+rm -rf ./"${target}"/*
+cp -r "${src}/docs/build/html/"* ./"$target"
+if [ "${target}" == "master" ]; then
+  mkdir -p ./_static
+  rm -rf ./_static/*
+  cp -r "${src}/docs/build/html/_static/"* ./_static
+  git add --all ./_static || true
+fi
+git add --all ./"${target}" || true
+git config user.email "soumith+bot@pytorch.org"
+git config user.name "pytorchbot"
+# If there aren't changes, don't make a commit; push is no-op
+git commit -m "auto-generating sphinx docs" || true
+git remote add https https://github.com/pytorch/vision.git
+git push -u https gh-pages
diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml b/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b28cd30c8cbed0910b9238d1708409604414bd48
--- /dev/null
+++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml
@@ -0,0 +1,3479 @@
+version: 2.1
+
+# How to test the Linux jobs:
+# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/
+# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7
+# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test.
+#   Job names are 'name:' key.
+ +executors: + windows-cpu: + machine: + resource_class: windows.xlarge + image: windows-server-2019-vs2019:stable + shell: bash.exe + + windows-gpu: + machine: + resource_class: windows.gpu.nvidia.medium + image: windows-server-2019-nvidia:stable + shell: bash.exe + +commands: + checkout_merge: + description: "checkout merge branch" + steps: + - checkout +# - run: +# name: Checkout merge branch +# command: | +# set -ex +# BRANCH=$(git rev-parse --abbrev-ref HEAD) +# if [[ "$BRANCH" != "master" ]]; then +# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} +# git checkout "merged/$CIRCLE_BRANCH" +# fi + designate_upload_channel: + description: "inserts the correct upload channel into ${BASH_ENV}" + steps: + - run: + name: adding UPLOAD_CHANNEL to BASH_ENV + command: | + our_upload_channel=test + echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} + install_cuda_compatible_cmath: + description: "Install CUDA compatible cmath" + steps: + - run: + name: _HACK_ Install CUDA compatible cmath + no_output_timeout: 1m + command: | + powershell .circleci/scripts/vs_install_cmath.ps1 + + brew_update: + description: "Update Homebrew and install base formulae" + steps: + - run: + name: Update Homebrew + no_output_timeout: "10m" + command: | + set -ex + + # Update repositories manually. + # Running `brew update` produces a comparison between the + # current checkout and the updated checkout, which takes a + # very long time because the existing checkout is 2y old. + for path in $(find /usr/local/Homebrew -type d -name .git) + do + cd $path/.. + git fetch --depth=1 origin + git reset --hard origin/master + done + + export HOMEBREW_NO_AUTO_UPDATE=1 + + # Install expect and moreutils so that we can call `unbuffer` and `ts`. + # moreutils installs a `parallel` executable by default, which conflicts + # with the executable from the GNU `parallel`, so we must unlink GNU + # `parallel` first, and relink it afterwards. 
+ brew install coreutils + brew unlink parallel + brew install moreutils + brew link parallel --overwrite + brew install expect + + brew_install: + description: "Install Homebrew formulae" + parameters: + formulae: + type: string + default: "" + steps: + - run: + name: Install << parameters.formulae >> + no_output_timeout: "10m" + command: | + set -ex + export HOMEBREW_NO_AUTO_UPDATE=1 + brew install << parameters.formulae >> + + run_brew_for_ios_build: + steps: + - brew_update + - brew_install: + formulae: libtool + +binary_common: &binary_common + parameters: + # Edit these defaults to do a release + build_version: + description: "version number of release binary; by default, build a nightly" + type: string + default: "0.10.0" + pytorch_version: + description: "PyTorch version to build against; by default, use a nightly" + type: string + default: "1.9.0" + # Don't edit these + python_version: + description: "Python version to build against (e.g., 3.7)" + type: string + cu_version: + description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" + type: string + default: "cpu" + unicode_abi: + description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" + type: string + default: "" + wheel_docker_image: + description: "Wheel only: what docker image to use" + type: string + default: "pytorch/manylinux-cuda102" + conda_docker_image: + description: "Conda only: what docker image to use" + type: string + default: "pytorch/conda-builder:cpu" + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + +torchvision_ios_params: &torchvision_ios_params + parameters: + build_environment: + type: string + default: "" + ios_arch: + type: string + default: "" + ios_platform: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + IOS_ARCH: << parameters.ios_arch >> + IOS_PLATFORM: << parameters.ios_platform >> + +torchvision_android_params: &torchvision_android_params + parameters: + build_environment: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + +smoke_test_common: &smoke_test_common + <<: *binary_common + docker: + - image: torchvision/smoke_test:latest + +jobs: + circleci_consistency: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off jinja2 pyyaml + python .circleci/regenerate.py + git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1) + + python_lint: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off flake8 typing + flake8 --config=setup.cfg . + + python_type_check: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + sudo apt-get update -y + sudo apt install -y libturbojpeg-dev + pip install --user --progress-bar off mypy + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + pip install --user --progress-bar off --editable . 
+ mypy --config-file mypy.ini + + docstring_parameters_sync: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user pydocstyle + pydocstyle + + clang_format: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format + ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + + torchhub_test: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + # need to install torchvision dependencies due to transitive imports + pip install --user --progress-bar off --editable . + python test/test_hub.py + + torch_onnx_test: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + # need to install torchvision dependencies due to transitive imports + pip install --user --progress-bar off --editable . + pip install --user onnx + pip install --user onnxruntime + python test/test_onnx.py + + binary_linux_wheel: + <<: *binary_common + docker: + - image: << parameters.wheel_docker_image >> + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_linux_conda: + <<: *binary_common + docker: + - image: "<< parameters.conda_docker_image >>" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: packaging/build_conda.sh + - store_artifacts: + path: /opt/conda/conda-bld/linux-64 + - persist_to_workspace: + root: /opt/conda/conda-bld/linux-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_conda: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Build conda packages + no_output_timeout: 20m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate base + conda install -yq conda-build "conda-package-handling!=1.5.0" + packaging/build_conda.sh + rm /C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 + - store_artifacts: + path: C:/tools/miniconda3/conda-bld/win-64 + - persist_to_workspace: + root: C:/tools/miniconda3/conda-bld/win-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_wheel: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Build wheel packages + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_macos_wheel: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + # Cannot easily 
deduplicate this as source'ing activate + # will set environment variables which we need to propagate + # to build_wheel.sh + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_ios_build: + <<: *torchvision_ios_params + macos: + xcode: "12.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" + cat "$script" + source "$script" + - persist_to_workspace: + root: /Users/distiller/workspace/ + paths: ios + + binary_ios_upload: + <<: *torchvision_ios_params + macos: + xcode: "12.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" + cat "$script" + source "$script" + + binary_android_build: + <<: *torchvision_android_params + docker: + - image: circleci/android:api-29-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" + cat "$script" + source "$script" + - store_artifacts: + path: ~/workspace/artifacts + + binary_android_upload: + <<: *torchvision_android_params + docker: + - image: circleci/android:api-29-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" + cat "$script" + source "$script" + + binary_macos_conda: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build + packaging/build_conda.sh + - store_artifacts: + path: /Users/distiller/miniconda3/conda-bld/osx-64 + - persist_to_workspace: + root: /Users/distiller/miniconda3/conda-bld/osx-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + # Requires org-member context + binary_conda_upload: + docker: + - image: continuumio/miniconda + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + command: | + # Prevent credential from leaking + conda install -yq anaconda-client + set -x + anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force + + # Requires org-member context + binary_wheel_upload: + parameters: + subfolder: + description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" + type: string + docker: + - image: circleci/python:3.7 + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - checkout + - run: + command: | + pip install --user awscli + export PATH="$HOME/.local/bin:$PATH" + # Prevent credential from leaking + set +x + export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" + export 
AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" + set -x + for pkg in ~/workspace/*.whl; do + aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read + done + + smoke_test_linux_conda: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_linux_pip: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_docker_image_build: + machine: + image: ubuntu-1604:201903-01 + resource_class: large + environment: + image_name: torchvision/smoke_test + steps: + - checkout + - designate_upload_channel + - run: + name: Build and push Docker image + no_output_timeout: "1h" + command: | + set +x + echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin + set -x + cd .circleci/smoke_test/docker && docker build . -t ${image_name}:${CIRCLE_WORKFLOW_ID} + docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest + docker push ${image_name}:${CIRCLE_WORKFLOW_ID} + docker push ${image_name}:latest + + smoke_test_win_conda: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda env remove -n python${PYTHON_VERSION} || true + CONDA_CHANNEL_FLAGS="" + if [[ "${PYTHON_VERSION}" = 3.9 ]]; then + CONDA_CHANNEL_FLAGS="-c=conda-forge" + fi + conda create ${CONDA_CHANNEL_FLAGS} -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + conda install Pillow>=5.3.0 + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_win_pip: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + CONDA_CHANNEL_FLAGS="" + if [[ "${PYTHON_VERSION}" = 3.9 ]]; then + CONDA_CHANNEL_FLAGS="-c=conda-forge" + fi + conda create ${CONDA_CHANNEL_FLAGS} -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + pip install $(ls ~/workspace/torchvision*.whl) --pre -f 
https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + unittest_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda102" + resource_class: 2xlarge+ + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-1604-cuda-10.2:202012-01 + resource_class: gpu.nvidia.medium + environment: + image_name: "pytorch/manylinux-cuda102" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: docker run -e CIRCLECI -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post Process + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + + key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + environment: + CUDA_VERSION: "10.2" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + + key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_macos_cpu: + <<: *binary_common + macos: + xcode: "12.0" + resource_class: large + steps: + - checkout + - designate_upload_channel + - run: + name: Install wget + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget + # Disable brew auto update which is very slow + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + + key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + cmake_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda102" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: .circleci/unittest/linux/scripts/setup_env.sh + - run: packaging/build_cmake.sh + + cmake_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-1604-cuda-10.2:202012-01 + resource_class: gpu.small + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh + - run: + name: Build torchvision C++ distribution and test + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh + + cmake_macos_cpu: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build cmake + packaging/build_cmake.sh + + cmake_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/build_cmake.sh + + cmake_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_cmake.sh + + build_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - designate_upload_channel + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + + keys: + - sphinx-gallery-{{ checksum "./docs/source/conf.py" }}-{{ checksum ".circleci-weekly" }} + + - run: + name: Build docs + command: | + set -ex + tag=${CIRCLE_TAG:1:5} + VERSION=${tag:-master} + eval "$(./conda/bin/conda shell.bash hook)" + conda activate ./env + pushd docs + pip install -r requirements.txt + make html + popd + - save_cache: + + key: sphinx-gallery-{{ checksum "./docs/source/conf.py" }}-{{ checksum ".circleci-weekly" }} + + paths: + - ./docs/source/auto_examples + - persist_to_workspace: + root: ./ + paths: + - "*" + - store_artifacts: + path: ./docs/build/html + destination: docs + + upload_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - run: + name: Generate netrc + command: | + # set credentials for https pushing + # requires the org-member context + cat > ~/.netrc \<<DONE + machine github.com + login pytorchbot + password ${GITHUB_PYTORCHBOT_TOKEN} + DONE + - run: + name: Upload docs + command: | + # Don't use "checkout" step since it uses ssh, which cannot git push + # https://circleci.com/docs/2.0/configuration-reference/#checkout + set -ex + tag=${CIRCLE_TAG:1:5} + target=${tag:-master} + ~/workspace/.circleci/build_docs/commit_docs.sh ~/workspace $target + + +workflows: + build: + jobs: + - circleci_consistency + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_wheel_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_wheel_py3.6_cu102 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_wheel_py3.6_cu111 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_wheel: + cu_version: rocm4.1 + name: binary_linux_wheel_py3.6_rocm4.1 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_linux_wheel: + cu_version: rocm4.2 + name: binary_linux_wheel_py3.6_rocm4.2 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: /.*/ + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_linux_wheel_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_wheel_py3.7_cu102 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_wheel_py3.7_cu111 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_wheel: + cu_version: rocm4.1 + name: binary_linux_wheel_py3.7_rocm4.1 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_linux_wheel: + cu_version: rocm4.2 + name: binary_linux_wheel_py3.7_rocm4.2 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_wheel_py3.8_cpu + python_version: '3.8' + 
wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_wheel_py3.8_cu102 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_wheel_py3.8_cu111 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_wheel: + cu_version: rocm4.1 + name: binary_linux_wheel_py3.8_rocm4.1 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_linux_wheel: + cu_version: rocm4.2 + name: binary_linux_wheel_py3.8_rocm4.2 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_wheel_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_wheel_py3.9_cu102 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_wheel_py3.9_cu111 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_wheel: + cu_version: rocm4.1 + name: binary_linux_wheel_py3.9_rocm4.1 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_linux_wheel: + cu_version: rocm4.2 + name: binary_linux_wheel_py3.9_rocm4.2 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_wheel_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_wheel_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_wheel_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_wheel_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.6_cpu + python_version: '3.6' + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.6_cu102 + python_version: '3.6' + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.6_cu111 + python_version: '3.6' + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.7_cpu + python_version: '3.7' + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.7_cu102 + python_version: '3.7' + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: 
/v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.7_cu111 + python_version: '3.7' + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.8_cpu + python_version: '3.8' + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.8_cu102 + python_version: '3.8' + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.8_cu111 + python_version: '3.8' + - binary_win_wheel: + cu_version: cpu + name: binary_win_wheel_py3.9_cpu + python_version: '3.9' + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_wheel_py3.9_cu102 + python_version: '3.9' + - binary_win_wheel: + cu_version: cu111 + name: binary_win_wheel_py3.9_cu111 + python_version: '3.9' + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_conda_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_conda_py3.6_cu102 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_conda_py3.6_cu111 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_conda_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_conda_py3.7_cu102 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_conda_py3.7_cu111 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_conda_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_conda_py3.8_cu102 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_conda_py3.8_cu111 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_linux_conda_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + name: binary_linux_conda_py3.9_cu102 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + name: binary_linux_conda_py3.9_cu111 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_macos_conda: + conda_docker_image: 
pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_conda_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_conda_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_conda_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + name: binary_macos_conda_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.6_cpu + python_version: '3.6' + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.6_cu102 + python_version: '3.6' + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.6_cu111 + python_version: '3.6' + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.7_cpu + python_version: '3.7' + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.7_cu102 + python_version: '3.7' + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.7_cu111 + python_version: '3.7' + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.8_cpu + python_version: '3.8' + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.8_cu102 + python_version: '3.8' + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.8_cu111 + python_version: '3.8' + - binary_win_conda: + cu_version: cpu + name: binary_win_conda_py3.9_cpu + python_version: '3.9' + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: master + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: binary_win_conda_py3.9_cu102 + python_version: '3.9' + - binary_win_conda: + cu_version: cu111 + name: binary_win_conda_py3.9_cu111 + python_version: '3.9' + - build_docs: + filters: + branches: + only: + - /.*/ + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: build_docs + python_version: '3.7' + requires: + - binary_linux_wheel_py3.7_cpu + - upload_docs: + context: org-member + filters: + branches: + only: + - nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: upload_docs + python_version: '3.7' + requires: + - build_docs + - python_lint + - python_type_check + - docstring_parameters_sync + - clang_format + - torchhub_test + - torch_onnx_test + - binary_ios_build: + build_environment: binary-libtorchvision_ops-ios-12.0.0-x86_64 + ios_arch: x86_64 + ios_platform: SIMULATOR + name: binary_libtorchvision_ops_ios_12.0.0_x86_64 + - binary_ios_build: + build_environment: 
binary-libtorchvision_ops-ios-12.0.0-arm64 + ios_arch: arm64 + ios_platform: OS + name: binary_libtorchvision_ops_ios_12.0.0_arm64 + - binary_android_build: + build_environment: binary-libtorchvision_ops-android + name: binary_libtorchvision_ops_android + + unittest: + jobs: + - unittest_linux_cpu: + cu_version: cpu + name: unittest_linux_cpu_py3.6 + python_version: '3.6' + - unittest_linux_cpu: + cu_version: cpu + name: unittest_linux_cpu_py3.7 + python_version: '3.7' + - unittest_linux_cpu: + cu_version: cpu + name: unittest_linux_cpu_py3.8 + python_version: '3.8' + - unittest_linux_cpu: + cu_version: cpu + name: unittest_linux_cpu_py3.9 + python_version: '3.9' + - unittest_linux_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_linux_gpu_py3.6 + python_version: '3.6' + - unittest_linux_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_linux_gpu_py3.7 + python_version: '3.7' + - unittest_linux_gpu: + cu_version: cu102 + name: unittest_linux_gpu_py3.8 + python_version: '3.8' + - unittest_linux_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_linux_gpu_py3.9 + python_version: '3.9' + - unittest_windows_cpu: + cu_version: cpu + name: unittest_windows_cpu_py3.6 + python_version: '3.6' + - unittest_windows_cpu: + cu_version: cpu + name: unittest_windows_cpu_py3.7 + python_version: '3.7' + - unittest_windows_cpu: + cu_version: cpu + name: unittest_windows_cpu_py3.8 + python_version: '3.8' + - unittest_windows_cpu: + cu_version: cpu + name: unittest_windows_cpu_py3.9 + python_version: '3.9' + - unittest_windows_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_windows_gpu_py3.6 + python_version: '3.6' + - unittest_windows_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_windows_gpu_py3.7 + python_version: '3.7' + - unittest_windows_gpu: + cu_version: cu102 + name: unittest_windows_gpu_py3.8 + python_version: '3.8' + - unittest_windows_gpu: + cu_version: cu102 + filters: + branches: + only: + - master + - nightly + name: unittest_windows_gpu_py3.9 + python_version: '3.9' + - unittest_macos_cpu: + cu_version: cpu + name: unittest_macos_cpu_py3.6 + python_version: '3.6' + - unittest_macos_cpu: + cu_version: cpu + name: unittest_macos_cpu_py3.7 + python_version: '3.7' + - unittest_macos_cpu: + cu_version: cpu + name: unittest_macos_cpu_py3.8 + python_version: '3.8' + - unittest_macos_cpu: + cu_version: cpu + name: unittest_macos_cpu_py3.9 + python_version: '3.9' + + cmake: + jobs: + - cmake_linux_cpu: + cu_version: cpu + name: cmake_linux_cpu + python_version: '3.8' + - cmake_linux_gpu: + cu_version: cu102 + name: cmake_linux_gpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - cmake_windows_cpu: + cu_version: cpu + name: cmake_windows_cpu + python_version: '3.8' + - cmake_windows_gpu: + cu_version: cu102 + name: cmake_windows_gpu + python_version: '3.8' + - cmake_macos_cpu: + cu_version: cpu + name: cmake_macos_cpu + python_version: '3.8' + + nightly: + jobs: + - circleci_consistency + - python_lint + - python_type_check + - docstring_parameters_sync + - clang_format + - torchhub_test + - torch_onnx_test + - binary_ios_build: + build_environment: nightly-binary-libtorchvision_ops-ios-12.0.0-x86_64 + filters: + branches: + only: + - nightly + ios_arch: x86_64 + ios_platform: SIMULATOR + name: nightly_binary_libtorchvision_ops_ios_12.0.0_x86_64 
+ - binary_ios_build: + build_environment: nightly-binary-libtorchvision_ops-ios-12.0.0-arm64 + filters: + branches: + only: + - nightly + ios_arch: arm64 + ios_platform: OS + name: nightly_binary_libtorchvision_ops_ios_12.0.0_arm64 + - binary_ios_upload: + build_environment: nightly-binary-libtorchvision_ops-ios-12.0.0-upload + context: org-member + filters: + branches: + only: + - nightly + requires: + - nightly_binary_libtorchvision_ops_ios_12.0.0_x86_64 + - nightly_binary_libtorchvision_ops_ios_12.0.0_arm64 + - binary_android_upload: + build_environment: nightly-binary-libtorchvision_ops-android-upload + context: org-member + filters: + branches: + only: + - nightly + name: nightly_binary_libtorchvision_ops_android_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cpu_upload + requires: + - nightly_binary_linux_wheel_py3.6_cpu + subfolder: cpu/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.6_cpu_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_linux_wheel_py3.6_cpu_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cu102 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cu102_upload + requires: + - nightly_binary_linux_wheel_py3.6_cu102 + subfolder: cu102/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.6_cu102_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_linux_wheel_py3.6_cu102_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cu111 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_cu111_upload + requires: + - nightly_binary_linux_wheel_py3.6_cu111 + subfolder: cu111/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.6_cu111_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_linux_wheel_py3.6_cu111_upload + - binary_linux_wheel: + cu_version: rocm4.1 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_rocm4.1 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_rocm4.1_upload + requires: + - nightly_binary_linux_wheel_py3.6_rocm4.1 + 
subfolder: rocm4.1/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.6_rocm4.1_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_linux_wheel_py3.6_rocm4.1_upload + - binary_linux_wheel: + cu_version: rocm4.2 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_rocm4.2 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.6_rocm4.2_upload + requires: + - nightly_binary_linux_wheel_py3.6_rocm4.2 + subfolder: rocm4.2/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.6_rocm4.2_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_linux_wheel_py3.6_rocm4.2_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cpu_upload + requires: + - nightly_binary_linux_wheel_py3.7_cpu + subfolder: cpu/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.7_cpu_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_linux_wheel_py3.7_cpu_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cu102 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cu102_upload + requires: + - nightly_binary_linux_wheel_py3.7_cu102 + subfolder: cu102/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.7_cu102_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_linux_wheel_py3.7_cu102_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cu111 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_cu111_upload + requires: + - nightly_binary_linux_wheel_py3.7_cu111 + subfolder: cu111/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.7_cu111_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_linux_wheel_py3.7_cu111_upload + - binary_linux_wheel: + cu_version: rocm4.1 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_rocm4.1 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - 
binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_rocm4.1_upload + requires: + - nightly_binary_linux_wheel_py3.7_rocm4.1 + subfolder: rocm4.1/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.7_rocm4.1_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_linux_wheel_py3.7_rocm4.1_upload + - binary_linux_wheel: + cu_version: rocm4.2 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_rocm4.2 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.7_rocm4.2_upload + requires: + - nightly_binary_linux_wheel_py3.7_rocm4.2 + subfolder: rocm4.2/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.7_rocm4.2_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_linux_wheel_py3.7_rocm4.2_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cpu_upload + requires: + - nightly_binary_linux_wheel_py3.8_cpu + subfolder: cpu/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.8_cpu_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_linux_wheel_py3.8_cpu_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cu102 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cu102_upload + requires: + - nightly_binary_linux_wheel_py3.8_cu102 + subfolder: cu102/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.8_cu102_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_linux_wheel_py3.8_cu102_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cu111 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_cu111_upload + requires: + - nightly_binary_linux_wheel_py3.8_cu111 + subfolder: cu111/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.8_cu111_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_linux_wheel_py3.8_cu111_upload + - 
binary_linux_wheel: + cu_version: rocm4.1 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_rocm4.1 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_rocm4.1_upload + requires: + - nightly_binary_linux_wheel_py3.8_rocm4.1 + subfolder: rocm4.1/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.8_rocm4.1_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_linux_wheel_py3.8_rocm4.1_upload + - binary_linux_wheel: + cu_version: rocm4.2 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_rocm4.2 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.8_rocm4.2_upload + requires: + - nightly_binary_linux_wheel_py3.8_rocm4.2 + subfolder: rocm4.2/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.8_rocm4.2_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_linux_wheel_py3.8_rocm4.2_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cpu_upload + requires: + - nightly_binary_linux_wheel_py3.9_cpu + subfolder: cpu/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.9_cpu_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_linux_wheel_py3.9_cpu_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cu102 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cu102_upload + requires: + - nightly_binary_linux_wheel_py3.9_cu102 + subfolder: cu102/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.9_cu102_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_linux_wheel_py3.9_cu102_upload + - binary_linux_wheel: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cu111 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_cu111_upload + requires: + - 
nightly_binary_linux_wheel_py3.9_cu111 + subfolder: cu111/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.9_cu111_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_linux_wheel_py3.9_cu111_upload + - binary_linux_wheel: + cu_version: rocm4.1 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_rocm4.1 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-rocm:4.1 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_rocm4.1_upload + requires: + - nightly_binary_linux_wheel_py3.9_rocm4.1 + subfolder: rocm4.1/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.9_rocm4.1_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_linux_wheel_py3.9_rocm4.1_upload + - binary_linux_wheel: + cu_version: rocm4.2 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_rocm4.2 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-rocm:4.2 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_wheel_py3.9_rocm4.2_upload + requires: + - nightly_binary_linux_wheel_py3.9_rocm4.2 + subfolder: rocm4.2/ + - smoke_test_linux_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_wheel_py3.9_rocm4.2_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_linux_wheel_py3.9_rocm4.2_upload + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.6_cpu_upload + requires: + - nightly_binary_macos_wheel_py3.6_cpu + subfolder: '' + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.7_cpu_upload + requires: + - nightly_binary_macos_wheel_py3.7_cpu + subfolder: '' + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.8_cpu_upload + requires: + - nightly_binary_macos_wheel_py3.8_cpu + subfolder: '' + - binary_macos_wheel: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: 
/v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_wheel_py3.9_cpu_upload + requires: + - nightly_binary_macos_wheel_py3.9_cpu + subfolder: '' + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cpu + python_version: '3.6' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cpu_upload + requires: + - nightly_binary_win_wheel_py3.6_cpu + subfolder: cpu/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.6_cpu_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_win_wheel_py3.6_cpu_upload + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cu102 + python_version: '3.6' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cu102_upload + requires: + - nightly_binary_win_wheel_py3.6_cu102 + subfolder: cu102/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.6_cu102_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_win_wheel_py3.6_cu102_upload + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cu111 + python_version: '3.6' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.6_cu111_upload + requires: + - nightly_binary_win_wheel_py3.6_cu111 + subfolder: cu111/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.6_cu111_smoke_test_pip + python_version: '3.6' + requires: + - nightly_binary_win_wheel_py3.6_cu111_upload + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cpu + python_version: '3.7' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cpu_upload + requires: + - nightly_binary_win_wheel_py3.7_cpu + subfolder: cpu/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.7_cpu_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_win_wheel_py3.7_cpu_upload + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cu102 + python_version: '3.7' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cu102_upload + requires: + - nightly_binary_win_wheel_py3.7_cu102 + subfolder: cu102/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + 
name: nightly_binary_win_wheel_py3.7_cu102_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_win_wheel_py3.7_cu102_upload + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cu111 + python_version: '3.7' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.7_cu111_upload + requires: + - nightly_binary_win_wheel_py3.7_cu111 + subfolder: cu111/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.7_cu111_smoke_test_pip + python_version: '3.7' + requires: + - nightly_binary_win_wheel_py3.7_cu111_upload + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cpu + python_version: '3.8' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cpu_upload + requires: + - nightly_binary_win_wheel_py3.8_cpu + subfolder: cpu/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.8_cpu_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_win_wheel_py3.8_cpu_upload + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cu102 + python_version: '3.8' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cu102_upload + requires: + - nightly_binary_win_wheel_py3.8_cu102 + subfolder: cu102/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.8_cu102_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_win_wheel_py3.8_cu102_upload + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cu111 + python_version: '3.8' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.8_cu111_upload + requires: + - nightly_binary_win_wheel_py3.8_cu111 + subfolder: cu111/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.8_cu111_smoke_test_pip + python_version: '3.8' + requires: + - nightly_binary_win_wheel_py3.8_cu111_upload + - binary_win_wheel: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cpu + python_version: '3.9' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cpu_upload + requires: + - nightly_binary_win_wheel_py3.9_cpu + subfolder: cpu/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.9_cpu_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_win_wheel_py3.9_cpu_upload + - binary_win_wheel: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: 
/v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cu102 + python_version: '3.9' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cu102_upload + requires: + - nightly_binary_win_wheel_py3.9_cu102 + subfolder: cu102/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.9_cu102_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_win_wheel_py3.9_cu102_upload + - binary_win_wheel: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cu111 + python_version: '3.9' + - binary_wheel_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_wheel_py3.9_cu111_upload + requires: + - nightly_binary_win_wheel_py3.9_cu111 + subfolder: cu111/ + - smoke_test_win_pip: + filters: + branches: + only: + - nightly + name: nightly_binary_win_wheel_py3.9_cu111_smoke_test_pip + python_version: '3.9' + requires: + - nightly_binary_win_wheel_py3.9_cu111_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cpu_upload + requires: + - nightly_binary_linux_conda_py3.6_cpu + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.6_cpu_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_linux_conda_py3.6_cpu_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cu102 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cu102_upload + requires: + - nightly_binary_linux_conda_py3.6_cu102 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.6_cu102_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_linux_conda_py3.6_cu102_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cu111 + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.6_cu111_upload + requires: + - nightly_binary_linux_conda_py3.6_cu111 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.6_cu111_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_linux_conda_py3.6_cu111_upload + - binary_linux_conda: + conda_docker_image: 
pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cpu_upload + requires: + - nightly_binary_linux_conda_py3.7_cpu + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.7_cpu_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_linux_conda_py3.7_cpu_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cu102 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cu102_upload + requires: + - nightly_binary_linux_conda_py3.7_cu102 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.7_cu102_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_linux_conda_py3.7_cu102_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cu111 + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.7_cu111_upload + requires: + - nightly_binary_linux_conda_py3.7_cu111 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.7_cu111_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_linux_conda_py3.7_cu111_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cpu_upload + requires: + - nightly_binary_linux_conda_py3.8_cpu + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.8_cpu_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_linux_conda_py3.8_cpu_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cu102 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cu102_upload + requires: + - nightly_binary_linux_conda_py3.8_cu102 + - smoke_test_linux_conda: + filters: + 
branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.8_cu102_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_linux_conda_py3.8_cu102_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cu111 + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.8_cu111_upload + requires: + - nightly_binary_linux_conda_py3.8_cu111 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.8_cu111_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_linux_conda_py3.8_cu111_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cpu_upload + requires: + - nightly_binary_linux_conda_py3.9_cpu + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.9_cpu_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_linux_conda_py3.9_cpu_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda102 + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cu102 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cu102_upload + requires: + - nightly_binary_linux_conda_py3.9_cu102 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.9_cu102_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_linux_conda_py3.9_cu102_upload + - binary_linux_conda: + conda_docker_image: pytorch/conda-builder:cuda111 + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cu111 + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda111 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_linux_conda_py3.9_cu111_upload + requires: + - nightly_binary_linux_conda_py3.9_cu111 + - smoke_test_linux_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_linux_conda_py3.9_cu111_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_linux_conda_py3.9_cu111_upload + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.6_cpu + python_version: '3.6' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + 
filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.6_cpu_upload + requires: + - nightly_binary_macos_conda_py3.6_cpu + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.7_cpu + python_version: '3.7' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.7_cpu_upload + requires: + - nightly_binary_macos_conda_py3.7_cpu + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.8_cpu + python_version: '3.8' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.8_cpu_upload + requires: + - nightly_binary_macos_conda_py3.8_cpu + - binary_macos_conda: + conda_docker_image: pytorch/conda-builder:cpu + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.9_cpu + python_version: '3.9' + wheel_docker_image: pytorch/manylinux-cuda102 + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_macos_conda_py3.9_cpu_upload + requires: + - nightly_binary_macos_conda_py3.9_cpu + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cpu + python_version: '3.6' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cpu_upload + requires: + - nightly_binary_win_conda_py3.6_cpu + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.6_cpu_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_win_conda_py3.6_cpu_upload + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cu102 + python_version: '3.6' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cu102_upload + requires: + - nightly_binary_win_conda_py3.6_cu102 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.6_cu102_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_win_conda_py3.6_cu102_upload + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cu111 + python_version: '3.6' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.6_cu111_upload + requires: + - nightly_binary_win_conda_py3.6_cu111 + - smoke_test_win_conda: + filters: + branches: + only: + - 
nightly + name: nightly_binary_win_conda_py3.6_cu111_smoke_test_conda + python_version: '3.6' + requires: + - nightly_binary_win_conda_py3.6_cu111_upload + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cpu + python_version: '3.7' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cpu_upload + requires: + - nightly_binary_win_conda_py3.7_cpu + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.7_cpu_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_win_conda_py3.7_cpu_upload + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cu102 + python_version: '3.7' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cu102_upload + requires: + - nightly_binary_win_conda_py3.7_cu102 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.7_cu102_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_win_conda_py3.7_cu102_upload + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cu111 + python_version: '3.7' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.7_cu111_upload + requires: + - nightly_binary_win_conda_py3.7_cu111 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.7_cu111_smoke_test_conda + python_version: '3.7' + requires: + - nightly_binary_win_conda_py3.7_cu111_upload + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cpu + python_version: '3.8' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cpu_upload + requires: + - nightly_binary_win_conda_py3.8_cpu + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.8_cpu_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_win_conda_py3.8_cpu_upload + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cu102 + python_version: '3.8' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cu102_upload + requires: + - nightly_binary_win_conda_py3.8_cu102 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.8_cu102_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_win_conda_py3.8_cu102_upload + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cu111 + 
python_version: '3.8' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.8_cu111_upload + requires: + - nightly_binary_win_conda_py3.8_cu111 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.8_cu111_smoke_test_conda + python_version: '3.8' + requires: + - nightly_binary_win_conda_py3.8_cu111_upload + - binary_win_conda: + cu_version: cpu + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cpu + python_version: '3.9' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cpu_upload + requires: + - nightly_binary_win_conda_py3.9_cpu + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.9_cpu_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_win_conda_py3.9_cpu_upload + - binary_win_conda: + cu_version: cu102 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cu102 + python_version: '3.9' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cu102_upload + requires: + - nightly_binary_win_conda_py3.9_cu102 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.9_cu102_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_win_conda_py3.9_cu102_upload + - binary_win_conda: + cu_version: cu111 + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cu111 + python_version: '3.9' + - binary_conda_upload: + context: org-member + filters: + branches: + only: nightly + tags: + only: /v[0-9]+(\.[0-9]+)*-rc[0-9]+/ + name: nightly_binary_win_conda_py3.9_cu111_upload + requires: + - nightly_binary_win_conda_py3.9_cu111 + - smoke_test_win_conda: + filters: + branches: + only: + - nightly + name: nightly_binary_win_conda_py3.9_cu111_smoke_test_conda + python_version: '3.9' + requires: + - nightly_binary_win_conda_py3.9_cu111_upload + docker_build: + triggers: + - schedule: + cron: "0 10 * * 0" + filters: + branches: + only: + - master + jobs: + - smoke_test_docker_image_build: + context: org-member diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml.in b/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml.in new file mode 100644 index 0000000000000000000000000000000000000000..7a94e01fc7e257517dd4e722b782a926134481d0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/config.yml.in @@ -0,0 +1,1025 @@ +version: 2.1 + +# How to test the Linux jobs: +# - Install CircleCI local CLI: https://circleci.com/docs/2.0/local-cli/ +# - circleci config process .circleci/config.yml > gen.yml && circleci local execute -c gen.yml --job binary_linux_wheel_py3.7 +# - Replace binary_linux_wheel_py3.7 with the name of the job you want to test. +# Job names are 'name:' key. 
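As a concrete illustration of the local-testing note in the header comment above, the following is a minimal sketch (my own addition, not part of the upstream file) of how one of the generated nightly jobs defined earlier in this config could be run with the CircleCI local CLI. It assumes the CLI is installed, the commands are run from the repository root, and the chosen job name (nightly_binary_linux_wheel_py3.8_cpu, taken from the generated workflow list above) builds without CI-only credentials:

    # Expand the 2.1 config (executors, commands, parameters) into a plain
    # config that the local runner can execute
    circleci config process .circleci/config.yml > gen.yml
    # Run a single generated job from the expanded config inside Docker
    circleci local execute -c gen.yml --job nightly_binary_linux_wheel_py3.8_cpu

Note that the *_upload and smoke-test jobs depend on the org-member context and on S3/Anaconda credentials, so locally it generally only makes sense to exercise the build jobs themselves.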
+ +executors: + windows-cpu: + machine: + resource_class: windows.xlarge + image: windows-server-2019-vs2019:stable + shell: bash.exe + + windows-gpu: + machine: + resource_class: windows.gpu.nvidia.medium + image: windows-server-2019-nvidia:stable + shell: bash.exe + +commands: + checkout_merge: + description: "checkout merge branch" + steps: + - checkout +# - run: +# name: Checkout merge branch +# command: | +# set -ex +# BRANCH=$(git rev-parse --abbrev-ref HEAD) +# if [[ "$BRANCH" != "master" ]]; then +# git fetch --force origin ${CIRCLE_BRANCH}/merge:merged/${CIRCLE_BRANCH} +# git checkout "merged/$CIRCLE_BRANCH" +# fi + designate_upload_channel: + description: "inserts the correct upload channel into ${BASH_ENV}" + steps: + - run: + name: adding UPLOAD_CHANNEL to BASH_ENV + command: | + our_upload_channel=test + echo "export UPLOAD_CHANNEL=${our_upload_channel}" >> ${BASH_ENV} + install_cuda_compatible_cmath: + description: "Install CUDA compatible cmath" + steps: + - run: + name: _HACK_ Install CUDA compatible cmath + no_output_timeout: 1m + command: | + powershell .circleci/scripts/vs_install_cmath.ps1 + + brew_update: + description: "Update Homebrew and install base formulae" + steps: + - run: + name: Update Homebrew + no_output_timeout: "10m" + command: | + set -ex + + # Update repositories manually. + # Running `brew update` produces a comparison between the + # current checkout and the updated checkout, which takes a + # very long time because the existing checkout is 2y old. + for path in $(find /usr/local/Homebrew -type d -name .git) + do + cd $path/.. + git fetch --depth=1 origin + git reset --hard origin/master + done + + export HOMEBREW_NO_AUTO_UPDATE=1 + + # Install expect and moreutils so that we can call `unbuffer` and `ts`. + # moreutils installs a `parallel` executable by default, which conflicts + # with the executable from the GNU `parallel`, so we must unlink GNU + # `parallel` first, and relink it afterwards. 
+ brew install coreutils + brew unlink parallel + brew install moreutils + brew link parallel --overwrite + brew install expect + + brew_install: + description: "Install Homebrew formulae" + parameters: + formulae: + type: string + default: "" + steps: + - run: + name: Install << parameters.formulae >> + no_output_timeout: "10m" + command: | + set -ex + export HOMEBREW_NO_AUTO_UPDATE=1 + brew install << parameters.formulae >> + + run_brew_for_ios_build: + steps: + - brew_update + - brew_install: + formulae: libtool + +binary_common: &binary_common + parameters: + # Edit these defaults to do a release + build_version: + description: "version number of release binary; by default, build a nightly" + type: string + default: "0.10.0" + pytorch_version: + description: "PyTorch version to build against; by default, use a nightly" + type: string + default: "1.9.0" + # Don't edit these + python_version: + description: "Python version to build against (e.g., 3.7)" + type: string + cu_version: + description: "CUDA version to build against, in CU format (e.g., cpu or cu100)" + type: string + default: "cpu" + unicode_abi: + description: "Python 2.7 wheel only: whether or not we are cp27mu (default: no)" + type: string + default: "" + wheel_docker_image: + description: "Wheel only: what docker image to use" + type: string + default: "pytorch/manylinux-cuda102" + conda_docker_image: + description: "Conda only: what docker image to use" + type: string + default: "pytorch/conda-builder:cpu" + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + +torchvision_ios_params: &torchvision_ios_params + parameters: + build_environment: + type: string + default: "" + ios_arch: + type: string + default: "" + ios_platform: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + IOS_ARCH: << parameters.ios_arch >> + IOS_PLATFORM: << parameters.ios_platform >> + +torchvision_android_params: &torchvision_android_params + parameters: + build_environment: + type: string + default: "" + environment: + BUILD_ENVIRONMENT: << parameters.build_environment >> + +smoke_test_common: &smoke_test_common + <<: *binary_common + docker: + - image: torchvision/smoke_test:latest + +jobs: + circleci_consistency: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off jinja2 pyyaml + python .circleci/regenerate.py + git diff --exit-code || (echo ".circleci/config.yml not in sync with config.yml.in! Run .circleci/regenerate.py to update config"; exit 1) + + python_lint: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off flake8 typing + flake8 --config=setup.cfg . + + python_type_check: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + sudo apt-get update -y + sudo apt install -y libturbojpeg-dev + pip install --user --progress-bar off mypy + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + pip install --user --progress-bar off --editable . 
+ mypy --config-file mypy.ini + + docstring_parameters_sync: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user pydocstyle + pydocstyle + + clang_format: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + curl https://oss-clang-format.s3.us-east-2.amazonaws.com/linux64/clang-format-linux64 -o clang-format + chmod +x clang-format + sudo mv clang-format /opt/clang-format + ./.circleci/unittest/linux/scripts/run-clang-format.py -r torchvision/csrc --clang-format-executable /opt/clang-format + + torchhub_test: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + # need to install torchvision dependencies due to transitive imports + pip install --user --progress-bar off --editable . + python test/test_hub.py + + torch_onnx_test: + docker: + - image: circleci/python:3.7 + steps: + - checkout + - run: + command: | + pip install --user --progress-bar off --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + # need to install torchvision dependencies due to transitive imports + pip install --user --progress-bar off --editable . + pip install --user onnx + pip install --user onnxruntime + python test/test_onnx.py + + binary_linux_wheel: + <<: *binary_common + docker: + - image: << parameters.wheel_docker_image >> + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_linux_conda: + <<: *binary_common + docker: + - image: "<< parameters.conda_docker_image >>" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: packaging/build_conda.sh + - store_artifacts: + path: /opt/conda/conda-bld/linux-64 + - persist_to_workspace: + root: /opt/conda/conda-bld/linux-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_conda: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Build conda packages + no_output_timeout: 20m + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate base + conda install -yq conda-build "conda-package-handling!=1.5.0" + packaging/build_conda.sh + rm /C/tools/miniconda3/conda-bld/win-64/vs${VC_YEAR}*.tar.bz2 + - store_artifacts: + path: C:/tools/miniconda3/conda-bld/win-64 + - persist_to_workspace: + root: C:/tools/miniconda3/conda-bld/win-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_win_wheel: + <<: *binary_common + executor: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Build wheel packages + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + - store_test_results: + path: build_results/ + + binary_macos_wheel: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + # Cannot easily 
deduplicate this as source'ing activate + # will set environment variables which we need to propagate + # to build_wheel.sh + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + packaging/build_wheel.sh + - store_artifacts: + path: dist + - persist_to_workspace: + root: dist + paths: + - "*" + + binary_ios_build: + <<: *torchvision_ios_params + macos: + xcode: "12.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_build.sh" + cat "$script" + source "$script" + - persist_to_workspace: + root: /Users/distiller/workspace/ + paths: ios + + binary_ios_upload: + <<: *torchvision_ios_params + macos: + xcode: "12.0" + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run_brew_for_ios_build + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/Users/distiller/project/.circleci/unittest/ios/scripts/binary_ios_upload.sh" + cat "$script" + source "$script" + + binary_android_build: + <<: *torchvision_android_params + docker: + - image: circleci/android:api-29-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Build + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_build.sh" + cat "$script" + source "$script" + - store_artifacts: + path: ~/workspace/artifacts + + binary_android_upload: + <<: *torchvision_android_params + docker: + - image: circleci/android:api-29-ndk + resource_class: xlarge + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Upload + no_output_timeout: "1h" + command: | + script="/home/circleci/project/.circleci/unittest/android/scripts/binary_android_upload.sh" + cat "$script" + source "$script" + + binary_macos_conda: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build + packaging/build_conda.sh + - store_artifacts: + path: /Users/distiller/miniconda3/conda-bld/osx-64 + - persist_to_workspace: + root: /Users/distiller/miniconda3/conda-bld/osx-64 + paths: + - "*" + - store_test_results: + path: build_results/ + + # Requires org-member context + binary_conda_upload: + docker: + - image: continuumio/miniconda + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + command: | + # Prevent credential from leaking + conda install -yq anaconda-client + set -x + anaconda -t "${CONDA_PYTORCHBOT_TOKEN}" upload ~/workspace/*.tar.bz2 -u "pytorch-${UPLOAD_CHANNEL}" --label main --no-progress --force + + # Requires org-member context + binary_wheel_upload: + parameters: + subfolder: + description: "What whl subfolder to upload to, e.g., blank or cu100/ (trailing slash is important)" + type: string + docker: + - image: circleci/python:3.7 + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - checkout + - run: + command: | + pip install --user awscli + export PATH="$HOME/.local/bin:$PATH" + # Prevent credential from leaking + set +x + export AWS_ACCESS_KEY_ID="${PYTORCH_BINARY_AWS_ACCESS_KEY_ID}" + export 
AWS_SECRET_ACCESS_KEY="${PYTORCH_BINARY_AWS_SECRET_ACCESS_KEY}" + set -x + for pkg in ~/workspace/*.whl; do + aws s3 cp "$pkg" "s3://pytorch/whl/${UPLOAD_CHANNEL}/<< parameters.subfolder >>" --acl public-read + done + + smoke_test_linux_conda: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_linux_pip: + <<: *smoke_test_common + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + pip install $(ls ~/workspace/torchvision*.whl) --pre -f https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + source /usr/local/etc/profile.d/conda.sh && conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_docker_image_build: + machine: + image: ubuntu-1604:201903-01 + resource_class: large + environment: + image_name: torchvision/smoke_test + steps: + - checkout + - designate_upload_channel + - run: + name: Build and push Docker image + no_output_timeout: "1h" + command: | + set +x + echo "${DOCKER_HUB_TOKEN}" | docker login --username "${DOCKER_HUB_USERNAME}" --password-stdin + set -x + cd .circleci/smoke_test/docker && docker build . -t ${image_name}:${CIRCLE_WORKFLOW_ID} + docker tag ${image_name}:${CIRCLE_WORKFLOW_ID} ${image_name}:latest + docker push ${image_name}:${CIRCLE_WORKFLOW_ID} + docker push ${image_name}:latest + + smoke_test_win_conda: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda env remove -n python${PYTHON_VERSION} || true + CONDA_CHANNEL_FLAGS="" + if [[ "${PYTHON_VERSION}" = 3.9 ]]; then + CONDA_CHANNEL_FLAGS="-c=conda-forge" + fi + conda create ${CONDA_CHANNEL_FLAGS} -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + conda install Pillow>=5.3.0 + conda install -v -y -c pytorch-nightly pytorch + conda install -v -y $(ls ~/workspace/torchvision*.tar.bz2) + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + smoke_test_win_pip: + <<: *binary_common + executor: + name: windows-cpu + steps: + - attach_workspace: + at: ~/workspace + - designate_upload_channel + - run: + name: install binaries + command: | + set -x + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + CONDA_CHANNEL_FLAGS="" + if [[ "${PYTHON_VERSION}" = 3.9 ]]; then + CONDA_CHANNEL_FLAGS="-c=conda-forge" + fi + conda create ${CONDA_CHANNEL_FLAGS} -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda create -yn python${PYTHON_VERSION} python=${PYTHON_VERSION} + conda activate python${PYTHON_VERSION} + pip install $(ls ~/workspace/torchvision*.whl) --pre -f 
https://download.pytorch.org/whl/nightly/torch_nightly.html + - run: + name: smoke test + command: | + eval "$('/C/tools/miniconda3/Scripts/conda.exe' 'shell.bash' 'hook')" + conda activate python${PYTHON_VERSION} + python -c "import torchvision" + + unittest_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda102" + resource_class: 2xlarge+ + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v2-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-1604-cuda-10.2:202012-01 + resource_class: gpu.nvidia.medium + environment: + image_name: "pytorch/manylinux-cuda102" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: docker run -e PYTHON_VERSION -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v3-linux-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD -e UPLOAD_CHANNEL -e CU_VERSION "${image_name}" .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: docker run -e CIRCLECI -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post Process + command: docker run -t --gpus all -v $PWD:$PWD -w $PWD "${image_name}" .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v2-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + environment: + CUDA_VERSION: "10.2" + PYTHON_VERSION: << parameters.python_version >> + steps: + - checkout + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. + command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/windows/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v1-windows-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/windows/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/windows/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/windows/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/windows/scripts/post_process.sh + - store_test_results: + path: test-results + + unittest_macos_cpu: + <<: *binary_common + macos: + xcode: "12.0" + resource_class: large + steps: + - checkout + - designate_upload_channel + - run: + name: Install wget + command: HOMEBREW_NO_AUTO_UPDATE=1 brew install wget + # Disable brew auto update which is very slow + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - save_cache: + {% raw %} + key: env-v3-macos-{{ arch }}-py<< parameters.python_version >>-{{ checksum ".circleci/unittest/linux/scripts/environment.yml" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - conda + - env + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Run tests + command: .circleci/unittest/linux/scripts/run_test.sh + - run: + name: Post process + command: .circleci/unittest/linux/scripts/post_process.sh + - store_test_results: + path: test-results + + cmake_linux_cpu: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda102" + resource_class: 2xlarge+ + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: .circleci/unittest/linux/scripts/setup_env.sh + - run: packaging/build_cmake.sh + + cmake_linux_gpu: + <<: *binary_common + machine: + image: ubuntu-1604-cuda-10.2:202012-01 + resource_class: gpu.small + environment: + PYTHON_VERSION: << parameters.python_version >> + PYTORCH_VERSION: << parameters.pytorch_version >> + UNICODE_ABI: << parameters.unicode_abi >> + CU_VERSION: << parameters.cu_version >> + steps: + - checkout_merge + - designate_upload_channel + - run: + name: Setup conda + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> .circleci/unittest/linux/scripts/setup_env.sh + - run: + name: Build torchvision C++ distribution and test + command: docker run -e CU_VERSION -e PYTHON_VERSION -e UNICODE_ABI -e PYTORCH_VERSION -e UPLOAD_CHANNEL -t --gpus all -v $PWD:$PWD -w $PWD << parameters.wheel_docker_image >> packaging/build_cmake.sh + + cmake_macos_cpu: + <<: *binary_common + macos: + xcode: "12.0" + steps: + - checkout_merge + - designate_upload_channel + - run: + command: | + curl -o conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh + sh conda.sh -b + source $HOME/miniconda3/bin/activate + conda install -yq conda-build cmake + packaging/build_cmake.sh + + cmake_windows_cpu: + <<: *binary_common + executor: + name: windows-cpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/build_cmake.sh + + cmake_windows_gpu: + <<: *binary_common + executor: + name: windows-gpu + steps: + - checkout_merge + - designate_upload_channel + - install_cuda_compatible_cmath + - run: + command: | + set -ex + source packaging/windows/internal/vc_install_helper.sh + packaging/windows/internal/cuda_install.bat + packaging/build_cmake.sh + + build_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - checkout + - run: + name: Setup + command: .circleci/unittest/linux/scripts/setup_env.sh + - designate_upload_channel + - run: + name: Install torchvision + command: .circleci/unittest/linux/scripts/install.sh + - run: + name: Generate cache key + # This will refresh cache on Sundays, nightly build should generate new cache. 
+ command: echo "$(date +"%Y-%U")" > .circleci-weekly + - restore_cache: + {% raw %} + keys: + - sphinx-gallery-{{ checksum "./docs/source/conf.py" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + - run: + name: Build docs + command: | + set -ex + tag=${CIRCLE_TAG:1:5} + VERSION=${tag:-master} + eval "$(./conda/bin/conda shell.bash hook)" + conda activate ./env + pushd docs + pip install -r requirements.txt + make html + popd + - save_cache: + {% raw %} + key: sphinx-gallery-{{ checksum "./docs/source/conf.py" }}-{{ checksum ".circleci-weekly" }} + {% endraw %} + paths: + - ./docs/source/auto_examples + - persist_to_workspace: + root: ./ + paths: + - "*" + - store_artifacts: + path: ./docs/build/html + destination: docs + + upload_docs: + <<: *binary_common + docker: + - image: "pytorch/manylinux-cuda100" + resource_class: 2xlarge+ + steps: + - attach_workspace: + at: ~/workspace + - run: + name: Generate netrc + command: | + # set credentials for https pushing + # requires the org-member context + cat > ~/.netrc \<<DONE + machine github.com + login pytorchbot + password ${GITHUB_PYTORCHBOT_TOKEN} + DONE + - run: + name: Upload docs + command: | + # Don't use "checkout" step since it uses ssh, which cannot git push + # https://circleci.com/docs/2.0/configuration-reference/#checkout + set -ex + tag=${CIRCLE_TAG:1:5} + target=${tag:-master} + ~/workspace/.circleci/build_docs/commit_docs.sh ~/workspace $target + + +workflows: + build: +{%- if True %} + jobs: + - circleci_consistency + {{ build_workflows(windows_latest_only=True) }} + - python_lint + - python_type_check + - docstring_parameters_sync + - clang_format + - torchhub_test + - torch_onnx_test + {{ ios_workflows() }} + {{ android_workflows() }} + + unittest: + jobs: + {{ unittest_workflows() }} + + cmake: + jobs: + {{ cmake_workflows() }} + + nightly: +{%- endif %} + jobs: + - circleci_consistency + - python_lint + - python_type_check + - docstring_parameters_sync + - clang_format + - torchhub_test + - torch_onnx_test + {{ ios_workflows(nightly=True) }} + {{ android_workflows(nightly=True) }} + {{ build_workflows(prefix="nightly_", filter_branch="nightly", upload=True) }} + docker_build: + triggers: + - schedule: + cron: "0 10 * * 0" + filters: + branches: + only: + - master + jobs: + - smoke_test_docker_image_build: + context: org-member diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/regenerate.py b/pretrained_model/pytorch_vision_v0.10.0/.circleci/regenerate.py new file mode 100644 index 0000000000000000000000000000000000000000..ce7cf4cedbbab0b51e155f3b3ceb3bfb9d432952 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/regenerate.py @@ -0,0 +1,343 @@ +#!/usr/bin/env python3 + +""" +This script should use a very simple, functional programming style. +Avoid Jinja macros in favor of native Python functions. + +Don't go overboard on code generation; use Python only to generate +content that can't be easily declared statically using CircleCI's YAML API. + +Data declarations (e.g. the nested loops for defining the configuration matrix) +should be at the top of the file for easy updating. 
+ +See this comment for design rationale: +https://github.com/pytorch/vision/pull/1321#issuecomment-531033978 +""" + +import jinja2 +from jinja2 import select_autoescape +import yaml +import os.path + + +PYTHON_VERSIONS = ["3.6", "3.7", "3.8", "3.9"] + +RC_PATTERN = r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/" + + +def build_workflows(prefix='', filter_branch=None, upload=False, indentation=6, windows_latest_only=False): + w = [] + for btype in ["wheel", "conda"]: + for os_type in ["linux", "macos", "win"]: + python_versions = PYTHON_VERSIONS + cu_versions_dict = {"linux": ["cpu", "cu102", "cu111", "rocm4.1", "rocm4.2"], + "win": ["cpu", "cu102", "cu111"], + "macos": ["cpu"]} + cu_versions = cu_versions_dict[os_type] + for python_version in python_versions: + for cu_version in cu_versions: + # ROCm conda packages not yet supported + if cu_version.startswith('rocm') and btype == "conda": + continue + for unicode in [False]: + fb = filter_branch + if windows_latest_only and os_type == "win" and filter_branch is None and \ + (python_version != python_versions[-1] or + (cu_version not in [cu_versions[0], cu_versions[-1]])): + fb = "master" + if not fb and (os_type == 'linux' and + cu_version == 'cpu' and + btype == 'wheel' and + python_version == '3.7'): + # the fields must match the build_docs "requires" dependency + fb = "/.*/" + w += workflow_pair( + btype, os_type, python_version, cu_version, + unicode, prefix, upload, filter_branch=fb) + + if not filter_branch: + # Build on every pull request, but upload only on nightly and tags + w += build_doc_job('/.*/') + w += upload_doc_job('nightly') + return indent(indentation, w) + + +def workflow_pair(btype, os_type, python_version, cu_version, unicode, prefix='', upload=False, *, filter_branch=None): + + w = [] + unicode_suffix = "u" if unicode else "" + base_workflow_name = f"{prefix}binary_{os_type}_{btype}_py{python_version}{unicode_suffix}_{cu_version}" + + w.append(generate_base_workflow( + base_workflow_name, python_version, cu_version, + unicode, os_type, btype, filter_branch=filter_branch)) + + if upload: + w.append(generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, filter_branch=filter_branch)) + if filter_branch == 'nightly' and os_type in ['linux', 'win']: + pydistro = 'pip' if btype == 'wheel' else 'conda' + w.append(generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type)) + + return w + + +def build_doc_job(filter_branch): + job = { + "name": "build_docs", + "python_version": "3.7", + "requires": ["binary_linux_wheel_py3.7_cpu", ], + } + + if filter_branch: + job["filters"] = gen_filter_branch_tree(filter_branch, + tags_list=RC_PATTERN) + return [{"build_docs": job}] + + +def upload_doc_job(filter_branch): + job = { + "name": "upload_docs", + "context": "org-member", + "python_version": "3.7", + "requires": ["build_docs", ], + } + + if filter_branch: + job["filters"] = gen_filter_branch_tree(filter_branch, + tags_list=RC_PATTERN) + return [{"upload_docs": job}] + + +manylinux_images = { + "cu92": "pytorch/manylinux-cuda92", + "cu101": "pytorch/manylinux-cuda101", + "cu102": "pytorch/manylinux-cuda102", + "cu110": "pytorch/manylinux-cuda110", + "cu111": "pytorch/manylinux-cuda111", + "cu112": "pytorch/manylinux-cuda112", +} + + +def get_manylinux_image(cu_version): + if cu_version == "cpu": + return "pytorch/manylinux-cuda102" + elif cu_version.startswith('cu'): + cu_suffix = cu_version[len('cu'):] + return f"pytorch/manylinux-cuda{cu_suffix}" + elif cu_version.startswith('rocm'): + 
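+        # e.g. "rocm4.1" -> "pytorch/manylinux-rocm:4.1"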
rocm_suffix = cu_version[len('rocm'):] + return f"pytorch/manylinux-rocm:{rocm_suffix}" + + +def get_conda_image(cu_version): + if cu_version == "cpu": + return "pytorch/conda-builder:cpu" + elif cu_version.startswith('cu'): + cu_suffix = cu_version[len('cu'):] + return f"pytorch/conda-builder:cuda{cu_suffix}" + + +def generate_base_workflow(base_workflow_name, python_version, cu_version, + unicode, os_type, btype, *, filter_branch=None): + + d = { + "name": base_workflow_name, + "python_version": python_version, + "cu_version": cu_version, + } + + if os_type != "win" and unicode: + d["unicode_abi"] = '1' + + if os_type != "win": + d["wheel_docker_image"] = get_manylinux_image(cu_version) + # ROCm conda packages not yet supported + if "rocm" not in cu_version: + d["conda_docker_image"] = get_conda_image(cu_version) + + if filter_branch is not None: + d["filters"] = { + "branches": { + "only": filter_branch + }, + "tags": { + # Using a raw string here to avoid having to escape + # anything + "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/" + } + } + + w = f"binary_{os_type}_{btype}" + return {w: d} + + +def gen_filter_branch_tree(*branches, tags_list=None): + filter_dict = {"branches": {"only": [b for b in branches]}} + if tags_list is not None: + filter_dict["tags"] = {"only": tags_list} + return filter_dict + + +def generate_upload_workflow(base_workflow_name, os_type, btype, cu_version, *, filter_branch=None): + d = { + "name": f"{base_workflow_name}_upload", + "context": "org-member", + "requires": [base_workflow_name], + } + + if btype == 'wheel': + d["subfolder"] = "" if os_type == 'macos' else cu_version + "/" + + if filter_branch is not None: + d["filters"] = { + "branches": { + "only": filter_branch + }, + "tags": { + # Using a raw string here to avoid having to escape + # anything + "only": r"/v[0-9]+(\.[0-9]+)*-rc[0-9]+/" + } + } + + return {f"binary_{btype}_upload": d} + + +def generate_smoketest_workflow(pydistro, base_workflow_name, filter_branch, python_version, os_type): + + required_build_suffix = "_upload" + required_build_name = base_workflow_name + required_build_suffix + + smoke_suffix = f"smoke_test_{pydistro}" + d = { + "name": f"{base_workflow_name}_{smoke_suffix}", + "requires": [required_build_name], + "python_version": python_version, + } + + if filter_branch: + d["filters"] = gen_filter_branch_tree(filter_branch) + + return {f"smoke_test_{os_type}_{pydistro}": d} + + +def indent(indentation, data_list): + return ("\n" + " " * indentation).join( + yaml.dump(data_list, default_flow_style=False).splitlines()) + + +def unittest_workflows(indentation=6): + jobs = [] + for os_type in ["linux", "windows", "macos"]: + for device_type in ["cpu", "gpu"]: + if os_type == "macos" and device_type == "gpu": + continue + for i, python_version in enumerate(PYTHON_VERSIONS): + job = { + "name": f"unittest_{os_type}_{device_type}_py{python_version}", + "python_version": python_version, + } + + if device_type == 'gpu': + if python_version != "3.8": + job['filters'] = gen_filter_branch_tree('master', 'nightly') + job['cu_version'] = 'cu102' + else: + job['cu_version'] = 'cpu' + + jobs.append({f"unittest_{os_type}_{device_type}": job}) + + return indent(indentation, jobs) + + +def cmake_workflows(indentation=6): + jobs = [] + python_version = '3.8' + for os_type in ['linux', 'windows', 'macos']: + # Skip OSX CUDA + device_types = ['cpu', 'gpu'] if os_type != 'macos' else ['cpu'] + for device in device_types: + job = { + 'name': f'cmake_{os_type}_{device}', + 'python_version': python_version 
+ } + + job['cu_version'] = 'cu102' if device == 'gpu' else 'cpu' + if device == 'gpu' and os_type == 'linux': + job['wheel_docker_image'] = 'pytorch/manylinux-cuda102' + jobs.append({f'cmake_{os_type}_{device}': job}) + return indent(indentation, jobs) + + +def ios_workflows(indentation=6, nightly=False): + jobs = [] + build_job_names = [] + name_prefix = "nightly_" if nightly else "" + env_prefix = "nightly-" if nightly else "" + for arch, platform in [('x86_64', 'SIMULATOR'), ('arm64', 'OS')]: + name = f'{name_prefix}binary_libtorchvision_ops_ios_12.0.0_{arch}' + build_job_names.append(name) + build_job = { + 'build_environment': f'{env_prefix}binary-libtorchvision_ops-ios-12.0.0-{arch}', + 'ios_arch': arch, + 'ios_platform': platform, + 'name': name, + } + if nightly: + build_job['filters'] = gen_filter_branch_tree('nightly') + jobs.append({'binary_ios_build': build_job}) + + if nightly: + upload_job = { + 'build_environment': f'{env_prefix}binary-libtorchvision_ops-ios-12.0.0-upload', + 'context': 'org-member', + 'filters': gen_filter_branch_tree('nightly'), + 'requires': build_job_names, + } + jobs.append({'binary_ios_upload': upload_job}) + return indent(indentation, jobs) + + +def android_workflows(indentation=6, nightly=False): + jobs = [] + build_job_names = [] + name_prefix = "nightly_" if nightly else "" + env_prefix = "nightly-" if nightly else "" + + name = f'{name_prefix}binary_libtorchvision_ops_android' + build_job_names.append(name) + build_job = { + 'build_environment': f'{env_prefix}binary-libtorchvision_ops-android', + 'name': name, + } + + if nightly: + upload_job = { + 'build_environment': f'{env_prefix}binary-libtorchvision_ops-android-upload', + 'context': 'org-member', + 'filters': gen_filter_branch_tree('nightly'), + 'name': f'{name_prefix}binary_libtorchvision_ops_android_upload' + } + jobs.append({'binary_android_upload': upload_job}) + else: + jobs.append({'binary_android_build': build_job}) + return indent(indentation, jobs) + + +if __name__ == "__main__": + d = os.path.dirname(__file__) + env = jinja2.Environment( + loader=jinja2.FileSystemLoader(d), + lstrip_blocks=True, + autoescape=select_autoescape(enabled_extensions=('html', 'xml')), + keep_trailing_newline=True, + ) + + with open(os.path.join(d, 'config.yml'), 'w') as f: + f.write(env.get_template('config.yml.in').render( + build_workflows=build_workflows, + unittest_workflows=unittest_workflows, + cmake_workflows=cmake_workflows, + ios_workflows=ios_workflows, + android_workflows=android_workflows, + )) diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/scripts/vs_install_cmath.ps1 b/pretrained_model/pytorch_vision_v0.10.0/.circleci/scripts/vs_install_cmath.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..c2998eba25217ee5e27ea408b815122f8a03bab3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/scripts/vs_install_cmath.ps1 @@ -0,0 +1,5 @@ +$CMATH_DOWNLOAD_LINK = "https://raw.githubusercontent.com/microsoft/STL/12c684bba78f9b032050526abdebf14f58ca26a3/stl/inc/cmath" +$VC14_28_INSTALL_PATH="C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Tools\MSVC\14.28.29910\include" + +curl.exe --retry 3 -kL $CMATH_DOWNLOAD_LINK --output "$home\cmath" +Move-Item -Path "$home\cmath" -Destination "$VC14_28_INSTALL_PATH" -Force diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/smoke_test/docker/Dockerfile b/pretrained_model/pytorch_vision_v0.10.0/.circleci/smoke_test/docker/Dockerfile new file mode 100644 index 
0000000000000000000000000000000000000000..e2227cf5e8b537e3e146baa2a4983a4b148abbff --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/smoke_test/docker/Dockerfile @@ -0,0 +1,36 @@ +# this Dockerfile is for torchvision smoke test, it will be created periodically via CI system +# if you need to do it locally, follow below steps once you have Docker installed +# assuming you're within the directory where this Dockerfile located +# $ docker build . -t torchvision/smoketest + +# if you want to push to aws ecr, make sure you have the rights to write to ECR, then run +# $ eval $(aws ecr get-login --region us-east-1 --no-include-email) +# $ export MYTAG=localbuild ## you can choose whatever tag you like +# $ docker tag torchvision/smoketest 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchvision/smoke_test:${MYTAG} +# $ docker push 308535385114.dkr.ecr.us-east-1.amazonaws.com/torchvision/smoke_test:${MYTAG} + +FROM ubuntu:latest + +RUN apt-get -qq update && apt-get -qq -y install curl bzip2 libsox-fmt-all \ + && curl -sSL https://repo.continuum.io/miniconda/Miniconda3-latest-Linux-x86_64.sh -o /tmp/miniconda.sh \ + && bash /tmp/miniconda.sh -bfp /usr/local \ + && rm -rf /tmp/miniconda.sh \ + && conda install -y python=3 \ + && conda update conda \ + && apt-get -qq -y remove curl bzip2 \ + && apt-get -qq -y autoremove \ + && apt-get autoclean \ + && rm -rf /var/lib/apt/lists/* /var/log/dpkg.log \ + && conda clean --all --yes + +ENV PATH /opt/conda/bin:$PATH + +RUN conda create -y --name python3.6 python=3.6 +RUN conda create -y --name python3.7 python=3.7 +RUN conda create -y --name python3.8 python=3.8 +SHELL [ "/bin/bash", "-c" ] +RUN echo "source /usr/local/etc/profile.d/conda.sh" >> ~/.bashrc +RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.6 && conda install -y Pillow>=5.3.0 +RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.7 && conda install -y Pillow>=5.3.0 +RUN source /usr/local/etc/profile.d/conda.sh && conda activate python3.8 && conda install -y Pillow>=5.3.0 +CMD [ "/bin/bash"] diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_build.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_build.sh new file mode 100644 index 0000000000000000000000000000000000000000..0d8c0d47d8a624bcf4cf4c43492f2a92d97b771f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_build.sh @@ -0,0 +1,27 @@ +#!/bin/bash +set -ex -o pipefail + +echo "DIR: $(pwd)" +echo "ANDROID_HOME=${ANDROID_HOME}" +echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" +echo "JAVA_HOME=${JAVA_HOME}" + +WORKSPACE=/home/circleci/workspace +VISION_ANDROID=/home/circleci/project/android + +. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh + +GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties +rm -f $GRADLE_LOCAL_PROPERTIES + +echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES +echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES + +echo "GRADLE_PATH $GRADLE_PATH" +echo "GRADLE_HOME $GRADLE_HOME" + +${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} assemble || true + +mkdir -p ~/workspace/artifacts +find . -type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz +find . 
-type f -name *apk -print | xargs tar cfvz ~/workspace/artifacts/artifacts-apks.tgz diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_upload.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_upload.sh new file mode 100644 index 0000000000000000000000000000000000000000..1472a877d9001c6f24d1a26da26284dcc73bc27c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/binary_android_upload.sh @@ -0,0 +1,34 @@ +#!/bin/bash +set -ex -o pipefail + +echo "DIR: $(pwd)" +echo "ANDROID_HOME=${ANDROID_HOME}" +echo "ANDROID_NDK_HOME=${ANDROID_NDK_HOME}" +echo "JAVA_HOME=${JAVA_HOME}" + +WORKSPACE=/home/circleci/workspace +VISION_ANDROID=/home/circleci/project/android + +. /home/circleci/project/.circleci/unittest/android/scripts/install_gradle.sh + +GRADLE_LOCAL_PROPERTIES=${VISION_ANDROID}/local.properties +rm -f $GRADLE_LOCAL_PROPERTIES +GRADLE_PROPERTIES=/home/circleci/project/android/gradle.properties + +echo "sdk.dir=${ANDROID_HOME}" >> $GRADLE_LOCAL_PROPERTIES +echo "ndk.dir=${ANDROID_NDK_HOME}" >> $GRADLE_LOCAL_PROPERTIES + +echo "SONATYPE_NEXUS_USERNAME=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES +echo "mavenCentralRepositoryUsername=${SONATYPE_NEXUS_USERNAME}" >> $GRADLE_PROPERTIES +echo "SONATYPE_NEXUS_PASSWORD=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES +echo "mavenCentralRepositoryPassword=${SONATYPE_NEXUS_PASSWORD}" >> $GRADLE_PROPERTIES + +echo "signing.keyId=${ANDROID_SIGN_KEY}" >> $GRADLE_PROPERTIES +echo "signing.password=${ANDROID_SIGN_PASS}" >> $GRADLE_PROPERTIES + +cat /home/circleci/project/android/gradle.properties | grep VERSION + +${GRADLE_PATH} --scan --stacktrace --debug --no-daemon -p ${VISION_ANDROID} ops:uploadArchives + +mkdir -p ~/workspace/artifacts +find . 
-type f -name *aar -print | xargs tar cfvz ~/workspace/artifacts/artifacts-aars.tgz diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/install_gradle.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/install_gradle.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f803abfa949d95ec3d742f678ad4471b77c9854 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/android/scripts/install_gradle.sh @@ -0,0 +1,19 @@ +#!/bin/bash +set -ex + +_https_amazon_aws=https://ossci-android.s3.amazonaws.com +GRADLE_VERSION=6.8.3 + +_gradle_home=/opt/gradle +sudo rm -rf $gradle_home +sudo mkdir -p $_gradle_home + +curl --silent --output /tmp/gradle.zip --retry 3 $_https_amazon_aws/gradle-${GRADLE_VERSION}-bin.zip + +sudo unzip -q /tmp/gradle.zip -d $_gradle_home +rm /tmp/gradle.zip + +sudo chmod -R 777 $_gradle_home + +export GRADLE_HOME=$_gradle_home/gradle-$GRADLE_VERSION +export GRADLE_PATH=${GRADLE_HOME}/bin/gradle diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_build.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_build.sh new file mode 100644 index 0000000000000000000000000000000000000000..e2ad7b0c55faa836d9cadfceca964490833d5391 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_build.sh @@ -0,0 +1,47 @@ +#!/bin/bash +set -ex -o pipefail + +echo "" +echo "DIR: $(pwd)" +WORKSPACE=/Users/distiller/workspace +PROJ_ROOT_IOS=/Users/distiller/project/ios +PYTORCH_IOS_NIGHTLY_NAME=libtorch_ios_nightly_build.zip +export TCLLIBPATH="/usr/local/lib" + +# install conda +curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh +chmod +x ~/conda.sh +/bin/bash ~/conda.sh -b -p ~/anaconda +export PATH="~/anaconda/bin:${PATH}" +source ~/anaconda/bin/activate + +# install dependencies +conda install numpy ninja pyyaml mkl mkl-include setuptools cmake cffi requests typing_extensions wget --yes +conda install -c conda-forge valgrind --yes +export CMAKE_PREFIX_PATH=${CONDA_PREFIX:-"$(dirname $(which conda))/../"} + +# sync submodules +cd ${PROJ_ROOT_IOS} +git submodule sync +git submodule update --init --recursive + +# download pytorch-iOS nightly build and unzip it +mkdir -p ${PROJ_ROOT_IOS}/lib +mkdir -p ${PROJ_ROOT_IOS}/build +mkdir -p ${PROJ_ROOT_IOS}/pytorch +TORCH_ROOT="${PROJ_ROOT_IOS}/pytorch" + +cd ${TORCH_ROOT} +wget https://ossci-ios-build.s3.amazonaws.com/${PYTORCH_IOS_NIGHTLY_NAME} +mkdir -p ./build_ios +unzip -d ./build_ios ./${PYTORCH_IOS_NIGHTLY_NAME} + +LIBTORCH_HEADER_ROOT="${TORCH_ROOT}/build_ios/install/include" +cd ${PROJ_ROOT_IOS} +IOS_ARCH=${IOS_ARCH} LIBTORCH_HEADER_ROOT=${LIBTORCH_HEADER_ROOT} ./build_ios.sh +rm -rf ${TORCH_ROOT} + +# store the binary +DEST_DIR=${WORKSPACE}/ios/${IOS_ARCH} +mkdir -p ${DEST_DIR} +cp ${PROJ_ROOT_IOS}/lib/*.a ${DEST_DIR} diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_upload.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_upload.sh new file mode 100644 index 0000000000000000000000000000000000000000..ce56388e5da417a4b240b5c0389fef8439cb2510 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/ios/scripts/binary_ios_upload.sh @@ -0,0 +1,42 @@ +#!/bin/bash +set -ex -o pipefail + +echo "" +echo "DIR: $(pwd)" + +WORKSPACE=/Users/distiller/workspace 
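+# This script lipo-merges the per-arch static libraries produced by the build
+# jobs into a single fat libtorchvision_ops.a, zips it together with the LICENSE
+# and a version stamp, and uploads the archive to the ossci-ios-build S3 bucket.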
+PROJ_ROOT=/Users/distiller/project +ARTIFACTS_DIR=${WORKSPACE}/ios +ls ${ARTIFACTS_DIR} +ZIP_DIR=${WORKSPACE}/zip +mkdir -p ${ZIP_DIR}/install/lib + +# build a FAT bianry +cd ${ZIP_DIR}/install/lib +libs=("${ARTIFACTS_DIR}/x86_64/libtorchvision_ops.a" "${ARTIFACTS_DIR}/arm64/libtorchvision_ops.a") +lipo -create "${libs[@]}" -o ${ZIP_DIR}/install/lib/libtorchvision_ops.a +lipo -i ${ZIP_DIR}/install/lib/*.a + +# copy the license +cp ${PROJ_ROOT}/LICENSE ${ZIP_DIR}/ +# zip the library +ZIPFILE=libtorchvision_ops_ios_nightly_build.zip +cd ${ZIP_DIR} +#for testing +touch version.txt +echo $(date +%s) > version.txt +zip -r ${ZIPFILE} install version.txt LICENSE + +# upload to aws +# Install conda then 'conda install' awscli +curl --retry 3 -o ~/conda.sh https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh +chmod +x ~/conda.sh +/bin/bash ~/conda.sh -b -p ~/anaconda +export PATH="~/anaconda/bin:${PATH}" +source ~/anaconda/bin/activate +conda install -c conda-forge awscli --yes +set +x +export AWS_ACCESS_KEY_ID=${AWS_S3_ACCESS_KEY_FOR_PYTORCH_BINARY_UPLOAD} +export AWS_SECRET_ACCESS_KEY=${AWS_S3_ACCESS_SECRET_FOR_PYTORCH_BINARY_UPLOAD} +set -x +aws s3 cp ${ZIPFILE} s3://ossci-ios-build/ --acl public-read diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/environment.yml b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..fcf61a6e2f86f2cf93f763a93b1af632597caf9a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/environment.yml @@ -0,0 +1,18 @@ +channels: + - pytorch + - defaults + # using conda-forge for python v3.9 + - conda-forge +dependencies: + - pytest + - pytest-cov + - pip + - libpng + # NOTE: Pinned to fix issues with size_t on Windows + - jpeg <=9b + - ca-certificates + - pip: + - future + - pillow >=5.3.0 + - scipy + - av diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/install.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/install.sh new file mode 100644 index 0000000000000000000000000000000000000000..7058e4d70958f97d6598f8f31668e8da5b5eb6bd --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/install.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash + +unset PYTORCH_VERSION +# For unittest, nightly PyTorch is used as the following section, +# so no need to set PYTORCH_VERSION. +# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env + +if [ "${CU_VERSION:-}" == cpu ] ; then + cudatoolkit="cpuonly" +else + if [[ ${#CU_VERSION} -eq 4 ]]; then + CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" + elif [[ ${#CU_VERSION} -eq 5 ]]; then + CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi + echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION" + version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" + cudatoolkit="cudatoolkit=${version}" +fi + +printf "Installing PyTorch with %s\n" "${cudatoolkit}" +conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" pytest + +if [ $PYTHON_VERSION == "3.6" ]; then + printf "Installing minimal PILLOW version\n" + # Install the minimal PILLOW version. 
Otherwise, let setup.py install the latest + pip install pillow>=5.3.0 +fi + +printf "* Installing torchvision\n" +python setup.py develop diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/post_process.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/post_process.sh new file mode 100644 index 0000000000000000000000000000000000000000..e97bf2a7b1b19fe99eaf0889a157f46c38cc0060 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/post_process.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run-clang-format.py b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run-clang-format.py new file mode 100644 index 0000000000000000000000000000000000000000..7bbd1acd0f4b6e8a03f37b91e43af739e4428551 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run-clang-format.py @@ -0,0 +1,363 @@ +#!/usr/bin/env python +""" +MIT License + +Copyright (c) 2017 Guillaume Papin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +""" +"""A wrapper script around clang-format, suitable for linting multiple files +and to use for continuous integration. + +This is an alternative API for the clang-format command line. +It runs over multiple files and directories in parallel. +A diff output is produced and a sensible exit code is returned. + +""" + +import argparse +import codecs +import difflib +import fnmatch +import io +import multiprocessing +import os +import signal +import subprocess +import sys +import traceback + +from functools import partial + +try: + from subprocess import DEVNULL # py3k +except ImportError: + DEVNULL = open(os.devnull, "wb") + + +DEFAULT_EXTENSIONS = 'c,h,C,H,cpp,hpp,cc,hh,c++,h++,cxx,hxx,cu' + + +class ExitStatus: + SUCCESS = 0 + DIFF = 1 + TROUBLE = 2 + + +def list_files(files, recursive=False, extensions=None, exclude=None): + if extensions is None: + extensions = [] + if exclude is None: + exclude = [] + + out = [] + for file in files: + if recursive and os.path.isdir(file): + for dirpath, dnames, fnames in os.walk(file): + fpaths = [os.path.join(dirpath, fname) for fname in fnames] + for pattern in exclude: + # os.walk() supports trimming down the dnames list + # by modifying it in-place, + # to avoid unnecessary directory listings. 
+ dnames[:] = [ + x for x in dnames + if + not fnmatch.fnmatch(os.path.join(dirpath, x), pattern) + ] + fpaths = [ + x for x in fpaths if not fnmatch.fnmatch(x, pattern) + ] + for f in fpaths: + ext = os.path.splitext(f)[1][1:] + if ext in extensions: + out.append(f) + else: + out.append(file) + return out + + +def make_diff(file, original, reformatted): + return list( + difflib.unified_diff( + original, + reformatted, + fromfile='{}\t(original)'.format(file), + tofile='{}\t(reformatted)'.format(file), + n=3)) + + +class DiffError(Exception): + def __init__(self, message, errs=None): + super(DiffError, self).__init__(message) + self.errs = errs or [] + + +class UnexpectedError(Exception): + def __init__(self, message, exc=None): + super(UnexpectedError, self).__init__(message) + self.formatted_traceback = traceback.format_exc() + self.exc = exc + + +def run_clang_format_diff_wrapper(args, file): + try: + ret = run_clang_format_diff(args, file) + return ret + except DiffError: + raise + except Exception as e: + raise UnexpectedError('{}: {}: {}'.format(file, e.__class__.__name__, + e), e) + + +def run_clang_format_diff(args, file): + try: + with io.open(file, 'r', encoding='utf-8') as f: + original = f.readlines() + except IOError as exc: + raise DiffError(str(exc)) + invocation = [args.clang_format_executable, file] + + # Use of utf-8 to decode the process output. + # + # Hopefully, this is the correct thing to do. + # + # It's done due to the following assumptions (which may be incorrect): + # - clang-format will returns the bytes read from the files as-is, + # without conversion, and it is already assumed that the files use utf-8. + # - if the diagnostics were internationalized, they would use utf-8: + # > Adding Translations to Clang + # > + # > Not possible yet! + # > Diagnostic strings should be written in UTF-8, + # > the client can translate to the relevant code page if needed. + # > Each translation completely replaces the format string + # > for the diagnostic. 
+ # > -- http://clang.llvm.org/docs/InternalsManual.html#internals-diag-translation + + try: + proc = subprocess.Popen( + invocation, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + encoding='utf-8') + except OSError as exc: + raise DiffError( + "Command '{}' failed to start: {}".format( + subprocess.list2cmdline(invocation), exc + ) + ) + proc_stdout = proc.stdout + proc_stderr = proc.stderr + + # hopefully the stderr pipe won't get full and block the process + outs = list(proc_stdout.readlines()) + errs = list(proc_stderr.readlines()) + proc.wait() + if proc.returncode: + raise DiffError( + "Command '{}' returned non-zero exit status {}".format( + subprocess.list2cmdline(invocation), proc.returncode + ), + errs, + ) + return make_diff(file, original, outs), errs + + +def bold_red(s): + return '\x1b[1m\x1b[31m' + s + '\x1b[0m' + + +def colorize(diff_lines): + def bold(s): + return '\x1b[1m' + s + '\x1b[0m' + + def cyan(s): + return '\x1b[36m' + s + '\x1b[0m' + + def green(s): + return '\x1b[32m' + s + '\x1b[0m' + + def red(s): + return '\x1b[31m' + s + '\x1b[0m' + + for line in diff_lines: + if line[:4] in ['--- ', '+++ ']: + yield bold(line) + elif line.startswith('@@ '): + yield cyan(line) + elif line.startswith('+'): + yield green(line) + elif line.startswith('-'): + yield red(line) + else: + yield line + + +def print_diff(diff_lines, use_color): + if use_color: + diff_lines = colorize(diff_lines) + sys.stdout.writelines(diff_lines) + + +def print_trouble(prog, message, use_colors): + error_text = 'error:' + if use_colors: + error_text = bold_red(error_text) + print("{}: {} {}".format(prog, error_text, message), file=sys.stderr) + + +def main(): + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + '--clang-format-executable', + metavar='EXECUTABLE', + help='path to the clang-format executable', + default='clang-format') + parser.add_argument( + '--extensions', + help='comma separated list of file extensions (default: {})'.format( + DEFAULT_EXTENSIONS), + default=DEFAULT_EXTENSIONS) + parser.add_argument( + '-r', + '--recursive', + action='store_true', + help='run recursively over directories') + parser.add_argument('files', metavar='file', nargs='+') + parser.add_argument( + '-q', + '--quiet', + action='store_true') + parser.add_argument( + '-j', + metavar='N', + type=int, + default=0, + help='run N clang-format jobs in parallel' + ' (default number of cpus + 1)') + parser.add_argument( + '--color', + default='auto', + choices=['auto', 'always', 'never'], + help='show colored diff (default: auto)') + parser.add_argument( + '-e', + '--exclude', + metavar='PATTERN', + action='append', + default=[], + help='exclude paths matching the given glob-like pattern(s)' + ' from recursive search') + + args = parser.parse_args() + + # use default signal handling, like diff return SIGINT value on ^C + # https://bugs.python.org/issue14229#msg156446 + signal.signal(signal.SIGINT, signal.SIG_DFL) + try: + signal.SIGPIPE + except AttributeError: + # compatibility, SIGPIPE does not exist on Windows + pass + else: + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + + colored_stdout = False + colored_stderr = False + if args.color == 'always': + colored_stdout = True + colored_stderr = True + elif args.color == 'auto': + colored_stdout = sys.stdout.isatty() + colored_stderr = sys.stderr.isatty() + + version_invocation = [args.clang_format_executable, str("--version")] + try: + subprocess.check_call(version_invocation, stdout=DEVNULL) + except 
subprocess.CalledProcessError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + return ExitStatus.TROUBLE + except OSError as e: + print_trouble( + parser.prog, + "Command '{}' failed to start: {}".format( + subprocess.list2cmdline(version_invocation), e + ), + use_colors=colored_stderr, + ) + return ExitStatus.TROUBLE + + retcode = ExitStatus.SUCCESS + files = list_files( + args.files, + recursive=args.recursive, + exclude=args.exclude, + extensions=args.extensions.split(',')) + + if not files: + return + + njobs = args.j + if njobs == 0: + njobs = multiprocessing.cpu_count() + 1 + njobs = min(len(files), njobs) + + if njobs == 1: + # execute directly instead of in a pool, + # less overhead, simpler stacktraces + it = (run_clang_format_diff_wrapper(args, file) for file in files) + pool = None + else: + pool = multiprocessing.Pool(njobs) + it = pool.imap_unordered( + partial(run_clang_format_diff_wrapper, args), files) + while True: + try: + outs, errs = next(it) + except StopIteration: + break + except DiffError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + retcode = ExitStatus.TROUBLE + sys.stderr.writelines(e.errs) + except UnexpectedError as e: + print_trouble(parser.prog, str(e), use_colors=colored_stderr) + sys.stderr.write(e.formatted_traceback) + retcode = ExitStatus.TROUBLE + # stop at the first unexpected error, + # something could be very wrong, + # don't process all files unnecessarily + if pool: + pool.terminate() + break + else: + sys.stderr.writelines(errs) + if outs == []: + continue + if not args.quiet: + print_diff(outs, use_color=colored_stdout) + if retcode == ExitStatus.SUCCESS: + retcode = ExitStatus.DIFF + return retcode + + +if __name__ == '__main__': + sys.exit(main()) diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run_test.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..419b9eb562c8c9318d2fc82666d0ff895d6e2eaa --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/run_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/bin/conda shell.bash hook)" +conda activate ./env + +export PYTORCH_TEST_WITH_SLOW='1' +python -m torch.utils.collect_env +pytest --cov=torchvision --junitxml=test-results/junit.xml -v --durations 20 test --ignore=test/test_datasets_download.py diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/setup_env.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/setup_env.sh new file mode 100644 index 0000000000000000000000000000000000000000..773bd78f202faa3dfa2dd1b97fe78281d7532e8a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/linux/scripts/setup_env.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash + +# This script is for setting up environment in which unit test is ran. +# To speed up the CI time, the resulting environment is cached. +# +# Do not install PyTorch and torchvision here, otherwise they also get cached. + +set -e + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +root_dir="$(git rev-parse --show-toplevel)" +conda_dir="${root_dir}/conda" +env_dir="${root_dir}/env" + +cd "${root_dir}" + +case "$(uname -s)" in + Darwin*) os=MacOSX;; + *) os=Linux +esac + +# 1. Install conda at ./conda +if [ ! 
-d "${conda_dir}" ]; then + printf "* Installing conda\n" + wget -O miniconda.sh "http://repo.continuum.io/miniconda/Miniconda3-latest-${os}-x86_64.sh" + bash ./miniconda.sh -b -f -p "${conda_dir}" +fi +eval "$(${conda_dir}/bin/conda shell.bash hook)" + +# 2. Create test environment at ./env +if [ ! -d "${env_dir}" ]; then + printf "* Creating a test environment\n" + conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" +fi +conda activate "${env_dir}" + +# 3. Install Conda dependencies +printf "* Installing dependencies (except PyTorch)\n" +FFMPEG_PIN="=4.2" +if [[ "${PYTHON_VERSION}" = "3.9" ]]; then + FFMPEG_PIN=">=4.2" +fi + +conda install -y -c pytorch "ffmpeg${FFMPEG_PIN}" +conda env update --file "${this_dir}/environment.yml" --prune diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/environment.yml b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/environment.yml new file mode 100644 index 0000000000000000000000000000000000000000..9a916a27d077c9e6fbcb0bb2f7ff533a50c283a2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/environment.yml @@ -0,0 +1,19 @@ +channels: + - pytorch + - defaults + # use conda-forge for python v3.9+ + - conda-forge +dependencies: + - pytest + - pytest-cov + - pip + - libpng + # NOTE: Pinned to fix issues with size_t on Windows + - jpeg <=9b + - ca-certificates + - pip: + - future + - pillow >=5.3.0 + - scipy + - av + - dataclasses diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install.sh new file mode 100644 index 0000000000000000000000000000000000000000..a90e88a71d4efde69a8736be2472f34820375f53 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +unset PYTORCH_VERSION +# For unittest, nightly PyTorch is used as the following section, +# so no need to set PYTORCH_VERSION. +# In fact, keeping PYTORCH_VERSION forces us to hardcode PyTorch version in config. + +set -e + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env + +if [ "${CU_VERSION:-}" == cpu ] ; then + cudatoolkit="cpuonly" +else + if [[ ${#CU_VERSION} -eq 4 ]]; then + CUDA_VERSION="${CU_VERSION:2:1}.${CU_VERSION:3:1}" + elif [[ ${#CU_VERSION} -eq 5 ]]; then + CUDA_VERSION="${CU_VERSION:2:2}.${CU_VERSION:4:1}" + fi + echo "Using CUDA $CUDA_VERSION as determined by CU_VERSION" + version="$(python -c "print('.'.join(\"${CUDA_VERSION}\".split('.')[:2]))")" + cudatoolkit="cudatoolkit=${version}" +fi + +printf "Installing PyTorch with %s\n" "${cudatoolkit}" +conda install -y -c "pytorch-${UPLOAD_CHANNEL}" -c conda-forge "pytorch-${UPLOAD_CHANNEL}"::pytorch "${cudatoolkit}" pytest + +if [ $PYTHON_VERSION == "3.6" ]; then + printf "Installing minimal PILLOW version\n" + # Install the minimal PILLOW version. 
Otherwise, let setup.py install the latest + pip install pillow>=5.3.0 +fi + +printf "* Installing torchvision\n" +"$this_dir/vc_env_helper.bat" python setup.py develop diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install_conda.bat b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install_conda.bat new file mode 100644 index 0000000000000000000000000000000000000000..6612fba56f63aa006867f2da08a22809cf569eac --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/install_conda.bat @@ -0,0 +1 @@ +start /wait "" "%miniconda_exe%" /S /InstallationType=JustMe /RegisterPython=0 /AddToPath=0 /D=%tmp_conda% \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/post_process.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/post_process.sh new file mode 100644 index 0000000000000000000000000000000000000000..5c5cbb758a9ef2b235e6e5af308bef77fc26a253 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/post_process.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/run_test.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/run_test.sh new file mode 100644 index 0000000000000000000000000000000000000000..96d9cbd6b2d5c52b225f5a78d87db2e362af2cf5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/run_test.sh @@ -0,0 +1,10 @@ +#!/usr/bin/env bash + +set -e + +eval "$(./conda/Scripts/conda.exe 'shell.bash' 'hook')" +conda activate ./env + +export PYTORCH_TEST_WITH_SLOW='1' +python -m torch.utils.collect_env +pytest --cov=torchvision --junitxml=test-results/junit.xml -v --durations 20 test --ignore=test/test_datasets_download.py diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/setup_env.sh b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/setup_env.sh new file mode 100644 index 0000000000000000000000000000000000000000..b0b70631112046579ae570a7733b55b54895a9c4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/setup_env.sh @@ -0,0 +1,39 @@ +#!/usr/bin/env bash + +# This script is for setting up environment in which unit test is ran. +# To speed up the CI time, the resulting environment is cached. +# +# Do not install PyTorch and torchvision here, otherwise they also get cached. + +set -e + +this_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +root_dir="$(git rev-parse --show-toplevel)" +conda_dir="${root_dir}/conda" +env_dir="${root_dir}/env" + +cd "${root_dir}" + +# 1. Install conda at ./conda +if [ ! -d "${conda_dir}" ]; then + printf "* Installing conda\n" + export tmp_conda="$(echo $conda_dir | tr '/' '\\')" + export miniconda_exe="$(echo $root_dir | tr '/' '\\')\\miniconda.exe" + curl --output miniconda.exe https://repo.anaconda.com/miniconda/Miniconda3-latest-Windows-x86_64.exe -O + "$this_dir/install_conda.bat" + unset tmp_conda + unset miniconda_exe +fi + +eval "$(${conda_dir}/Scripts/conda.exe 'shell.bash' 'hook')" + +# 2. Create test environment at ./env +if [ ! 
-d "${env_dir}" ]; then + printf "* Creating a test environment\n" + conda create --prefix "${env_dir}" -y python="$PYTHON_VERSION" +fi +conda activate "${env_dir}" + +# 3. Install Conda dependencies +printf "* Installing dependencies (except PyTorch)\n" +conda env update --file "${this_dir}/environment.yml" --prune diff --git a/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/vc_env_helper.bat b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/vc_env_helper.bat new file mode 100644 index 0000000000000000000000000000000000000000..9410135677a4fdc1113d96c5a422583992c688c3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.circleci/unittest/windows/scripts/vc_env_helper.bat @@ -0,0 +1,39 @@ +@echo on + +set VC_VERSION_LOWER=16 +set VC_VERSION_UPPER=17 + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VS15INSTALLDIR=%%i" + set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" + goto vswhere + ) +) + +:vswhere +if "%VSDEVCMD_ARGS%" == "" ( + call "%VS15VCVARSALL%" x64 || exit /b 1 +) else ( + call "%VS15VCVARSALL%" x64 %VSDEVCMD_ARGS% || exit /b 1 +) + +@echo on + +set DISTUTILS_USE_SDK=1 + +set args=%1 +shift +:start +if [%1] == [] goto done +set args=%args% %1 +shift +goto start + +:done +if "%args%" == "" ( + echo Usage: vc_env_helper.bat [command] [args] + echo e.g. vc_env_helper.bat cl /c test.cpp +) + +%args% || exit /b 1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/.clang-format b/pretrained_model/pytorch_vision_v0.10.0/.clang-format new file mode 100644 index 0000000000000000000000000000000000000000..6d0ab740db4bd2ce6debe0008785a7d7c7468461 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.clang-format @@ -0,0 +1,88 @@ +--- +AccessModifierOffset: -1 +AlignAfterOpenBracket: AlwaysBreak +AlignConsecutiveAssignments: false +AlignConsecutiveDeclarations: false +AlignEscapedNewlinesLeft: true +AlignOperands: false +AlignTrailingComments: false +AllowAllParametersOfDeclarationOnNextLine: false +AllowShortBlocksOnASingleLine: false +AllowShortCaseLabelsOnASingleLine: false +AllowShortFunctionsOnASingleLine: Empty +AllowShortIfStatementsOnASingleLine: false +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterReturnType: None +AlwaysBreakBeforeMultilineStrings: true +AlwaysBreakTemplateDeclarations: true +BinPackArguments: false +BinPackParameters: false +BraceWrapping: + AfterClass: false + AfterControlStatement: false + AfterEnum: false + AfterFunction: false + AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + IndentBraces: false +BreakBeforeBinaryOperators: None +BreakBeforeBraces: Attach +BreakBeforeTernaryOperators: true +BreakConstructorInitializersBeforeComma: false +BreakAfterJavaFieldAnnotations: false +BreakStringLiterals: false +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +#CompactNamespaces: false +ConstructorInitializerAllOnOneLineOrOnePerLine: true +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +ForEachMacros: [ FOR_EACH_RANGE, FOR_EACH, ] +IncludeCategories: + - Regex: '^<.*\.h(pp)?>' + Priority: 1 + - Regex: '^<.*' + Priority: 2 + - Regex: '.*' + Priority: 3 
+IndentCaseLabels: true +IndentWidth: 2 +IndentWrappedFunctionNames: false +KeepEmptyLinesAtTheStartOfBlocks: false +MacroBlockBegin: '' +MacroBlockEnd: '' +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBlockIndentWidth: 2 +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: false +PenaltyBreakBeforeFirstCallParameter: 1 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakString: 1000 +PenaltyExcessCharacter: 1000000 +PenaltyReturnTypeOnItsOwnLine: 2000000 +PointerAlignment: Left +ReflowComments: true +SortIncludes: true +SpaceAfterCStyleCast: false +SpaceBeforeAssignmentOperators: true +SpaceBeforeParens: ControlStatements +SpaceInEmptyParentheses: false +SpacesBeforeTrailingComments: 1 +SpacesInAngles: false +SpacesInContainerLiterals: true +SpacesInCStyleCastParentheses: false +SpacesInParentheses: false +SpacesInSquareBrackets: false +Standard: Cpp11 +TabWidth: 8 +UseTab: Never +... diff --git a/pretrained_model/pytorch_vision_v0.10.0/.coveragerc b/pretrained_model/pytorch_vision_v0.10.0/.coveragerc new file mode 100644 index 0000000000000000000000000000000000000000..c765e471155feb956dbcef3a8c7b802dedcaa562 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.coveragerc @@ -0,0 +1,7 @@ +[run] +branch = True + +[paths] +source = + torchvision + /**/site-packages/torchvision diff --git a/pretrained_model/pytorch_vision_v0.10.0/.gitattributes b/pretrained_model/pytorch_vision_v0.10.0/.gitattributes new file mode 100644 index 0000000000000000000000000000000000000000..22d0452f8d7e02ba33fa717d8a1792a76b050182 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.gitattributes @@ -0,0 +1,8 @@ +*.pkl binary +# Jupyter notebook + +# For text count +# *.ipynb text + +# To ignore it use below +*.ipynb linguist-documentation diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/bug-report.md b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/bug-report.md new file mode 100644 index 0000000000000000000000000000000000000000..7f64d09da50468fdc823237dc7b9bf34108bc2da --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/bug-report.md @@ -0,0 +1,52 @@ +--- +name: "\U0001F41B Bug Report" +about: Create a report to help us improve torchvision +title: '' +labels: '' +assignees: '' + +--- + +## 🛠Bug + +<!-- A clear and concise description of what the bug is. --> + +## To Reproduce + +Steps to reproduce the behavior: + +1. +1. +1. + +<!-- If you have a code sample, error messages, stack traces, please provide it here as well --> + +## Expected behavior + +<!-- A clear and concise description of what you expected to happen. --> + +## Environment + +Please copy and paste the output from our +[environment collection script](https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py) +(or fill out the checklist below manually). + +You can get the script and run it with: +``` +wget https://raw.githubusercontent.com/pytorch/pytorch/master/torch/utils/collect_env.py +# For security purposes, please check the contents of collect_env.py before running it. +python collect_env.py +``` + + - PyTorch / torchvision Version (e.g., 1.0 / 0.4.0): + - OS (e.g., Linux): + - How you installed PyTorch / torchvision (`conda`, `pip`, source): + - Build command you used (if compiling from source): + - Python version: + - CUDA/cuDNN version: + - GPU models and configuration: + - Any other relevant information: + +## Additional context + +<!-- Add any other context about the problem here. 
--> diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/documentation.md b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/documentation.md new file mode 100644 index 0000000000000000000000000000000000000000..a3618080a8700813e21fc89d0d68d5804bdcbbfb --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/documentation.md @@ -0,0 +1,12 @@ +--- +name: "\U0001F4DA Documentation" +about: Report an issue related to https://pytorch.org/docs +title: '' +labels: '' +assignees: '' + +--- + +## 📚 Documentation + +<!-- A clear and concise description of what content in https://pytorch.org/docs is an issue. If this has to do with the general https://pytorch.org website, please file an issue at https://github.com/pytorch/pytorch.github.io/issues/new/choose instead. If this has to do with https://pytorch.org/tutorials, please file an issue at https://github.com/pytorch/tutorials/issues/new --> diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/feature-request.md b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/feature-request.md new file mode 100644 index 0000000000000000000000000000000000000000..2187981e6d4497817682d7e09a27d147a9cf9dba --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/feature-request.md @@ -0,0 +1,27 @@ +--- +name: "\U0001F680Feature Request" +about: Submit a proposal/request for a new torchvision feature +title: '' +labels: '' +assignees: '' + +--- + +## 🚀 Feature +<!-- A clear and concise description of the feature proposal --> + +## Motivation + +<!-- Please outline the motivation for the proposal. Is your feature request related to a problem? e.g., I'm always frustrated when [...]. If this is related to another GitHub issue, please link here too --> + +## Pitch + +<!-- A clear and concise description of what you want to happen. --> + +## Alternatives + +<!-- A clear and concise description of any alternative solutions or features you've considered, if any. --> + +## Additional context + +<!-- Add any other context or screenshots about the feature request here. --> diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/questions-help-support.md b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/questions-help-support.md new file mode 100644 index 0000000000000000000000000000000000000000..fb59e084128f149af2e7a966f821958f7c8fd48a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/ISSUE_TEMPLATE/questions-help-support.md @@ -0,0 +1,16 @@ +--- +name: "â“Questions/Help/Support" +about: Do you need support? We have resources. +title: '' +labels: '' +assignees: '' + +--- + +## â“ Questions and Help + +### Please note that this issue tracker is not a help form and this issue will be closed. + +We have a set of [listed resources available on the website](https://pytorch.org/resources). 
Our primary means of support is our discussion forum: + +- [Discussion Forum](https://discuss.pytorch.org/) diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/failed_schedule_issue_template.md b/pretrained_model/pytorch_vision_v0.10.0/.github/failed_schedule_issue_template.md new file mode 100644 index 0000000000000000000000000000000000000000..5e2d77550acec19fbf6bbd149d130fa42d39532e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/failed_schedule_issue_template.md @@ -0,0 +1,13 @@ +--- +title: Scheduled workflow failed +labels: + - bug + - "module: datasets" +--- + +Oh no, something went wrong in the scheduled workflow {{ env.WORKFLOW }}/{{ env.JOB }}. +Please look into it: + +https://github.com/{{ env.REPO }}/actions/runs/{{ env.ID }} + +Feel free to close this if this was just a one-off error. diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/pytorch-probot.yml b/pretrained_model/pytorch_vision_v0.10.0/.github/pytorch-probot.yml new file mode 100644 index 0000000000000000000000000000000000000000..27d0f2a1f0b239bd5108a9ce77a81f69bb11edfe --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/pytorch-probot.yml @@ -0,0 +1 @@ +tracking_issue: 2447 diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/bandit.yml b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/bandit.yml new file mode 100644 index 0000000000000000000000000000000000000000..93bae80f9bd20730b05de994c36e19f4f3bee920 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/bandit.yml @@ -0,0 +1,23 @@ +# GitHub Actions Bandit Workflow + +name: Bandit + +on: + pull_request: + branches: [ master ] + + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + # Task will fail if any high-severity issues are found + # Ignoring submodules + - name: Run Bandit Security Analysis + run: | + python -m pip install bandit + python -m bandit -r . 
-x ./third_party -lll diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/codeql.yml b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/codeql.yml new file mode 100644 index 0000000000000000000000000000000000000000..3c8bc96a5bd458752c037c130a9a41f77c61b4fa --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/codeql.yml @@ -0,0 +1,43 @@ +# GitHub Actions CodeQL Workflow + +name: CodeQL + +on: + pull_request: + branches: [ master ] + + workflow_dispatch: + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: python, cpp + + - name: Install Ninja + run: | + sudo apt-get update -y + sudo apt-get install -y ninja-build + + - name: Update submodules + run: git submodule update --init --recursive + + - name: Install Torch + run: | + python -m pip install cmake + python -m pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + sudo ln -s /usr/bin/ninja /usr/bin/ninja-build + + - name: Build TorchVision + run: python setup.py develop --user + + # If any code scanning alerts are found, they will be under Security -> CodeQL + # Link: https://github.com/pytorch/vision/security/code-scanning + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/tests-schedule.yml b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/tests-schedule.yml new file mode 100644 index 0000000000000000000000000000000000000000..65f805ce471d09060d9d6a73597f050f47f250a2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.github/workflows/tests-schedule.yml @@ -0,0 +1,54 @@ +name: tests + +on: + pull_request: + paths: + - "test/test_datasets_download.py" + - ".github/failed_schedule_issue_template.md" + - ".github/workflows/tests-schedule.yml" + + schedule: + - cron: "0 9 * * *" + +jobs: + download: + runs-on: ubuntu-latest + + steps: + - name: Set up python + uses: actions/setup-python@v2 + with: + python-version: 3.6 + + - name: Upgrade pip + run: python -m pip install --upgrade pip + + - name: Checkout repository + uses: actions/checkout@v2 + + - name: Install torch nightly build + run: pip install --pre torch -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + + - name: Install torchvision + run: pip install -e . 
+ + - name: Install all optional dataset requirements + run: pip install scipy pandas pycocotools lmdb requests + + - name: Install tests requirements + run: pip install pytest + + - name: Run tests + run: pytest -ra -v test/test_datasets_download.py + + - uses: JasonEtco/create-an-issue@v2.4.0 + name: Create issue if download tests failed + if: failure() && github.event_name == 'schedule' + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + REPO: ${{ github.repository }} + WORKFLOW: ${{ github.workflow }} + JOB: ${{ github.job }} + ID: ${{ github.run_id }} + with: + filename: .github/failed_schedule_issue_template.md diff --git a/pretrained_model/pytorch_vision_v0.10.0/.gitignore b/pretrained_model/pytorch_vision_v0.10.0/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..3a1e8b6232a5eb9df2053c4ff6729bffc0d3bf7c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/.gitignore @@ -0,0 +1,38 @@ +build/ +dist/ +torchvision.egg-info/ +torchvision/version.py +*/**/__pycache__ +*/__pycache__ +*/*.pyc +*/**/*.pyc +*/**/**/*.pyc +*/**/*~ +*~ + +docs/build +# sphinx-gallery +docs/source/auto_examples/ +docs/source/gen_modules/ +# pytorch-sphinx-theme gets installed here +docs/src + +.coverage +htmlcov +.*.swp +*.so* +*.dylib* +*/*.so* +*/*.dylib* +*.swp +*.swo +gen.yml +.mypy_cache +.vscode/ +.idea/ +*.orig +*-checkpoint.ipynb +*.venv + +## Xcode User settings +xcuserdata/ diff --git a/pretrained_model/pytorch_vision_v0.10.0/CMakeLists.txt b/pretrained_model/pytorch_vision_v0.10.0/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2dec2de88e7758231e95869ebf0dfd0edfaa70e3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/CMakeLists.txt @@ -0,0 +1,108 @@ +cmake_minimum_required(VERSION 3.12) +project(torchvision) +set(CMAKE_CXX_STANDARD 14) +file(STRINGS version.txt TORCHVISION_VERSION) + +option(WITH_CUDA "Enable CUDA support" OFF) + +if(WITH_CUDA) + enable_language(CUDA) + add_definitions(-D__CUDA_NO_HALF_OPERATORS__) + add_definitions(-DWITH_CUDA) + set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr") +endif() + +find_package(Python3 COMPONENTS Development) + +find_package(Torch REQUIRED) +find_package(PNG REQUIRED) +find_package(JPEG REQUIRED) + +function(CUDA_CONVERT_FLAGS EXISTING_TARGET) + get_property(old_flags TARGET ${EXISTING_TARGET} PROPERTY INTERFACE_COMPILE_OPTIONS) + if(NOT "${old_flags}" STREQUAL "") + string(REPLACE ";" "," CUDA_flags "${old_flags}") + set_property(TARGET ${EXISTING_TARGET} PROPERTY INTERFACE_COMPILE_OPTIONS + "$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CXX>>:${old_flags}>$<$<BUILD_INTERFACE:$<COMPILE_LANGUAGE:CUDA>>:-Xcompiler=${CUDA_flags}>" + ) + endif() +endfunction() + +if(MSVC) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /wd4819") + if(WITH_CUDA) + set(CMAKE_CUDA_FLAGS "${CMAKE_CUDA_FLAGS} -Xcompiler=/wd4819") + foreach(diag cc_clobber_ignored integer_sign_change useless_using_declaration + set_but_not_used field_without_dll_interface + base_class_has_different_dll_interface + dll_interface_conflict_none_assumed + dll_interface_conflict_dllexport_assumed + implicit_return_from_non_void_function + unsigned_compare_with_zero + declared_but_not_referenced + bad_friend_decl) + string(APPEND CMAKE_CUDA_FLAGS " -Xcudafe --diag_suppress=${diag}") + endforeach() + CUDA_CONVERT_FLAGS(torch_cpu) + if(TARGET torch_cuda) + CUDA_CONVERT_FLAGS(torch_cuda) + endif() + if(TARGET torch_cuda_cu) + CUDA_CONVERT_FLAGS(torch_cuda_cu) + endif() + if(TARGET torch_cuda_cpp) + 
CUDA_CONVERT_FLAGS(torch_cuda_cpp) + endif() + endif() +endif() + +include(GNUInstallDirs) +include(CMakePackageConfigHelpers) + +set(TVCPP torchvision/csrc) +list(APPEND ALLOW_LISTED ${TVCPP} ${TVCPP}/io/image ${TVCPP}/io/image/cpu ${TVCPP}/models ${TVCPP}/ops + ${TVCPP}/ops/autograd ${TVCPP}/ops/cpu ${TVCPP}/io/image/cuda) +if(WITH_CUDA) + list(APPEND ALLOW_LISTED ${TVCPP}/ops/cuda ${TVCPP}/ops/autocast) +endif() + +FOREACH(DIR ${ALLOW_LISTED}) + file(GLOB ALL_SOURCES ${ALL_SOURCES} ${DIR}/*.*) +ENDFOREACH() + +add_library(${PROJECT_NAME} SHARED ${ALL_SOURCES}) +target_link_libraries(${PROJECT_NAME} PRIVATE ${TORCH_LIBRARIES} ${PNG_LIBRARY} ${JPEG_LIBRARIES} Python3::Python) +set_target_properties(${PROJECT_NAME} PROPERTIES + EXPORT_NAME TorchVision + INSTALL_RPATH ${TORCH_INSTALL_PREFIX}/lib) + +include_directories(torchvision/csrc ${JPEG_INCLUDE_DIRS} ${PNG_INCLUDE_DIRS}) + +set(TORCHVISION_CMAKECONFIG_INSTALL_DIR "share/cmake/TorchVision" CACHE STRING "install path for TorchVisionConfig.cmake") + +configure_package_config_file(cmake/TorchVisionConfig.cmake.in + "${CMAKE_CURRENT_BINARY_DIR}/TorchVisionConfig.cmake" + INSTALL_DESTINATION ${TORCHVISION_CMAKECONFIG_INSTALL_DIR}) + +write_basic_package_version_file(${CMAKE_CURRENT_BINARY_DIR}/TorchVisionConfigVersion.cmake + VERSION ${TORCHVISION_VERSION} + COMPATIBILITY AnyNewerVersion) + +install(FILES ${CMAKE_CURRENT_BINARY_DIR}/TorchVisionConfig.cmake + ${CMAKE_CURRENT_BINARY_DIR}/TorchVisionConfigVersion.cmake + DESTINATION ${TORCHVISION_CMAKECONFIG_INSTALL_DIR}) + +install(TARGETS ${PROJECT_NAME} + EXPORT TorchVisionTargets + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + ) + +install(EXPORT TorchVisionTargets + NAMESPACE TorchVision:: + DESTINATION ${TORCHVISION_CMAKECONFIG_INSTALL_DIR}) + +FOREACH(INPUT_DIR ${ALLOW_LISTED}) + string(REPLACE "${TVCPP}" "${CMAKE_INSTALL_INCLUDEDIR}/${PROJECT_NAME}" OUTPUT_DIR ${INPUT_DIR}) + file(GLOB INPUT_FILES ${INPUT_DIR}/*.*) + install(FILES ${INPUT_FILES} DESTINATION ${OUTPUT_DIR}) +ENDFOREACH() diff --git a/pretrained_model/pytorch_vision_v0.10.0/CODE_OF_CONDUCT.md b/pretrained_model/pytorch_vision_v0.10.0/CODE_OF_CONDUCT.md new file mode 100644 index 0000000000000000000000000000000000000000..b91e23b17c023f10a34c7973c6f8614eed61ad1f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/CODE_OF_CONDUCT.md @@ -0,0 +1,76 @@ +# Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to make participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, sex characteristics, gender identity and expression, +level of experience, education, socio-economic status, nationality, personal +appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or +advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic +address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a +professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or +reject comments, commits, code, wiki edits, issues, and other contributions +that are not aligned to this Code of Conduct, or to ban temporarily or +permanently any contributor for other behaviors that they deem inappropriate, +threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies within all project spaces, and it also applies when +an individual is representing the project or its community in public spaces. +Examples of representing a project or community include using an official +project e-mail address, posting via an official social media account, or acting +as an appointed representative at an online or offline event. Representation of +a project may be further defined and clarified by project maintainers. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported by contacting the project team at <conduct@pytorch.org>. All +complaints will be reviewed and investigated and will result in a response that +is deemed necessary and appropriate to the circumstances. The project team is +obligated to maintain confidentiality with regard to the reporter of an incident. +Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good +faith may face temporary or permanent repercussions as determined by other +members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, +available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html + +[homepage]: https://www.contributor-covenant.org + +For answers to common questions about this code of conduct, see +https://www.contributor-covenant.org/faq diff --git a/pretrained_model/pytorch_vision_v0.10.0/CONTRIBUTING.md b/pretrained_model/pytorch_vision_v0.10.0/CONTRIBUTING.md new file mode 100644 index 0000000000000000000000000000000000000000..748dc50df9eee006cc7fe61e1e9f72737d6eb4a5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/CONTRIBUTING.md @@ -0,0 +1,171 @@ +# Contributing to Torchvision + +We want to make contributing to this project as easy and transparent as possible. + +## TL;DR + +We appreciate all contributions. If you are interested in contributing to Torchvision, there are many ways to help out. 
+Your contributions may fall into the following categories: + +- It helps the project if you could + - Report issues you're facing + - Give a :+1: on issues that others reported and that are relevant to you + +- Answering queries on the issue tracker, investigating bugs are very valuable contributions to the project. + +- You would like to improve the documentation. This is no less important than improving the library itself! +If you find a typo in the documentation, do not hesitate to submit a GitHub pull request. + +- If you would like to fix a bug + - please pick one from the [list of open issues labelled as "help wanted"](https://github.com/pytorch/vision/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) + - comment on the issue that you want to work on this issue + - send a PR with your fix, see below. + +- If you plan to contribute new features, utility functions or extensions, please first open an issue and discuss the feature with us. + +## Issues + +We use GitHub issues to track public bugs. Please ensure your description is +clear and has sufficient instructions to be able to reproduce the issue. + +## Development installation + +### Install PyTorch Nightly + +```bash +conda install pytorch -c pytorch-nightly -c conda-forge +# or with pip (see https://pytorch.org/get-started/locally/) +# pip install numpy +# pip install --pre torch -f https://download.pytorch.org/whl/nightly/cu102/torch_nightly.html +``` + +### Install Torchvision + +```bash +git clone https://github.com/pytorch/vision.git +cd vision +python setup.py develop +# or, for OSX +# MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py develop +# for C++ debugging, please use DEBUG=1 +# DEBUG=1 python setup.py develop +pip install flake8 typing mypy pytest scipy +``` +You may also have to install `libpng-dev` and `libjpeg-turbo8-dev` libraries: +```bash +conda install libpng jpeg +``` + +## Development Process + +If you plan to modify the code or documentation, please follow the steps below: + +1. Fork the repository and create your branch from `master`. +2. If you have modified the code (new feature or bug-fix), please add unit tests. +3. If you have changed APIs, update the documentation. Make sure the documentation builds. +4. Ensure the test suite passes. +5. Make sure your code passes `flake8` formatting check. + +For more details about pull requests, +please read [GitHub's guides](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request). + +If you would like to contribute a new model, please see [here](#New-model). + +If you would like to contribute a new dataset, please see [here](#New-dataset). + +### Code formatting and typing + +New code should be compatible with Python 3.X versions and be compliant with PEP8. To check the codebase, please run +```bash +flake8 --config=setup.cfg . +``` + +The codebase has type annotations, please make sure to add type hints if required. We use `mypy` tool for type checking: +```bash +mypy --config-file mypy.ini +``` + +### Unit tests + +If you have modified the code by adding a new feature or a bug-fix, please add unit tests for that. To run a specific +test: +```bash +pytest test/<test-module.py> -vvv -k <test_myfunc> +# e.g. pytest test/test_transforms.py -vvv -k test_center_crop +``` + +If you would like to run all tests: +```bash +pytest test -vvv +``` + +Tests that require internet access should be in +`test/test_internet.py`. 
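(Editor's note, not part of the patched CONTRIBUTING.md above: for readers unfamiliar with the pytest convention the contributing guide refers to, the following is a minimal sketch of such a unit test. It assumes only the public `torchvision.transforms` API; the test name, file placement, and tensor shapes are illustrative and not taken from the repository.)

```python
# Hypothetical example of a torchvision unit test (name and shapes are illustrative only).
import torch
from torchvision import transforms


def test_center_crop_output_size():
    # A 3x32x32 tensor image center-cropped to 16 should keep its 3 channels.
    img = torch.rand(3, 32, 32)
    out = transforms.CenterCrop(16)(img)
    assert out.shape == (3, 16, 16)
```

A test like this would be run with a command of the form `pytest test/test_transforms.py -vvv -k test_center_crop_output_size`, matching the pattern shown above.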
+ +### Documentation + +Torchvision uses [Google style](http://sphinxcontrib-napoleon.readthedocs.io/en/latest/example_google.html) +for formatting docstrings. Length of line inside docstrings block must be limited to 120 characters. + +Please, follow the instructions to build and deploy the documentation locally. + +#### Install requirements + +```bash +cd docs +pip install -r requirements.txt +``` + +#### Build + +```bash +cd docs +make html +``` + +Then open `docs/build/html/index.html` in your favorite browser. + +The docs are also automatically built when you submit a PR. The job that +builds the docs is named `build_docs`. You can access the rendered docs by +clicking on that job and then going to the "Artifacts" tab. + +You can clean the built docs and re-start the build from scratch by doing ``make +clean``. + +#### Building the example gallery - or not + +When you run ``make html`` for the first time, all the examples in the gallery +will be built. Subsequent builds should be faster, and will only build the +examples that have been modified. + +You can run ``make html-noplot`` to not build the examples at all. This is +useful after a ``make clean`` to do some quick checks that are not related to +the examples. + +You can also choose to only build a subset of the examples by using the +``EXAMPLES_PATTERN`` env variable, which accepts a regular expression. For +example ``EXAMPLES_PATTERN="transforms" make html`` will only build the examples +with "transforms" in their name. + +### New model + +More details on how to add a new model will be provided later. Please, do not send any PR with a new model without discussing +it in an issue as, most likely, it will not be accepted. + +### New dataset + +More details on how to add a new dataset will be provided later. Please, do not send any PR with a new dataset without discussing +it in an issue as, most likely, it will not be accepted. + +### Pull Request + +If all previous checks (flake8, mypy, unit tests) are passing, please send a PR. Submitted PR will pass other tests on +different operation systems, python versions and hardwares. + +For more details about pull requests workflow, +please read [GitHub's guides](https://docs.github.com/en/github/collaborating-with-issues-and-pull-requests/creating-a-pull-request). + +## License + +By contributing to Torchvision, you agree that your contributions will be licensed +under the LICENSE file in the root directory of this source tree. diff --git a/pretrained_model/pytorch_vision_v0.10.0/LICENSE b/pretrained_model/pytorch_vision_v0.10.0/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..1edcf92c3317b90fedd187e2eaad101bd1c1efc5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + +Copyright (c) Soumith Chintala 2016, +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pretrained_model/pytorch_vision_v0.10.0/MANIFEST.in b/pretrained_model/pytorch_vision_v0.10.0/MANIFEST.in new file mode 100644 index 0000000000000000000000000000000000000000..75f238c0a2c97812ebe5fdf3e2b43667c7c7f6af --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/MANIFEST.in @@ -0,0 +1,5 @@ +include README.rst +include LICENSE + +recursive-exclude * __pycache__ +recursive-exclude * *.py[co] diff --git a/pretrained_model/pytorch_vision_v0.10.0/README.rst b/pretrained_model/pytorch_vision_v0.10.0/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..db50cae8fd0229e780a7bfdcc67c38fba2d39274 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/README.rst @@ -0,0 +1,151 @@ +torchvision +=========== + +.. image:: https://pepy.tech/badge/torchvision + :target: https://pepy.tech/project/torchvision + +.. image:: https://img.shields.io/badge/dynamic/json.svg?label=docs&url=https%3A%2F%2Fpypi.org%2Fpypi%2Ftorchvision%2Fjson&query=%24.info.version&colorB=brightgreen&prefix=v + :target: https://pytorch.org/vision/stable/index.html + + +The torchvision package consists of popular datasets, model architectures, and common image transformations for computer vision. + + +Installation +============ + +We recommend Anaconda as Python package management system. Please refer to `pytorch.org <https://pytorch.org/>`_ +for the detail of PyTorch (``torch``) installation. The following is the corresponding ``torchvision`` versions and +supported Python versions. 
+ ++--------------------------+--------------------------+---------------------------------+ +| ``torch`` | ``torchvision`` | ``python`` | ++==========================+==========================+=================================+ +| ``master`` / ``nightly`` | ``master`` / ``nightly`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.8.0`` | ``0.9.0`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.1`` | ``0.8.2`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.0`` | ``0.8.1`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.7.0`` | ``0.8.0`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.6.0`` | ``0.7.0`` | ``>=3.6`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.5.1`` | ``0.6.1`` | ``>=3.5`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.5.0`` | ``0.6.0`` | ``>=3.5`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.4.0`` | ``0.5.0`` | ``==2.7``, ``>=3.5``, ``<=3.8`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.3.1`` | ``0.4.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.3.0`` | ``0.4.1`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.2.0`` | ``0.4.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``1.1.0`` | ``0.3.0`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ +| ``<=1.0.1`` | ``0.2.2`` | ``==2.7``, ``>=3.5``, ``<=3.7`` | ++--------------------------+--------------------------+---------------------------------+ + +Anaconda: + +.. code:: bash + + conda install torchvision -c pytorch + +pip: + +.. code:: bash + + pip install torchvision + +From source: + +.. code:: bash + + python setup.py install + # or, for OSX + # MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ python setup.py install + + +In case building TorchVision from source fails, install the nightly version of PyTorch following +the linked guide on the `contributing page <https://github.com/pytorch/vision/blob/master/CONTRIBUTING.md#development-installation>`_ and retry the install. + +By default, GPU support is built if CUDA is found and ``torch.cuda.is_available()`` is true. +It's possible to force building GPU support by setting ``FORCE_CUDA=1`` environment variable, +which is useful when building a docker image. + +Image Backend +============= +Torchvision currently supports the following image backends: + +* `Pillow`_ (default) + +* `Pillow-SIMD`_ - a **much faster** drop-in replacement for Pillow with SIMD. If installed will be used as the default. + +* `accimage`_ - if installed can be activated by calling :code:`torchvision.set_image_backend('accimage')` + +* `libpng`_ - can be installed via conda :code:`conda install libpng` or any of the package managers for debian-based and RHEL-based Linux distributions. 
+ +* `libjpeg`_ - can be installed via conda :code:`conda install jpeg` or any of the package managers for debian-based and RHEL-based Linux distributions. `libjpeg-turbo`_ can be used as well. + +**Notes:** ``libpng`` and ``libjpeg`` must be available at compilation time in order to be available. Make sure that it is available on the standard library locations, +otherwise, add the include and library paths in the environment variables ``TORCHVISION_INCLUDE`` and ``TORCHVISION_LIBRARY``, respectively. + +.. _libpng : http://www.libpng.org/pub/png/libpng.html +.. _Pillow : https://python-pillow.org/ +.. _Pillow-SIMD : https://github.com/uploadcare/pillow-simd +.. _accimage: https://github.com/pytorch/accimage +.. _libjpeg: http://ijg.org/ +.. _libjpeg-turbo: https://libjpeg-turbo.org/ + +C++ API +======= +TorchVision also offers a C++ API that contains C++ equivalent of python models. + +Installation From source: + +.. code:: bash + + mkdir build + cd build + # Add -DWITH_CUDA=on support for the CUDA if needed + cmake .. + make + make install + +Once installed, the library can be accessed in cmake (after properly configuring ``CMAKE_PREFIX_PATH``) via the :code:`TorchVision::TorchVision` target: + +.. code:: rest + + find_package(TorchVision REQUIRED) + target_link_libraries(my-target PUBLIC TorchVision::TorchVision) + +The ``TorchVision`` package will also automatically look for the ``Torch`` package and add it as a dependency to ``my-target``, +so make sure that it is also available to cmake via the ``CMAKE_PREFIX_PATH``. + +For an example setup, take a look at ``examples/cpp/hello_world``. + +TorchVision Operators +--------------------- +In order to get the torchvision operators registered with torch (eg. for the JIT), all you need to do is to ensure that you +:code:`#include <torchvision/vision.h>` in your project. + +Documentation +============= +You can find the API documentation on the pytorch website: https://pytorch.org/vision/stable/index.html + +Contributing +============ + +See the `CONTRIBUTING <CONTRIBUTING.md>`_ file for how to help out. + +Disclaimer on Datasets +====================== + +This is a utility library that downloads and prepares public datasets. We do not host or distribute these datasets, vouch for their quality or fairness, or claim that you have license to use the dataset. It is your responsibility to determine whether you have permission to use the dataset under the dataset's license. + +If you're a dataset owner and wish to update any part of it (description, citation, etc.), or do not want your dataset to be included in this library, please get in touch through a GitHub issue. Thanks for your contribution to the ML community! 
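(Editor's note, not part of the patched README.rst above: the Image Backend section mentions ``torchvision.set_image_backend('accimage')`` only in passing, so here is a minimal Python sketch of inspecting and switching the backend. It assumes a standard torchvision install; ``set_image_backend`` validates only the backend name, not whether the optional package is importable, hence the explicit import check.)

.. code:: python

    # Minimal sketch: inspect and switch torchvision's image backend.
    import torchvision

    print(torchvision.get_image_backend())  # 'PIL' unless changed elsewhere

    # Prefer accimage only when the optional package is actually importable;
    # set_image_backend() checks the name, not the installation.
    try:
        import accimage  # noqa: F401
        torchvision.set_image_backend("accimage")
    except ImportError:
        torchvision.set_image_backend("PIL")

Pillow-SIMD, by contrast, is a drop-in replacement for Pillow and is picked up as the default without any backend switch.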
diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/.gitignore b/pretrained_model/pytorch_vision_v0.10.0/android/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..adcfad04c915f27b68699e2bbc615bd4c17fb700 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/.gitignore @@ -0,0 +1,6 @@ +local.properties +**/*.iml +.gradle +.idea/* +.externalNativeBuild +build diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/build.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/build.gradle new file mode 100644 index 0000000000000000000000000000000000000000..b905bdf3a17f3c21f17cc55714aad3fe4005daf8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/build.gradle @@ -0,0 +1,42 @@ +allprojects { + buildscript { + ext { + minSdkVersion = 21 + targetSdkVersion = 28 + compileSdkVersion = 28 + buildToolsVersion = '28.0.3' + + coreVersion = "1.2.0" + extJUnitVersion = "1.1.1" + runnerVersion = "1.2.0" + rulesVersion = "1.2.0" + junitVersion = "4.12" + + androidSupportAppCompatV7Version = "28.0.0" + fbjniJavaOnlyVersion = "0.0.3" + soLoaderNativeLoaderVersion = "0.8.0" + pytorchAndroidVersion = "1.9.0-SNAPSHOT" + } + + repositories { + google() + mavenCentral() + jcenter() + } + + dependencies { + classpath 'com.android.tools.build:gradle:4.1.2' + classpath 'com.vanniktech:gradle-maven-publish-plugin:0.14.2' + } + } + + repositories { + google() + jcenter() + } +} + +ext.deps = [ + jsr305: 'com.google.code.findbugs:jsr305:3.0.1', +] + diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradle.properties b/pretrained_model/pytorch_vision_v0.10.0/android/gradle.properties new file mode 100644 index 0000000000000000000000000000000000000000..a8105544f30cbbb97869bd0ab6d4ab2bc4a0bf02 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradle.properties @@ -0,0 +1,24 @@ +ABI_FILTERS=armeabi-v7a,arm64-v8a,x86,x86_64 + +VERSION_NAME=0.10.0-SNAPSHOT +GROUP=org.pytorch +MAVEN_GROUP=org.pytorch +SONATYPE_STAGING_PROFILE=orgpytorch +POM_URL=https://github.com/pytorch/vision/ +POM_SCM_URL=https://github.com/pytorch/vision.git +POM_SCM_CONNECTION=scm:git:https://github.com/pytorch/vision +POM_SCM_DEV_CONNECTION=scm:git:git@github.com:pytorch/vision.git +POM_LICENSE_NAME=BSD 3-Clause +POM_LICENSE_URL=https://github.com/pytorch/vision/blob/master/LICENSE +POM_ISSUES_URL=https://github.com/pytorch/vision/issues +POM_LICENSE_DIST=repo +POM_DEVELOPER_ID=pytorch +POM_DEVELOPER_NAME=pytorch + +# Gradle internals +android.useAndroidX=true +android.enableJetifier=true + +testAppAllVariantsEnabled=false + +org.gradle.jvmargs=-Xmx12g diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradle/wrapper/gradle-wrapper.jar b/pretrained_model/pytorch_vision_v0.10.0/android/gradle/wrapper/gradle-wrapper.jar new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradle/wrapper/gradle-wrapper.properties b/pretrained_model/pytorch_vision_v0.10.0/android/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 0000000000000000000000000000000000000000..442d9132ea32808ad980df4bd233b359f76341a7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,5 @@ +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-6.8.3-bin.zip +zipStoreBase=GRADLE_USER_HOME 
+zipStorePath=wrapper/dists diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/android_tasks.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/android_tasks.gradle new file mode 100644 index 0000000000000000000000000000000000000000..6bba126b2f66bd5cce63c8704220a55e8d43e83d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/android_tasks.gradle @@ -0,0 +1,11 @@ +afterEvaluate { project -> + if (POM_PACKAGING == 'aar') { + task headersJar(type: Jar) { + archiveClassifier.set('headers') + from("$rootDir/cxx/") { + include '**/*.h' + } + } + artifacts.add('archives', headersJar) + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/release.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/release.gradle new file mode 100644 index 0000000000000000000000000000000000000000..ada97f339644254f70e141b6fa4d35df99c62d7f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradle_scripts/release.gradle @@ -0,0 +1,3 @@ +apply from: rootProject.file('gradle_scripts/android_tasks.gradle') + +apply plugin: 'com.vanniktech.maven.publish' diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradlew b/pretrained_model/pytorch_vision_v0.10.0/android/gradlew new file mode 100644 index 0000000000000000000000000000000000000000..cccdd3d517fc5249beaefa600691cf150f2fa3e6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradlew @@ -0,0 +1,172 @@ +#!/usr/bin/env sh + +############################################################################## +## +## Gradle start up script for UN*X +## +############################################################################## + +# Attempt to set APP_HOME +# Resolve links: $0 may be a link +PRG="$0" +# Need this for relative symlinks. +while [ -h "$PRG" ] ; do + ls=`ls -ld "$PRG"` + link=`expr "$ls" : '.*-> \(.*\)$'` + if expr "$link" : '/.*' > /dev/null; then + PRG="$link" + else + PRG=`dirname "$PRG"`"/$link" + fi +done +SAVED="`pwd`" +cd "`dirname \"$PRG\"`/" >/dev/null +APP_HOME="`pwd -P`" +cd "$SAVED" >/dev/null + +APP_NAME="Gradle" +APP_BASE_NAME=`basename "$0"` + +# Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +DEFAULT_JVM_OPTS="" + +# Use the maximum available, or set MAX_FD != -1 to use that value. +MAX_FD="maximum" + +warn () { + echo "$*" +} + +die () { + echo + echo "$*" + echo + exit 1 +} + +# OS specific support (must be 'true' or 'false'). +cygwin=false +msys=false +darwin=false +nonstop=false +case "`uname`" in + CYGWIN* ) + cygwin=true + ;; + Darwin* ) + darwin=true + ;; + MINGW* ) + msys=true + ;; + NONSTOP* ) + nonstop=true + ;; +esac + +CLASSPATH=$APP_HOME/gradle/wrapper/gradle-wrapper.jar + +# Determine the Java command to use to start the JVM. +if [ -n "$JAVA_HOME" ] ; then + if [ -x "$JAVA_HOME/jre/sh/java" ] ; then + # IBM's JDK on AIX uses strange locations for the executables + JAVACMD="$JAVA_HOME/jre/sh/java" + else + JAVACMD="$JAVA_HOME/bin/java" + fi + if [ ! -x "$JAVACMD" ] ; then + die "ERROR: JAVA_HOME is set to an invalid directory: $JAVA_HOME + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." + fi +else + JAVACMD="java" + which java >/dev/null 2>&1 || die "ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. + +Please set the JAVA_HOME variable in your environment to match the +location of your Java installation." 
+fi + +# Increase the maximum file descriptors if we can. +if [ "$cygwin" = "false" -a "$darwin" = "false" -a "$nonstop" = "false" ] ; then + MAX_FD_LIMIT=`ulimit -H -n` + if [ $? -eq 0 ] ; then + if [ "$MAX_FD" = "maximum" -o "$MAX_FD" = "max" ] ; then + MAX_FD="$MAX_FD_LIMIT" + fi + ulimit -n $MAX_FD + if [ $? -ne 0 ] ; then + warn "Could not set maximum file descriptor limit: $MAX_FD" + fi + else + warn "Could not query maximum file descriptor limit: $MAX_FD_LIMIT" + fi +fi + +# For Darwin, add options to specify how the application appears in the dock +if $darwin; then + GRADLE_OPTS="$GRADLE_OPTS \"-Xdock:name=$APP_NAME\" \"-Xdock:icon=$APP_HOME/media/gradle.icns\"" +fi + +# For Cygwin, switch paths to Windows format before running java +if $cygwin ; then + APP_HOME=`cygpath --path --mixed "$APP_HOME"` + CLASSPATH=`cygpath --path --mixed "$CLASSPATH"` + JAVACMD=`cygpath --unix "$JAVACMD"` + + # We build the pattern for arguments to be converted via cygpath + ROOTDIRSRAW=`find -L / -maxdepth 1 -mindepth 1 -type d 2>/dev/null` + SEP="" + for dir in $ROOTDIRSRAW ; do + ROOTDIRS="$ROOTDIRS$SEP$dir" + SEP="|" + done + OURCYGPATTERN="(^($ROOTDIRS))" + # Add a user-defined pattern to the cygpath arguments + if [ "$GRADLE_CYGPATTERN" != "" ] ; then + OURCYGPATTERN="$OURCYGPATTERN|($GRADLE_CYGPATTERN)" + fi + # Now convert the arguments - kludge to limit ourselves to /bin/sh + i=0 + for arg in "$@" ; do + CHECK=`echo "$arg"|egrep -c "$OURCYGPATTERN" -` + CHECK2=`echo "$arg"|egrep -c "^-"` ### Determine if an option + + if [ $CHECK -ne 0 ] && [ $CHECK2 -eq 0 ] ; then ### Added a condition + eval `echo args$i`=`cygpath --path --ignore --mixed "$arg"` + else + eval `echo args$i`="\"$arg\"" + fi + i=$((i+1)) + done + case $i in + (0) set -- ;; + (1) set -- "$args0" ;; + (2) set -- "$args0" "$args1" ;; + (3) set -- "$args0" "$args1" "$args2" ;; + (4) set -- "$args0" "$args1" "$args2" "$args3" ;; + (5) set -- "$args0" "$args1" "$args2" "$args3" "$args4" ;; + (6) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" ;; + (7) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" ;; + (8) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" ;; + (9) set -- "$args0" "$args1" "$args2" "$args3" "$args4" "$args5" "$args6" "$args7" "$args8" ;; + esac +fi + +# Escape application args +save () { + for i do printf %s\\n "$i" | sed "s/'/'\\\\''/g;1s/^/'/;\$s/\$/' \\\\/" ; done + echo " " +} +APP_ARGS=$(save "$@") + +# Collect all arguments for the java command, following the shell quoting and substitution rules +eval set -- $DEFAULT_JVM_OPTS $JAVA_OPTS $GRADLE_OPTS "\"-Dorg.gradle.appname=$APP_BASE_NAME\"" -classpath "\"$CLASSPATH\"" org.gradle.wrapper.GradleWrapperMain "$APP_ARGS" + +# by default we should be in the correct project dir, but when run from Finder on Mac, the cwd is wrong +if [ "$(uname)" = "Darwin" ] && [ "$HOME" = "$PWD" ]; then + cd "$(dirname "$0")" +fi + +exec "$JAVACMD" "$@" diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/gradlew.bat b/pretrained_model/pytorch_vision_v0.10.0/android/gradlew.bat new file mode 100644 index 0000000000000000000000000000000000000000..f9553162f122c71b34635112e717c3e733b5b212 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/gradlew.bat @@ -0,0 +1,84 @@ +@if "%DEBUG%" == "" @echo off +@rem ########################################################################## +@rem +@rem Gradle startup script for Windows +@rem +@rem 
########################################################################## + +@rem Set local scope for the variables with windows NT shell +if "%OS%"=="Windows_NT" setlocal + +set DIRNAME=%~dp0 +if "%DIRNAME%" == "" set DIRNAME=. +set APP_BASE_NAME=%~n0 +set APP_HOME=%DIRNAME% + +@rem Add default JVM options here. You can also use JAVA_OPTS and GRADLE_OPTS to pass JVM options to this script. +set DEFAULT_JVM_OPTS= + +@rem Find java.exe +if defined JAVA_HOME goto findJavaFromJavaHome + +set JAVA_EXE=java.exe +%JAVA_EXE% -version >NUL 2>&1 +if "%ERRORLEVEL%" == "0" goto init + +echo. +echo ERROR: JAVA_HOME is not set and no 'java' command could be found in your PATH. +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:findJavaFromJavaHome +set JAVA_HOME=%JAVA_HOME:"=% +set JAVA_EXE=%JAVA_HOME%/bin/java.exe + +if exist "%JAVA_EXE%" goto init + +echo. +echo ERROR: JAVA_HOME is set to an invalid directory: %JAVA_HOME% +echo. +echo Please set the JAVA_HOME variable in your environment to match the +echo location of your Java installation. + +goto fail + +:init +@rem Get command-line arguments, handling Windows variants + +if not "%OS%" == "Windows_NT" goto win9xME_args + +:win9xME_args +@rem Slurp the command line arguments. +set CMD_LINE_ARGS= +set _SKIP=2 + +:win9xME_args_slurp +if "x%~1" == "x" goto execute + +set CMD_LINE_ARGS=%* + +:execute +@rem Setup the command line + +set CLASSPATH=%APP_HOME%\gradle\wrapper\gradle-wrapper.jar + +@rem Execute Gradle +"%JAVA_EXE%" %DEFAULT_JVM_OPTS% %JAVA_OPTS% %GRADLE_OPTS% "-Dorg.gradle.appname=%APP_BASE_NAME%" -classpath "%CLASSPATH%" org.gradle.wrapper.GradleWrapperMain %CMD_LINE_ARGS% + +:end +@rem End local scope for the variables with windows NT shell +if "%ERRORLEVEL%"=="0" goto mainEnd + +:fail +rem Set variable GRADLE_EXIT_CONSOLE if you need the _script_ return code instead of +rem the _cmd.exe /c_ return code! +if not "" == "%GRADLE_EXIT_CONSOLE%" exit 1 +exit /b 1 + +:mainEnd +if "%OS%"=="Windows_NT" endlocal + +:omega diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/ops/CMakeLists.txt b/pretrained_model/pytorch_vision_v0.10.0/android/ops/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..6f5323c0d39f70f84592dda9760e507dd74ce46d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/ops/CMakeLists.txt @@ -0,0 +1,54 @@ +cmake_minimum_required(VERSION 3.4.1) +set(TARGET torchvision_ops) +project(${TARGET} CXX) +set(CMAKE_CXX_STANDARD 14) + +string(APPEND CMAKE_CXX_FLAGS " -DMOBILE") + +set(build_DIR ${CMAKE_SOURCE_DIR}/build) +set(root_DIR ${CMAKE_CURRENT_LIST_DIR}/..) 
+ +file(GLOB VISION_SRCS + ../../torchvision/csrc/ops/cpu/*.h + ../../torchvision/csrc/ops/cpu/*.cpp + ../../torchvision/csrc/ops/*.h + ../../torchvision/csrc/ops/*.cpp) + +# Remove interpolate_aa sources as they are temporary code +# see https://github.com/pytorch/vision/pull/3761 +# and IndexingUtils.h is unavailable on Android build +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp") +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.cpp") +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../../torchvision/csrc/ops/interpolate_aa.h") + +add_library(${TARGET} SHARED + ${VISION_SRCS} +) + +file(GLOB PYTORCH_INCLUDE_DIRS "${build_DIR}/pytorch_android*.aar/headers") +file(GLOB PYTORCH_INCLUDE_DIRS_CSRC "${build_DIR}/pytorch_android*.aar/headers/torch/csrc/api/include") +file(GLOB PYTORCH_LINK_DIRS "${build_DIR}/pytorch_android*.aar/jni/${ANDROID_ABI}") + +target_compile_options(${TARGET} PRIVATE + -fexceptions +) + +set(BUILD_SUBDIR ${ANDROID_ABI}) + +find_library(PYTORCH_LIBRARY pytorch_jni_lite + PATHS ${PYTORCH_LINK_DIRS} + NO_CMAKE_FIND_ROOT_PATH) + +find_library(FBJNI_LIBRARY fbjni + PATHS ${PYTORCH_LINK_DIRS} + NO_CMAKE_FIND_ROOT_PATH) + +target_include_directories(${TARGET} PRIVATE + ${PYTORCH_INCLUDE_DIRS} + ${PYTORCH_INCLUDE_DIRS_CSRC} +) + +target_link_libraries(${TARGET} PRIVATE + ${PYTORCH_LIBRARY} + ${FBJNI_LIBRARY} +) diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/ops/build.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/ops/build.gradle new file mode 100644 index 0000000000000000000000000000000000000000..df20f6f030db40b9c48a7fe6fb5e77a749a5e2ff --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/ops/build.gradle @@ -0,0 +1,93 @@ +apply plugin: 'com.android.library' +apply plugin: 'maven' + +repositories { + jcenter() + maven { + url "https://oss.sonatype.org/content/repositories/snapshots" + } + flatDir { + dirs 'aars' + } +} + +android { + configurations { + extractForNativeBuild + } + compileSdkVersion rootProject.compileSdkVersion + buildToolsVersion rootProject.buildToolsVersion + + + defaultConfig { + minSdkVersion rootProject.minSdkVersion + targetSdkVersion rootProject.targetSdkVersion + versionCode 0 + versionName "0.1" + + testInstrumentationRunner "androidx.test.runner.AndroidJUnitRunner" + ndk { + abiFilters ABI_FILTERS.split(",") + } + } + + buildTypes { + debug { + minifyEnabled false + debuggable true + } + release { + minifyEnabled false + } + } + + externalNativeBuild { + cmake { + path "CMakeLists.txt" + } + } + + useLibrary 'android.test.runner' + useLibrary 'android.test.base' + useLibrary 'android.test.mock' +} + +dependencies { + implementation 'com.android.support:appcompat-v7:' + rootProject.androidSupportAppCompatV7Version + + extractForNativeBuild "org.pytorch:pytorch_android:$pytorchAndroidVersion" + + // For testing: deps on local aar files + //implementation(name: 'pytorch_android-release', ext: 'aar') + //extractForNativeBuild(name: 'pytorch_android-release', ext: 'aar') + //implementation 'com.facebook.fbjni:fbjni-java-only:0.0.3' +} + +task extractAARForNativeBuild { + doLast { + configurations.extractForNativeBuild.files.each { + def file = it.absoluteFile + copy { + from zipTree(file) + into "$buildDir/$file.name" + include "headers/**" + include "jni/**" + } + } + } +} + +tasks.whenTaskAdded { task -> + if (task.name.contains('externalNativeBuild')) { + 
task.dependsOn(extractAARForNativeBuild) + } +} + +apply from: rootProject.file('gradle_scripts/release.gradle') + +task sourcesJar(type: Jar) { + from android.sourceSets.main.java.srcDirs + classifier = 'sources' +} + +artifacts.add('archives', sourcesJar) diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/ops/gradle.properties b/pretrained_model/pytorch_vision_v0.10.0/android/ops/gradle.properties new file mode 100644 index 0000000000000000000000000000000000000000..5a4ea2f3aba1c7d0708dca68f650d7561c700962 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/ops/gradle.properties @@ -0,0 +1,4 @@ +POM_NAME=torchvision ops +POM_DESCRIPTION=torchvision ops +POM_ARTIFACT_ID=torchvision_ops +POM_PACKAGING=aar diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/ops/src/main/AndroidManifest.xml b/pretrained_model/pytorch_vision_v0.10.0/android/ops/src/main/AndroidManifest.xml new file mode 100644 index 0000000000000000000000000000000000000000..8ca386493c458d3e5dbf5eb05e42706b41feb925 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/ops/src/main/AndroidManifest.xml @@ -0,0 +1 @@ +<manifest package="org.pytorch.torchvision.ops" /> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/settings.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/settings.gradle new file mode 100644 index 0000000000000000000000000000000000000000..6d34eb8d51ae3b17c1e0e48129477c5b9fe12420 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/settings.gradle @@ -0,0 +1,4 @@ +include ':ops', ':test_app' + +project(':ops').projectDir = file('ops') +project(':test_app').projectDir = file('test_app/app') diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/build.gradle b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/build.gradle new file mode 100644 index 0000000000000000000000000000000000000000..76b2d7417934635c930e899a81f8ad464bc9f46e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/build.gradle @@ -0,0 +1,135 @@ +apply plugin: 'com.android.application' + +repositories { + jcenter() + maven { + url "https://oss.sonatype.org/content/repositories/snapshots" + } + flatDir { + dirs 'aars' + } +} + +android { + configurations { + extractForNativeBuild + } + compileOptions { + sourceCompatibility 1.8 + targetCompatibility 1.8 + } + compileSdkVersion rootProject.compileSdkVersion + buildToolsVersion rootProject.buildToolsVersion + defaultConfig { + applicationId "org.pytorch.testapp" + minSdkVersion rootProject.minSdkVersion + targetSdkVersion rootProject.targetSdkVersion + versionCode 1 + versionName "1.0" + ndk { + abiFilters ABI_FILTERS.split(",") + } + externalNativeBuild { + cmake { + abiFilters ABI_FILTERS.split(",") + arguments "-DANDROID_STL=c++_shared" + } + } + buildConfigField("String", "MODULE_ASSET_NAME", "\"frcnn_mnetv3.pt\"") + buildConfigField("String", "LOGCAT_TAG", "@string/app_name") + buildConfigField("long[]", "INPUT_TENSOR_SHAPE", "new long[]{3, 96, 96}") + addManifestPlaceholders([APP_NAME: "@string/app_name", MAIN_ACTIVITY: "org.pytorch.testapp.MainActivity"]) + } + buildTypes { + debug { + minifyEnabled false + debuggable true + } + release { + minifyEnabled false + } + } + flavorDimensions "model", "activity", "build" + productFlavors { + frcnnMnetv3 { + dimension "model" + applicationIdSuffix ".frcnnMnetv3" + buildConfigField("String", "MODULE_ASSET_NAME", "\"frcnn_mnetv3.pt\"") + addManifestPlaceholders([APP_NAME: "TV_FRCNN_MNETV3"]) + 
buildConfigField("String", "LOGCAT_TAG", "\"pytorch-frcnn-mnetv3\"") + } + camera { + dimension "activity" + addManifestPlaceholders([APP_NAME: "TV_CAMERA_FRCNN"]) + addManifestPlaceholders([MAIN_ACTIVITY: "org.pytorch.testapp.CameraActivity"]) + } + base { + dimension "activity" + } + aar { + dimension "build" + } + local { + dimension "build" + } + } + packagingOptions { + doNotStrip '**.so' + pickFirst '**.so' + } + + // Filtering for CI + if (!testAppAllVariantsEnabled.toBoolean()) { + variantFilter { variant -> + def names = variant.flavors*.name + if (names.contains("aar")) { + setIgnore(true) + } + } + } +} + +tasks.all { task -> + // Disable externalNativeBuild for all but nativeBuild variant + if (task.name.startsWith('externalNativeBuild') + && !task.name.contains('NativeBuild')) { + task.enabled = false + } +} + +dependencies { + implementation 'com.android.support:appcompat-v7:28.0.0' + implementation 'com.facebook.soloader:nativeloader:0.8.0' + localImplementation project(':ops') + + implementation "org.pytorch:pytorch_android:$pytorchAndroidVersion" + implementation "org.pytorch:pytorch_android_torchvision:$pytorchAndroidVersion" + + aarImplementation(name: 'pytorch_android-release', ext: 'aar') + aarImplementation(name: 'pytorch_android_torchvision-release', ext: 'aar') + + def camerax_version = "1.0.0-alpha05" + implementation "androidx.camera:camera-core:$camerax_version" + implementation "androidx.camera:camera-camera2:$camerax_version" + implementation 'com.google.android.material:material:1.0.0-beta01' +} + +task extractAARForNativeBuild { + doLast { + configurations.extractForNativeBuild.files.each { + def file = it.absoluteFile + copy { + from zipTree(file) + into "$buildDir/$file.name" + include "headers/**" + include "jni/**" + } + } + } +} + +tasks.whenTaskAdded { task -> + if (task.name.contains('externalNativeBuild')) { + task.dependsOn(extractAARForNativeBuild) + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/AndroidManifest.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/AndroidManifest.xml new file mode 100644 index 0000000000000000000000000000000000000000..a83bf223bdaf21f49ba6d391a2c789135ea2e1cc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/AndroidManifest.xml @@ -0,0 +1,21 @@ +<?xml version="1.0" encoding="utf-8"?> +<manifest xmlns:android="http://schemas.android.com/apk/res/android" + package="org.pytorch.testapp"> + + <application + android:allowBackup="true" + android:label="${APP_NAME}" + android:supportsRtl="true" + android:theme="@style/AppTheme"> + + <activity android:name="${MAIN_ACTIVITY}"> + <intent-filter> + <action android:name="android.intent.action.MAIN" /> + + <category android:name="android.intent.category.LAUNCHER" /> + </intent-filter> + </activity> + </application> + + <uses-permission android:name="android.permission.CAMERA" /> +</manifest> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/BBox.java b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/BBox.java new file mode 100644 index 0000000000000000000000000000000000000000..6fd60791864485b0d7f6a3f04db485ea12ad9de0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/BBox.java @@ -0,0 +1,22 @@ +package org.pytorch.testapp; + +class BBox { + public final float score; + public final float x0; + public final float 
y0; + public final float x1; + public final float y1; + + public BBox(float score, float x0, float y0, float x1, float y1) { + this.score = score; + this.x0 = x0; + this.y0 = y0; + this.x1 = x1; + this.y1 = y1; + } + + @Override + public String toString() { + return String.format("Box{score=%f x0=%f y0=%f x1=%f y1=%f", score, x0, y0, x1, y1); + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/CameraActivity.java b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/CameraActivity.java new file mode 100644 index 0000000000000000000000000000000000000000..1c427bb82ba737e433ebe4a87f7b70e366bb425f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/CameraActivity.java @@ -0,0 +1,432 @@ +package org.pytorch.testapp; + +import android.Manifest; +import android.content.Context; +import android.content.pm.PackageManager; +import android.graphics.Bitmap; +import android.graphics.Canvas; +import android.graphics.Color; +import android.graphics.Paint; +import android.graphics.Rect; +import android.os.Bundle; +import android.os.Handler; +import android.os.HandlerThread; +import android.os.SystemClock; +import android.util.DisplayMetrics; +import android.util.Log; +import android.util.Size; +import android.view.TextureView; +import android.view.ViewStub; +import android.widget.ImageView; +import android.widget.TextView; +import android.widget.Toast; +import androidx.annotation.Nullable; +import androidx.annotation.UiThread; +import androidx.annotation.WorkerThread; +import androidx.appcompat.app.AppCompatActivity; +import androidx.camera.core.CameraX; +import androidx.camera.core.ImageAnalysis; +import androidx.camera.core.ImageAnalysisConfig; +import androidx.camera.core.ImageProxy; +import androidx.camera.core.Preview; +import androidx.camera.core.PreviewConfig; +import androidx.core.app.ActivityCompat; +import com.facebook.soloader.nativeloader.NativeLoader; +import com.facebook.soloader.nativeloader.SystemDelegate; +import java.io.File; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.FloatBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import org.pytorch.IValue; +import org.pytorch.Module; +import org.pytorch.Tensor; + +public class CameraActivity extends AppCompatActivity { + + private static final float BBOX_SCORE_DRAW_THRESHOLD = 0.5f; + private static final String TAG = BuildConfig.LOGCAT_TAG; + private static final int TEXT_TRIM_SIZE = 4096; + private static final int RGB_MAX_CHANNEL_VALUE = 262143; + + private static final int REQUEST_CODE_CAMERA_PERMISSION = 200; + private static final String[] PERMISSIONS = {Manifest.permission.CAMERA}; + + static { + if (!NativeLoader.isInitialized()) { + NativeLoader.init(new SystemDelegate()); + } + NativeLoader.loadLibrary("pytorch_jni"); + NativeLoader.loadLibrary("torchvision_ops"); + } + + private Bitmap mInputTensorBitmap; + private Bitmap mBitmap; + private Canvas mCanvas; + + private long mLastAnalysisResultTime; + + protected HandlerThread mBackgroundThread; + protected Handler mBackgroundHandler; + protected Handler mUIHandler; + + private TextView mTextView; + private ImageView mCameraOverlay; + private StringBuilder mTextViewStringBuilder = new StringBuilder(); + + private Paint mBboxPaint; + + @Override + protected void 
onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + setContentView(R.layout.activity_camera); + mTextView = findViewById(R.id.text); + mCameraOverlay = findViewById(R.id.camera_overlay); + mUIHandler = new Handler(getMainLooper()); + startBackgroundThread(); + + if (ActivityCompat.checkSelfPermission(this, Manifest.permission.CAMERA) + != PackageManager.PERMISSION_GRANTED) { + ActivityCompat.requestPermissions(this, PERMISSIONS, REQUEST_CODE_CAMERA_PERMISSION); + } else { + setupCameraX(); + } + mBboxPaint = new Paint(); + mBboxPaint.setAntiAlias(true); + mBboxPaint.setDither(true); + mBboxPaint.setColor(Color.GREEN); + } + + @Override + protected void onPostCreate(@Nullable Bundle savedInstanceState) { + super.onPostCreate(savedInstanceState); + startBackgroundThread(); + } + + protected void startBackgroundThread() { + mBackgroundThread = new HandlerThread("ModuleActivity"); + mBackgroundThread.start(); + mBackgroundHandler = new Handler(mBackgroundThread.getLooper()); + } + + @Override + protected void onDestroy() { + stopBackgroundThread(); + super.onDestroy(); + } + + protected void stopBackgroundThread() { + mBackgroundThread.quitSafely(); + try { + mBackgroundThread.join(); + mBackgroundThread = null; + mBackgroundHandler = null; + } catch (InterruptedException e) { + Log.e(TAG, "Error on stopping background thread", e); + } + } + + @Override + public void onRequestPermissionsResult( + int requestCode, String[] permissions, int[] grantResults) { + if (requestCode == REQUEST_CODE_CAMERA_PERMISSION) { + if (grantResults[0] == PackageManager.PERMISSION_DENIED) { + Toast.makeText( + this, + "You can't use image classification example without granting CAMERA permission", + Toast.LENGTH_LONG) + .show(); + finish(); + } else { + setupCameraX(); + } + } + } + + private void setupCameraX() { + final TextureView textureView = + ((ViewStub) findViewById(R.id.camera_texture_view_stub)) + .inflate() + .findViewById(R.id.texture_view); + final PreviewConfig previewConfig = new PreviewConfig.Builder().build(); + final Preview preview = new Preview(previewConfig); + preview.setOnPreviewOutputUpdateListener( + new Preview.OnPreviewOutputUpdateListener() { + @Override + public void onUpdated(Preview.PreviewOutput output) { + textureView.setSurfaceTexture(output.getSurfaceTexture()); + } + }); + + final DisplayMetrics displayMetrics = new DisplayMetrics(); + getWindowManager().getDefaultDisplay().getMetrics(displayMetrics); + + final ImageAnalysisConfig imageAnalysisConfig = + new ImageAnalysisConfig.Builder() + .setTargetResolution(new Size(displayMetrics.widthPixels, displayMetrics.heightPixels)) + .setCallbackHandler(mBackgroundHandler) + .setImageReaderMode(ImageAnalysis.ImageReaderMode.ACQUIRE_LATEST_IMAGE) + .build(); + final ImageAnalysis imageAnalysis = new ImageAnalysis(imageAnalysisConfig); + imageAnalysis.setAnalyzer( + new ImageAnalysis.Analyzer() { + @Override + public void analyze(ImageProxy image, int rotationDegrees) { + if (SystemClock.elapsedRealtime() - mLastAnalysisResultTime < 500) { + return; + } + + final Result result = CameraActivity.this.analyzeImage(image, rotationDegrees); + + if (result != null) { + mLastAnalysisResultTime = SystemClock.elapsedRealtime(); + CameraActivity.this.runOnUiThread( + new Runnable() { + @Override + public void run() { + CameraActivity.this.handleResult(result); + } + }); + } + } + }); + + CameraX.bindToLifecycle(this, preview, imageAnalysis); + } + + private Module mModule; + private FloatBuffer mInputTensorBuffer; + 
private Tensor mInputTensor; + + private static int clamp0255(int x) { + if (x > 255) { + return 255; + } + return x < 0 ? 0 : x; + } + + protected void fillInputTensorBuffer( + ImageProxy image, int rotationDegrees, FloatBuffer inputTensorBuffer) { + + if (mInputTensorBitmap == null) { + final int tensorSize = Math.min(image.getWidth(), image.getHeight()); + mInputTensorBitmap = Bitmap.createBitmap(tensorSize, tensorSize, Bitmap.Config.ARGB_8888); + } + + ImageProxy.PlaneProxy[] planes = image.getPlanes(); + ImageProxy.PlaneProxy Y = planes[0]; + ImageProxy.PlaneProxy U = planes[1]; + ImageProxy.PlaneProxy V = planes[2]; + ByteBuffer yBuffer = Y.getBuffer(); + ByteBuffer uBuffer = U.getBuffer(); + ByteBuffer vBuffer = V.getBuffer(); + final int imageWidth = image.getWidth(); + final int imageHeight = image.getHeight(); + final int tensorSize = Math.min(imageWidth, imageHeight); + + int widthAfterRtn = imageWidth; + int heightAfterRtn = imageHeight; + boolean oddRotation = rotationDegrees == 90 || rotationDegrees == 270; + if (oddRotation) { + widthAfterRtn = imageHeight; + heightAfterRtn = imageWidth; + } + + int minSizeAfterRtn = Math.min(heightAfterRtn, widthAfterRtn); + int cropWidthAfterRtn = minSizeAfterRtn; + int cropHeightAfterRtn = minSizeAfterRtn; + + int cropWidthBeforeRtn = cropWidthAfterRtn; + int cropHeightBeforeRtn = cropHeightAfterRtn; + if (oddRotation) { + cropWidthBeforeRtn = cropHeightAfterRtn; + cropHeightBeforeRtn = cropWidthAfterRtn; + } + + int offsetX = (int) ((imageWidth - cropWidthBeforeRtn) / 2.f); + int offsetY = (int) ((imageHeight - cropHeightBeforeRtn) / 2.f); + + int yRowStride = Y.getRowStride(); + int yPixelStride = Y.getPixelStride(); + int uvRowStride = U.getRowStride(); + int uvPixelStride = U.getPixelStride(); + + float scale = cropWidthAfterRtn / tensorSize; + int yIdx, uvIdx, yi, ui, vi; + final int channelSize = tensorSize * tensorSize; + for (int y = 0; y < tensorSize; y++) { + for (int x = 0; x < tensorSize; x++) { + final int centerCropX = (int) Math.floor(x * scale); + final int centerCropY = (int) Math.floor(y * scale); + int srcX = centerCropX + offsetX; + int srcY = centerCropY + offsetY; + + if (rotationDegrees == 90) { + srcX = offsetX + centerCropY; + srcY = offsetY + (minSizeAfterRtn - 1) - centerCropX; + } else if (rotationDegrees == 180) { + srcX = offsetX + (minSizeAfterRtn - 1) - centerCropX; + srcY = offsetY + (minSizeAfterRtn - 1) - centerCropY; + } else if (rotationDegrees == 270) { + srcX = offsetX + (minSizeAfterRtn - 1) - centerCropY; + srcY = offsetY + centerCropX; + } + + yIdx = srcY * yRowStride + srcX * yPixelStride; + uvIdx = (srcY >> 1) * uvRowStride + (srcX >> 1) * uvPixelStride; + + yi = yBuffer.get(yIdx) & 0xff; + ui = uBuffer.get(uvIdx) & 0xff; + vi = vBuffer.get(uvIdx) & 0xff; + + yi = (yi - 16) < 0 ? 0 : (yi - 16); + ui -= 128; + vi -= 128; + + int a0 = 1192 * yi; + int ri = (a0 + 1634 * vi); + int gi = (a0 - 833 * vi - 400 * ui); + int bi = (a0 + 2066 * ui); + + ri = ri > RGB_MAX_CHANNEL_VALUE ? RGB_MAX_CHANNEL_VALUE : (ri < 0 ? 0 : ri); + gi = gi > RGB_MAX_CHANNEL_VALUE ? RGB_MAX_CHANNEL_VALUE : (gi < 0 ? 0 : gi); + bi = bi > RGB_MAX_CHANNEL_VALUE ? RGB_MAX_CHANNEL_VALUE : (bi < 0 ? 
0 : bi); + + final int color = + 0xff000000 | ((ri << 6) & 0xff0000) | ((gi >> 2) & 0xff00) | ((bi >> 10) & 0xff); + mInputTensorBitmap.setPixel(x, y, color); + inputTensorBuffer.put(0 * channelSize + y * tensorSize + x, clamp0255(ri >> 10) / 255.f); + inputTensorBuffer.put(1 * channelSize + y * tensorSize + x, clamp0255(gi >> 10) / 255.f); + inputTensorBuffer.put(2 * channelSize + y * tensorSize + x, clamp0255(bi >> 10) / 255.f); + } + } + } + + public static String assetFilePath(Context context, String assetName) { + File file = new File(context.getFilesDir(), assetName); + if (file.exists() && file.length() > 0) { + return file.getAbsolutePath(); + } + + try (InputStream is = context.getAssets().open(assetName)) { + try (OutputStream os = new FileOutputStream(file)) { + byte[] buffer = new byte[4 * 1024]; + int read; + while ((read = is.read(buffer)) != -1) { + os.write(buffer, 0, read); + } + os.flush(); + } + return file.getAbsolutePath(); + } catch (IOException e) { + Log.e(TAG, "Error process asset " + assetName + " to file path"); + } + return null; + } + + @WorkerThread + @Nullable + protected Result analyzeImage(ImageProxy image, int rotationDegrees) { + Log.i(TAG, String.format("analyzeImage(%s, %d)", image, rotationDegrees)); + final int tensorSize = Math.min(image.getWidth(), image.getHeight()); + if (mModule == null) { + Log.i(TAG, "Loading module from asset '" + BuildConfig.MODULE_ASSET_NAME + "'"); + mInputTensorBuffer = Tensor.allocateFloatBuffer(3 * tensorSize * tensorSize); + mInputTensor = Tensor.fromBlob(mInputTensorBuffer, new long[] {3, tensorSize, tensorSize}); + final String modelFileAbsoluteFilePath = + new File(assetFilePath(this, BuildConfig.MODULE_ASSET_NAME)).getAbsolutePath(); + mModule = Module.load(modelFileAbsoluteFilePath); + } + + final long startTime = SystemClock.elapsedRealtime(); + fillInputTensorBuffer(image, rotationDegrees, mInputTensorBuffer); + + final long moduleForwardStartTime = SystemClock.elapsedRealtime(); + final IValue outputTuple = mModule.forward(IValue.listFrom(mInputTensor)); + final IValue out1 = outputTuple.toTuple()[1]; + final Map<String, IValue> map = out1.toList()[0].toDictStringKey(); + + float[] boxesData = new float[] {}; + float[] scoresData = new float[] {}; + final List<BBox> bboxes = new ArrayList<>(); + if (map.containsKey("boxes")) { + final Tensor boxesTensor = map.get("boxes").toTensor(); + final Tensor scoresTensor = map.get("scores").toTensor(); + boxesData = boxesTensor.getDataAsFloatArray(); + scoresData = scoresTensor.getDataAsFloatArray(); + final int n = scoresData.length; + for (int i = 0; i < n; i++) { + final BBox bbox = + new BBox( + scoresData[i], + boxesData[4 * i + 0], + boxesData[4 * i + 1], + boxesData[4 * i + 2], + boxesData[4 * i + 3]); + android.util.Log.i(TAG, String.format("Forward result %d: %s", i, bbox)); + bboxes.add(bbox); + } + } else { + android.util.Log.i(TAG, "Forward result empty"); + } + + final long moduleForwardDuration = SystemClock.elapsedRealtime() - moduleForwardStartTime; + final long analysisDuration = SystemClock.elapsedRealtime() - startTime; + return new Result(tensorSize, bboxes, moduleForwardDuration, analysisDuration); + } + + @UiThread + protected void handleResult(Result result) { + final int W = mCameraOverlay.getMeasuredWidth(); + final int H = mCameraOverlay.getMeasuredHeight(); + + final int size = Math.min(W, H); + final int offsetX = (W - size) / 2; + final int offsetY = (H - size) / 2; + + float scaleX = (float) size / result.tensorSize; + float scaleY = (float) 
size / result.tensorSize; + if (mBitmap == null) { + mBitmap = Bitmap.createBitmap(W, H, Bitmap.Config.ARGB_8888); + mCanvas = new Canvas(mBitmap); + } + + mCanvas.drawBitmap( + mInputTensorBitmap, + new Rect(0, 0, result.tensorSize, result.tensorSize), + new Rect(offsetX, offsetY, offsetX + size, offsetY + size), + null); + + for (final BBox bbox : result.bboxes) { + if (bbox.score < BBOX_SCORE_DRAW_THRESHOLD) { + continue; + } + + float c_x0 = offsetX + scaleX * bbox.x0; + float c_y0 = offsetY + scaleY * bbox.y0; + + float c_x1 = offsetX + scaleX * bbox.x1; + float c_y1 = offsetY + scaleY * bbox.y1; + + mCanvas.drawLine(c_x0, c_y0, c_x1, c_y0, mBboxPaint); + mCanvas.drawLine(c_x1, c_y0, c_x1, c_y1, mBboxPaint); + mCanvas.drawLine(c_x1, c_y1, c_x0, c_y1, mBboxPaint); + mCanvas.drawLine(c_x0, c_y1, c_x0, c_y0, mBboxPaint); + mCanvas.drawText(String.format("%.2f", bbox.score), c_x0, c_y0, mBboxPaint); + } + mCameraOverlay.setImageBitmap(mBitmap); + + String message = String.format("forwardDuration:%d", result.moduleForwardDuration); + Log.i(TAG, message); + mTextViewStringBuilder.insert(0, '\n').insert(0, message); + if (mTextViewStringBuilder.length() > TEXT_TRIM_SIZE) { + mTextViewStringBuilder.delete(TEXT_TRIM_SIZE, mTextViewStringBuilder.length()); + } + mTextView.setText(mTextViewStringBuilder.toString()); + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/MainActivity.java b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/MainActivity.java new file mode 100644 index 0000000000000000000000000000000000000000..a9c13bffa6e8f0ac5410a809f0a9b2edcc407eca --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/MainActivity.java @@ -0,0 +1,159 @@ +package org.pytorch.testapp; + +import android.os.Bundle; +import android.os.Handler; +import android.os.HandlerThread; +import android.os.SystemClock; +import android.util.Log; +import android.widget.TextView; +import androidx.annotation.Nullable; +import androidx.annotation.UiThread; +import androidx.annotation.WorkerThread; +import androidx.appcompat.app.AppCompatActivity; +import com.facebook.soloader.nativeloader.NativeLoader; +import com.facebook.soloader.nativeloader.SystemDelegate; +import java.nio.FloatBuffer; +import java.util.Map; +import org.pytorch.IValue; +import org.pytorch.Module; +import org.pytorch.PyTorchAndroid; +import org.pytorch.Tensor; + +public class MainActivity extends AppCompatActivity { + static { + if (!NativeLoader.isInitialized()) { + NativeLoader.init(new SystemDelegate()); + } + NativeLoader.loadLibrary("pytorch_jni"); + NativeLoader.loadLibrary("torchvision_ops"); + } + + private static final String TAG = BuildConfig.LOGCAT_TAG; + private static final int TEXT_TRIM_SIZE = 4096; + + private TextView mTextView; + + protected HandlerThread mBackgroundThread; + protected Handler mBackgroundHandler; + private Module mModule; + private FloatBuffer mInputTensorBuffer; + private Tensor mInputTensor; + private StringBuilder mTextViewStringBuilder = new StringBuilder(); + + private final Runnable mModuleForwardRunnable = + new Runnable() { + @Override + public void run() { + final Result result = doModuleForward(); + runOnUiThread( + () -> { + handleResult(result); + if (mBackgroundHandler != null) { + mBackgroundHandler.post(mModuleForwardRunnable); + } + }); + } + }; + + @Override + protected void onCreate(Bundle savedInstanceState) { + 
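+    // Starts the background thread and posts mModuleForwardRunnable; the runnable re-posts itself after each result is shown, so inference runs continuously until onDestroy().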
super.onCreate(savedInstanceState); + setContentView(R.layout.activity_main); + mTextView = findViewById(R.id.text); + startBackgroundThread(); + mBackgroundHandler.post(mModuleForwardRunnable); + } + + protected void startBackgroundThread() { + mBackgroundThread = new HandlerThread(TAG + "_bg"); + mBackgroundThread.start(); + mBackgroundHandler = new Handler(mBackgroundThread.getLooper()); + } + + @Override + protected void onDestroy() { + stopBackgroundThread(); + super.onDestroy(); + } + + protected void stopBackgroundThread() { + mBackgroundThread.quitSafely(); + try { + mBackgroundThread.join(); + mBackgroundThread = null; + mBackgroundHandler = null; + } catch (InterruptedException e) { + Log.e(TAG, "Error stopping background thread", e); + } + } + + @WorkerThread + @Nullable + protected Result doModuleForward() { + if (mModule == null) { + final long[] shape = BuildConfig.INPUT_TENSOR_SHAPE; + long numElements = 1; + for (int i = 0; i < shape.length; i++) { + numElements *= shape[i]; + } + mInputTensorBuffer = Tensor.allocateFloatBuffer((int) numElements); + mInputTensor = Tensor.fromBlob(mInputTensorBuffer, BuildConfig.INPUT_TENSOR_SHAPE); + PyTorchAndroid.setNumThreads(1); + mModule = PyTorchAndroid.loadModuleFromAsset(getAssets(), BuildConfig.MODULE_ASSET_NAME); + } + + final long startTime = SystemClock.elapsedRealtime(); + final long moduleForwardStartTime = SystemClock.elapsedRealtime(); + final IValue outputTuple = mModule.forward(IValue.listFrom(mInputTensor)); + final IValue[] outputArray = outputTuple.toTuple(); + final IValue out0 = outputArray[0]; + final Map<String, IValue> map = out0.toDictStringKey(); + if (map.containsKey("boxes")) { + final Tensor boxes = map.get("boxes").toTensor(); + final Tensor scores = map.get("scores").toTensor(); + final float[] boxesData = boxes.getDataAsFloatArray(); + final float[] scoresData = scores.getDataAsFloatArray(); + final int n = scoresData.length; + for (int i = 0; i < n; i++) { + android.util.Log.i( + TAG, + String.format( + "Forward result %d: score %f box:(%f, %f, %f, %f)", + scoresData[i], + boxesData[4 * i + 0], + boxesData[4 * i + 1], + boxesData[4 * i + 2], + boxesData[4 * i + 3])); + } + } else { + android.util.Log.i(TAG, "Forward result empty"); + } + + final long moduleForwardDuration = SystemClock.elapsedRealtime() - moduleForwardStartTime; + final long analysisDuration = SystemClock.elapsedRealtime() - startTime; + return new Result(new float[] {}, moduleForwardDuration, analysisDuration); + } + + static class Result { + + private final float[] scores; + private final long totalDuration; + private final long moduleForwardDuration; + + public Result(float[] scores, long moduleForwardDuration, long totalDuration) { + this.scores = scores; + this.moduleForwardDuration = moduleForwardDuration; + this.totalDuration = totalDuration; + } + } + + @UiThread + protected void handleResult(Result result) { + String message = String.format("forwardDuration:%d", result.moduleForwardDuration); + mTextViewStringBuilder.insert(0, '\n').insert(0, message); + if (mTextViewStringBuilder.length() > TEXT_TRIM_SIZE) { + mTextViewStringBuilder.delete(TEXT_TRIM_SIZE, mTextViewStringBuilder.length()); + } + mTextView.setText(mTextViewStringBuilder.toString()); + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/Result.java b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/Result.java new file mode 100644 index 
0000000000000000000000000000000000000000..ed7ebd006cdc5cbe6d069c1ce710925f03839def --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/java/org/pytorch/testapp/Result.java @@ -0,0 +1,17 @@ +package org.pytorch.testapp; + +import java.util.List; + +class Result { + public final int tensorSize; + public final List<BBox> bboxes; + public final long totalDuration; + public final long moduleForwardDuration; + + public Result(int tensorSize, List<BBox> bboxes, long moduleForwardDuration, long totalDuration) { + this.tensorSize = tensorSize; + this.bboxes = bboxes; + this.moduleForwardDuration = moduleForwardDuration; + this.totalDuration = totalDuration; + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_camera.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_camera.xml new file mode 100644 index 0000000000000000000000000000000000000000..7ba2e42b7c0d7edff97ff6d4b5111f7f8cf76943 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_camera.xml @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="utf-8"?> +<FrameLayout + xmlns:android="http://schemas.android.com/apk/res/android" + xmlns:tools="http://schemas.android.com/tools" + android:layout_width="match_parent" + android:layout_height="match_parent" + tools:context=".CameraActivity"> + + <ViewStub + android:id="@+id/camera_texture_view_stub" + android:layout_width="match_parent" + android:layout_height="match_parent" + android:layout="@layout/texture_view"/> + + <TextView + android:id="@+id/text" + android:layout_width="match_parent" + android:layout_height="match_parent" + android:layout_gravity="top" + android:textSize="16sp" + android:textStyle="bold" + android:textColor="#ff0000"/> + + <ImageView + android:id="@+id/camera_overlay" + android:layout_width="match_parent" + android:layout_height="match_parent"/> +</FrameLayout> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_main.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_main.xml new file mode 100644 index 0000000000000000000000000000000000000000..c0939ebc0ebcaddd1a09467b564ce5f944a0448c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/activity_main.xml @@ -0,0 +1,17 @@ +<?xml version="1.0" encoding="utf-8"?> +<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android" + xmlns:tools="http://schemas.android.com/tools" + android:layout_width="match_parent" + android:layout_height="match_parent" + tools:context=".MainActivity"> + + <TextView + android:id="@+id/text" + android:layout_width="match_parent" + android:layout_height="match_parent" + android:layout_gravity="top" + android:textSize="14sp" + android:background="@android:color/black" + android:textColor="@android:color/white" /> + +</FrameLayout> \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/texture_view.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/texture_view.xml new file mode 100644 index 0000000000000000000000000000000000000000..6518c6c84c690271726e23bcef979f8961b7066f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/layout/texture_view.xml @@ -0,0 +1,5 @@ +<?xml version="1.0" encoding="utf-8"?> +<TextureView 
xmlns:android="http://schemas.android.com/apk/res/android" + android:id="@+id/texture_view" + android:layout_width="match_parent" + android:layout_height="0dp" /> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/mipmap-mdpi/ic_launcher.png b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/mipmap-mdpi/ic_launcher.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/mipmap-mdpi/ic_launcher_round.png b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/mipmap-mdpi/ic_launcher_round.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/colors.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/colors.xml new file mode 100644 index 0000000000000000000000000000000000000000..69b22338c6510250df3b43672635120dbce2fa49 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/colors.xml @@ -0,0 +1,6 @@ +<?xml version="1.0" encoding="utf-8"?> +<resources> + <color name="colorPrimary">#008577</color> + <color name="colorPrimaryDark">#00574B</color> + <color name="colorAccent">#D81B60</color> +</resources> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/strings.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/strings.xml new file mode 100644 index 0000000000000000000000000000000000000000..cafbaad151114b5b5cd7965a8baf6df16109d244 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/strings.xml @@ -0,0 +1,3 @@ +<resources> + <string name="app_name">TV_FRCNN</string> +</resources> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/styles.xml b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/styles.xml new file mode 100644 index 0000000000000000000000000000000000000000..5885930df6d10edf3d6df40d6556297d11f953da --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/app/src/main/res/values/styles.xml @@ -0,0 +1,11 @@ +<resources> + + <!-- Base application theme. --> + <style name="AppTheme" parent="Theme.AppCompat.Light.DarkActionBar"> + <!-- Customize your theme here. 
--> + <item name="colorPrimary">@color/colorPrimary</item> + <item name="colorPrimaryDark">@color/colorPrimaryDark</item> + <item name="colorAccent">@color/colorAccent</item> + </style> + +</resources> diff --git a/pretrained_model/pytorch_vision_v0.10.0/android/test_app/make_assets.py b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/make_assets.py new file mode 100644 index 0000000000000000000000000000000000000000..7860c759a573602ba9b1bd5123b1d3ae0029b3f1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/android/test_app/make_assets.py @@ -0,0 +1,17 @@ +import torch +import torchvision +from torch.utils.mobile_optimizer import optimize_for_mobile + +print(torch.__version__) + +model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn( + pretrained=True, + box_score_thresh=0.7, + rpn_post_nms_top_n_test=100, + rpn_score_thresh=0.4, + rpn_pre_nms_top_n_test=150) + +model.eval() +script_model = torch.jit.script(model) +opt_script_model = optimize_for_mobile(script_model) +opt_script_model.save("app/src/main/assets/frcnn_mnetv3.pt") diff --git a/pretrained_model/pytorch_vision_v0.10.0/cmake/TorchVisionConfig.cmake.in b/pretrained_model/pytorch_vision_v0.10.0/cmake/TorchVisionConfig.cmake.in new file mode 100644 index 0000000000000000000000000000000000000000..42a3d566166849816b4983d66e4de1c198ac88ce --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/cmake/TorchVisionConfig.cmake.in @@ -0,0 +1,43 @@ +# TorchVisionConfig.cmake +# -------------------- +# +# Exported targets:: Vision +# + +@PACKAGE_INIT@ + +set(PN TorchVision) + +# location of include/torchvision +set(${PN}_INCLUDE_DIR "${PACKAGE_PREFIX_DIR}/@CMAKE_INSTALL_INCLUDEDIR@") + +set(${PN}_LIBRARY "") +set(${PN}_DEFINITIONS USING_${PN}) + +check_required_components(${PN}) + + +if(NOT (CMAKE_VERSION VERSION_LESS 3.0)) +#----------------------------------------------------------------------------- +# Don't include targets if this file is being picked up by another +# project which has already built this as a subproject +#----------------------------------------------------------------------------- +if(NOT TARGET ${PN}::TorchVision) +include("${CMAKE_CURRENT_LIST_DIR}/${PN}Targets.cmake") + +if(NOT TARGET torch_library) +find_package(Torch REQUIRED) +endif() +if(NOT TARGET Python3::Python) +find_package(Python3 COMPONENTS Development) +endif() + +set_target_properties(TorchVision::TorchVision PROPERTIES INTERFACE_INCLUDE_DIRECTORIES "${${PN}_INCLUDE_DIR}" INTERFACE_LINK_LIBRARIES "torch;Python3::Python" ) + + +if(@WITH_CUDA@) + target_compile_definitions(TorchVision::TorchVision INTERFACE WITH_CUDA) +endif() + +endif() +endif() diff --git a/pretrained_model/pytorch_vision_v0.10.0/cmake/iOS.cmake b/pretrained_model/pytorch_vision_v0.10.0/cmake/iOS.cmake new file mode 100644 index 0000000000000000000000000000000000000000..d42ea4c9232c171312fdff20d42733d9ef379de1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/cmake/iOS.cmake @@ -0,0 +1,207 @@ +# This file is based off of the Platform/Darwin.cmake and Platform/UnixPaths.cmake +# files which are included with CMake 2.8.4 +# It has been altered for iOS development + +# Options: +# +# IOS_PLATFORM = OS (default) or SIMULATOR +# This decides if SDKS will be selected from the iPhoneOS.platform or iPhoneSimulator.platform folders +# OS - the default, used to build for iPhone and iPad physical devices, which have an arm arch. +# SIMULATOR - used to build for the Simulator platforms, which have an x86 arch. 
+# +# CMAKE_IOS_DEVELOPER_ROOT = automatic(default) or /path/to/platform/Developer folder +# By default this location is automatcially chosen based on the IOS_PLATFORM value above. +# If set manually, it will override the default location and force the user of a particular Developer Platform +# +# CMAKE_IOS_SDK_ROOT = automatic(default) or /path/to/platform/Developer/SDKs/SDK folder +# By default this location is automatcially chosen based on the CMAKE_IOS_DEVELOPER_ROOT value. +# In this case it will always be the most up-to-date SDK found in the CMAKE_IOS_DEVELOPER_ROOT path. +# If set manually, this will force the use of a specific SDK version + +# Macros: +# +# set_xcode_property (TARGET XCODE_PROPERTY XCODE_VALUE) +# A convenience macro for setting xcode specific properties on targets +# example: set_xcode_property (myioslib IPHONEOS_DEPLOYMENT_TARGET "3.1") +# +# find_host_package (PROGRAM ARGS) +# A macro used to find executable programs on the host system, not within the iOS environment. +# Thanks to the android-cmake project for providing the command + +# Standard settings +set(CMAKE_SYSTEM_NAME Darwin) +set(CMAKE_SYSTEM_VERSION 1) +set(UNIX True) +set(APPLE True) +set(IOS True) + +# Required as of cmake 2.8.10 +set(CMAKE_OSX_DEPLOYMENT_TARGET "" CACHE STRING "Force unset of the deployment target for iOS" FORCE) + +# Determine the cmake host system version so we know where to find the iOS SDKs +find_program(CMAKE_UNAME uname /bin /usr/bin /usr/local/bin) +if(CMAKE_UNAME) + exec_program(uname ARGS -r OUTPUT_VARIABLE CMAKE_HOST_SYSTEM_VERSION) + string(REGEX REPLACE "^([0-9]+)\\.([0-9]+).*$" "\\1" DARWIN_MAJOR_VERSION "${CMAKE_HOST_SYSTEM_VERSION}") +endif(CMAKE_UNAME) + +# Force the compilers to gcc for iOS +set(CMAKE_C_COMPILER /usr/bin/gcc CACHE STRING "") +set(CMAKE_CXX_COMPILER /usr/bin/g++ CACHE STRING "") +set(CMAKE_AR ar CACHE FILEPATH "" FORCE) +set(CMAKE_RANLIB ranlib CACHE FILEPATH "" FORCE) +set(PKG_CONFIG_EXECUTABLE pkg-config CACHE FILEPATH "" FORCE) + +# Setup iOS platform unless specified manually with IOS_PLATFORM +if(NOT DEFINED IOS_PLATFORM) + set(IOS_PLATFORM "OS") +endif(NOT DEFINED IOS_PLATFORM) +set(IOS_PLATFORM ${IOS_PLATFORM} CACHE STRING "Type of iOS Platform") + +# Check the platform selection and setup for developer root +if(${IOS_PLATFORM} STREQUAL "OS") + set(IOS_PLATFORM_LOCATION "iPhoneOS.platform") + set(XCODE_IOS_PLATFORM iphoneos) + + # This causes the installers to properly locate the output libraries + set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphoneos") +elseif(${IOS_PLATFORM} STREQUAL "SIMULATOR") + set(IOS_PLATFORM_LOCATION "iPhoneSimulator.platform") + set(XCODE_IOS_PLATFORM iphonesimulator) + + # This causes the installers to properly locate the output libraries + set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-iphonesimulator") +elseif(${IOS_PLATFORM} STREQUAL "WATCHOS") + set(IOS_PLATFORM_LOCATION "WatchOS.platform") + set(XCODE_IOS_PLATFORM watchos) + + # This causes the installers to properly locate the output libraries + set(CMAKE_XCODE_EFFECTIVE_PLATFORMS "-watchos") +else(${IOS_PLATFORM} STREQUAL "OS") + message(FATAL_ERROR + "Unsupported IOS_PLATFORM value selected. 
" + "Please choose OS, SIMULATOR, or WATCHOS.") +endif() + +# All iOS/Darwin specific settings - some may be redundant +set(CMAKE_SHARED_LIBRARY_PREFIX "lib") +set(CMAKE_SHARED_LIBRARY_SUFFIX ".dylib") +set(CMAKE_SHARED_MODULE_PREFIX "lib") +set(CMAKE_SHARED_MODULE_SUFFIX ".so") +set(CMAKE_MODULE_EXISTS 1) +set(CMAKE_DL_LIBS "") + +set(CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG "-compatibility_version ") +set(CMAKE_C_OSX_CURRENT_VERSION_FLAG "-current_version ") +set(CMAKE_CXX_OSX_COMPATIBILITY_VERSION_FLAG "${CMAKE_C_OSX_COMPATIBILITY_VERSION_FLAG}") +set(CMAKE_CXX_OSX_CURRENT_VERSION_FLAG "${CMAKE_C_OSX_CURRENT_VERSION_FLAG}") + +if(IOS_DEPLOYMENT_TARGET) + set(XCODE_IOS_PLATFORM_VERSION_FLAGS "-m${XCODE_IOS_PLATFORM}-version-min=${IOS_DEPLOYMENT_TARGET}") +endif() + +# Hidden visibilty is required for cxx on iOS +set(CMAKE_C_FLAGS_INIT "${XCODE_IOS_PLATFORM_VERSION_FLAGS}") +set(CMAKE_CXX_FLAGS_INIT "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -fvisibility-inlines-hidden") + +set(CMAKE_C_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_C_LINK_FLAGS}") +set(CMAKE_CXX_LINK_FLAGS "${XCODE_IOS_PLATFORM_VERSION_FLAGS} -Wl,-search_paths_first ${CMAKE_CXX_LINK_FLAGS}") + +set(CMAKE_PLATFORM_HAS_INSTALLNAME 1) +set(CMAKE_SHARED_LIBRARY_CREATE_C_FLAGS "-dynamiclib -headerpad_max_install_names") +set(CMAKE_SHARED_MODULE_CREATE_C_FLAGS "-bundle -headerpad_max_install_names") +set(CMAKE_SHARED_MODULE_LOADER_C_FLAG "-Wl,-bundle_loader,") +set(CMAKE_SHARED_MODULE_LOADER_CXX_FLAG "-Wl,-bundle_loader,") +set(CMAKE_FIND_LIBRARY_SUFFIXES ".dylib" ".so" ".a") + +# hack: if a new cmake (which uses CMAKE_INSTALL_NAME_TOOL) runs on an old build tree +# (where install_name_tool was hardcoded) and where CMAKE_INSTALL_NAME_TOOL isn't in the cache +# and still cmake didn't fail in CMakeFindBinUtils.cmake (because it isn't rerun) +# hardcode CMAKE_INSTALL_NAME_TOOL here to install_name_tool, so it behaves as it did before, Alex +if(NOT DEFINED CMAKE_INSTALL_NAME_TOOL) + find_program(CMAKE_INSTALL_NAME_TOOL install_name_tool) +endif(NOT DEFINED CMAKE_INSTALL_NAME_TOOL) + +# Setup iOS deployment target +set(IOS_DEPLOYMENT_TARGET ${IOS_DEPLOYMENT_TARGET} CACHE STRING "Minimum iOS version") + +# Setup iOS developer location unless specified manually with CMAKE_IOS_DEVELOPER_ROOT +# Note Xcode 4.3 changed the installation location, choose the most recent one available +exec_program(/usr/bin/xcode-select ARGS -print-path OUTPUT_VARIABLE CMAKE_XCODE_DEVELOPER_DIR) +set(XCODE_POST_43_ROOT "${CMAKE_XCODE_DEVELOPER_DIR}/Platforms/${IOS_PLATFORM_LOCATION}/Developer") +set(XCODE_PRE_43_ROOT "/Developer/Platforms/${IOS_PLATFORM_LOCATION}/Developer") +if(NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT) + if(EXISTS ${XCODE_POST_43_ROOT}) + set(CMAKE_IOS_DEVELOPER_ROOT ${XCODE_POST_43_ROOT}) + elseif(EXISTS ${XCODE_PRE_43_ROOT}) + set(CMAKE_IOS_DEVELOPER_ROOT ${XCODE_PRE_43_ROOT}) + endif(EXISTS ${XCODE_POST_43_ROOT}) +endif(NOT DEFINED CMAKE_IOS_DEVELOPER_ROOT) +set(CMAKE_IOS_DEVELOPER_ROOT ${CMAKE_IOS_DEVELOPER_ROOT} CACHE PATH "Location of iOS Platform") + +# Find and use the most recent iOS sdk unless specified manually with CMAKE_IOS_SDK_ROOT +if(NOT DEFINED CMAKE_IOS_SDK_ROOT) + file(GLOB _CMAKE_IOS_SDKS "${CMAKE_IOS_DEVELOPER_ROOT}/SDKs/*") + if(_CMAKE_IOS_SDKS) + list(SORT _CMAKE_IOS_SDKS) + list(REVERSE _CMAKE_IOS_SDKS) + list(GET _CMAKE_IOS_SDKS 0 CMAKE_IOS_SDK_ROOT) + else(_CMAKE_IOS_SDKS) + message(FATAL_ERROR "No iOS SDK's found in default search path ${CMAKE_IOS_DEVELOPER_ROOT}. 
Manually set CMAKE_IOS_SDK_ROOT or install the iOS SDK.") + endif(_CMAKE_IOS_SDKS) + message(STATUS "Toolchain using default iOS SDK: ${CMAKE_IOS_SDK_ROOT}") +endif(NOT DEFINED CMAKE_IOS_SDK_ROOT) +set(CMAKE_IOS_SDK_ROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Location of the selected iOS SDK") + +# Set the sysroot default to the most recent SDK +set(CMAKE_OSX_SYSROOT ${CMAKE_IOS_SDK_ROOT} CACHE PATH "Sysroot used for iOS support") + +# set the architecture for iOS +if(IOS_PLATFORM STREQUAL "OS") + set(DEFAULT_IOS_ARCH "arm64") +elseif(IOS_PLATFORM STREQUAL "SIMULATOR") + set(DEFAULT_IOS_ARCH "x86_64") +elseif(IOS_PLATFORM STREQUAL "WATCHOS") + set(DEFAULT_IOS_ARCH "armv7k;arm64_32") +endif() + +set(IOS_ARCH ${DEFAULT_IOS_ARCH} CACHE STRING "Build architecture for iOS") +set(CMAKE_OSX_ARCHITECTURES ${IOS_ARCH} CACHE STRING "Build architecture for iOS") + +# Set the find root to the iOS developer roots and to user defined paths +set(CMAKE_FIND_ROOT_PATH ${CMAKE_IOS_DEVELOPER_ROOT} ${CMAKE_IOS_SDK_ROOT} ${CMAKE_PREFIX_PATH} CACHE STRING "iOS find search path root") + +# default to searching for frameworks first +set(CMAKE_FIND_FRAMEWORK FIRST) + +# set up the default search directories for frameworks +set(CMAKE_SYSTEM_FRAMEWORK_PATH + ${CMAKE_IOS_SDK_ROOT}/System/Library/Frameworks + ${CMAKE_IOS_SDK_ROOT}/System/Library/PrivateFrameworks + ${CMAKE_IOS_SDK_ROOT}/Developer/Library/Frameworks +) + +# only search the iOS sdks, not the remainder of the host filesystem +set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) +set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) + +# This little macro lets you set any XCode specific property +macro(set_xcode_property TARGET XCODE_PROPERTY XCODE_VALUE) + set_property(TARGET ${TARGET} PROPERTY XCODE_ATTRIBUTE_${XCODE_PROPERTY} ${XCODE_VALUE}) +endmacro(set_xcode_property) + +# This macro lets you find executable programs on the host system +macro(find_host_package) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE NEVER) + set(IOS FALSE) + + find_package(${ARGN}) + + set(IOS TRUE) + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM ONLY) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) +endmacro(find_host_package) diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/Makefile b/pretrained_model/pytorch_vision_v0.10.0/docs/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..0488c3db88f7ea88b7e79d9b06fb9394b358dfca --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/Makefile @@ -0,0 +1,41 @@ +# Minimal makefile for Sphinx documentation +# + +ifneq ($(EXAMPLES_PATTERN),) + EXAMPLES_PATTERN_OPTS := -D sphinx_gallery_conf.filename_pattern="$(EXAMPLES_PATTERN)" +endif + +# You can set these variables from the command line. +SPHINXOPTS = -W -j auto $(EXAMPLES_PATTERN_OPTS) +SPHINXBUILD = sphinx-build +SPHINXPROJ = torchvision +SOURCEDIR = source +BUILDDIR = build + +# Put it first so that "make" without argument is like "make help". +help: + @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) + +docset: html + doc2dash --name $(SPHINXPROJ) --icon $(SOURCEDIR)/_static/img/pytorch-logo-flame.png --enable-js --online-redirect-url http://pytorch.org/vision/ --force $(BUILDDIR)/html/ + + # Manually fix because Zeal doesn't deal well with `icon.png`-only at 2x resolution. 
+ cp $(SPHINXPROJ).docset/icon.png $(SPHINXPROJ).docset/icon@2x.png + convert $(SPHINXPROJ).docset/icon@2x.png -resize 16x16 $(SPHINXPROJ).docset/icon.png + +html-noplot: # Avoids running the gallery examples, which may take time + $(SPHINXBUILD) -D plot_gallery=0 -b html $(ASPHINXOPTS) "${SOURCEDIR}" "$(BUILDDIR)"/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +clean: + rm -rf $(BUILDDIR)/* + rm -rf $(SOURCEDIR)/auto_examples/ # sphinx-gallery + rm -rf $(SOURCEDIR)/gen_modules/ # sphinx-gallery + +.PHONY: help Makefile docset + +# Catch-all target: route all unknown targets to Sphinx using the new +# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). +%: Makefile + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/make.bat b/pretrained_model/pytorch_vision_v0.10.0/docs/make.bat new file mode 100644 index 0000000000000000000000000000000000000000..6429a151515467686775d8913565e29c20070062 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/make.bat @@ -0,0 +1,36 @@ +@ECHO OFF + +pushd %~dp0 + +REM Command file for Sphinx documentation + +if "%SPHINXBUILD%" == "" ( + set SPHINXBUILD=sphinx-build +) +set SOURCEDIR=source +set BUILDDIR=build +set SPHINXPROJ=torchvision + +if "%1" == "" goto help + +%SPHINXBUILD% >NUL 2>NUL +if errorlevel 9009 ( + echo. + echo.The 'sphinx-build' command was not found. Make sure you have Sphinx + echo.installed, then set the SPHINXBUILD environment variable to point + echo.to the full path of the 'sphinx-build' executable. Alternatively you + echo.may add the Sphinx directory to PATH. + echo. + echo.If you don't have Sphinx installed, grab it from + echo.http://sphinx-doc.org/ + exit /b 1 +) + +%SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% +goto end + +:help +%SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% + +:end +popd diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/requirements.txt b/pretrained_model/pytorch_vision_v0.10.0/docs/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..68efe2cb639502fd257065b9dad634cfe75eda4c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/requirements.txt @@ -0,0 +1,6 @@ +sphinx==2.4.4 +sphinx-gallery>=0.9.0 +sphinx-copybutton>=0.3.1 +matplotlib +numpy +-e git+git://github.com/pytorch/pytorch_sphinx_theme.git#egg=pytorch_sphinx_theme diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/css/custom_torchvision.css b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/css/custom_torchvision.css new file mode 100644 index 0000000000000000000000000000000000000000..fb039a47c0aab63c6f3f1ad93c64bab664221037 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/css/custom_torchvision.css @@ -0,0 +1,12 @@ +/* This rule (and possibly this entire file) should be removed once +https://github.com/pytorch/pytorch_sphinx_theme/issues/125 is fixed. + +We override the rule so that the links to the notebooks aren't hidden in the +gallery examples. pytorch_sphinx_theme is supposed to customize those links so +that they render nicely (look at the nice links on top of the tutorials +examples) but it doesn't work for repos that are not the tutorial repo, and in +torchvision it just hides the links. 
So we have to put them back here */ +article.pytorch-article .sphx-glr-download-link-note.admonition.note, +article.pytorch-article .reference.download.internal, article.pytorch-article .sphx-glr-signature { + display: block; +} \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-dark.png b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-dark.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-dark.svg b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-dark.svg new file mode 100644 index 0000000000000000000000000000000000000000..717a3ce942f8915a8ace66a0b4b78f2e2f6177ca --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-dark.svg @@ -0,0 +1,24 @@ +<?xml version="1.0" encoding="utf-8"?> +<!-- Generator: Adobe Illustrator 21.0.0, SVG Export Plug-In . SVG Version: 6.00 Build 0) --> +<svg version="1.1" id="Layer_1" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" x="0px" y="0px" + viewBox="0 0 199.7 40.2" style="enable-background:new 0 0 199.7 40.2;" xml:space="preserve"> +<style type="text/css"> + .st0{fill:#F05732;} + .st1{fill:#9E529F;} + .st2{fill:#333333;} +</style> +<path class="st0" d="M102.7,12.2c-1.3-1-1.8,3.9-4.4,3.9c-3,0-4-13-6.3-13c-0.7,0-0.8-0.4-7.9,21.3c-2.9,9,4.4,15.8,11.8,15.8 + c4.6,0,12.3-3,12.3-12.6C108.2,20.5,104.7,13.7,102.7,12.2z M95.8,35.3c-3.7,0-6.7-3.1-6.7-7c0-3.9,3-7,6.7-7s6.7,3.1,6.7,7 + C102.5,32.1,99.5,35.3,95.8,35.3z"/> +<path class="st1" d="M99.8,0c-0.5,0-1.8,2.5-1.8,3.6c0,1.5,1,2,1.8,2c0.8,0,1.8-0.5,1.8-2C101.5,2.5,100.2,0,99.8,0z"/> +<path class="st2" d="M0,39.5V14.9h11.5c5.3,0,8.3,3.6,8.3,7.9c0,4.3-3,7.9-8.3,7.9H5.2v8.8H0z M14.4,22.8c0-2.1-1.6-3.3-3.7-3.3H5.2 + v6.6h5.5C12.8,26.1,14.4,24.8,14.4,22.8z"/> +<path class="st2" d="M35.2,39.5V29.4l-9.4-14.5h6l6.1,9.8l6.1-9.8h5.9l-9.4,14.5v10.1H35.2z"/> +<path class="st2" d="M63.3,39.5v-20h-7.2v-4.6h19.6v4.6h-7.2v20H63.3z"/> +<path class="st2" d="M131.4,39.5l-4.8-8.7h-3.8v8.7h-5.2V14.9H129c5.1,0,8.3,3.4,8.3,7.9c0,4.3-2.8,6.7-5.4,7.3l5.6,9.4H131.4z + M131.9,22.8c0-2-1.6-3.3-3.7-3.3h-5.5v6.6h5.5C130.3,26.1,131.9,24.9,131.9,22.8z"/> +<path class="st2" d="M145.6,27.2c0-7.6,5.7-12.7,13.1-12.7c5.4,0,8.5,2.9,10.3,6l-4.5,2.2c-1-2-3.2-3.6-5.8-3.6 + c-4.5,0-7.7,3.4-7.7,8.1c0,4.6,3.2,8.1,7.7,8.1c2.5,0,4.7-1.6,5.8-3.6l4.5,2.2c-1.7,3.1-4.9,6-10.3,6 + C151.3,39.9,145.6,34.7,145.6,27.2z"/> +<path class="st2" d="M194.5,39.5V29.1h-11.6v10.4h-5.2V14.9h5.2v9.7h11.6v-9.7h5.3v24.6H194.5z"/> +</svg> diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-flame.png b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-flame.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-flame.svg b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-flame.svg new file mode 100644 index 0000000000000000000000000000000000000000..22d7228b4fa96331ce9d1bd7cd8abb28abfb8166 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_static/img/pytorch-logo-flame.svg @@ -0,0 +1,33 @@ +<?xml version="1.0" encoding="UTF-8" standalone="no"?> +<svg + 
xmlns:dc="http://purl.org/dc/elements/1.1/" + xmlns:cc="http://creativecommons.org/ns#" + xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#" + xmlns:svg="http://www.w3.org/2000/svg" + xmlns="http://www.w3.org/2000/svg" + height="40.200001" + width="40.200001" + xml:space="preserve" + viewBox="0 0 40.200002 40.2" + y="0px" + x="0px" + id="Layer_1" + version="1.1"><metadata + id="metadata4717"><rdf:RDF><cc:Work + rdf:about=""><dc:format>image/svg+xml</dc:format><dc:type + rdf:resource="http://purl.org/dc/dcmitype/StillImage" /><dc:title></dc:title></cc:Work></rdf:RDF></metadata><defs + id="defs4715" /><style + id="style4694" + type="text/css"> + .st0{fill:#F05732;} + .st1{fill:#9E529F;} + .st2{fill:#333333;} +</style><path + style="fill:#f05732" + id="path4696" + d="m 26.975479,12.199999 c -1.3,-1 -1.8,3.9 -4.4,3.9 -3,0 -4,-12.9999998 -6.3,-12.9999998 -0.7,0 -0.8,-0.4 -7.9000003,21.2999998 -2.9000001,9 4.4000003,15.8 11.8000003,15.8 4.6,0 12.3,-3 12.3,-12.6 0,-7.1 -3.5,-13.9 -5.5,-15.4 z m -6.9,23.1 c -3.7,0 -6.7,-3.1 -6.7,-7 0,-3.9 3,-7 6.7,-7 3.7,0 6.7,3.1 6.7,7 0,3.8 -3,7 -6.7,7 z" + class="st0" /><path + style="fill:#9e529f" + id="path4698" + d="m 24.075479,-7.6293945e-7 c -0.5,0 -1.8,2.49999996293945 -1.8,3.59999996293945 0,1.5 1,2 1.8,2 0.8,0 1.8,-0.5 1.8,-2 -0.1,-1.1 -1.4,-3.59999996293945 -1.8,-3.59999996293945 z" + class="st1" /></svg> \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/_templates/layout.html b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_templates/layout.html new file mode 100644 index 0000000000000000000000000000000000000000..aaa15d56e02d2c580758ec829edcbdc312df6c63 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/_templates/layout.html @@ -0,0 +1,8 @@ +{% extends "!layout.html" %} + +{% block sidebartitle %} + <div class="version"> + <a href='https://pytorch.org/vision/versions.html'>{{ version }} ▼</a> + </div> + {% include "searchbox.html" %} +{% endblock %} diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/conf.py b/pretrained_model/pytorch_vision_v0.10.0/docs/source/conf.py new file mode 100644 index 0000000000000000000000000000000000000000..aa5e60bff0c380b63abf2b34d2438b941151bc37 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/conf.py @@ -0,0 +1,290 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- +# +# PyTorch documentation build configuration file, created by +# sphinx-quickstart on Fri Dec 23 13:31:47 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# +# import os +# import sys +# sys.path.insert(0, os.path.abspath('.')) + +import torchvision +import pytorch_sphinx_theme + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. 
+extensions = [ + 'sphinx.ext.autodoc', + 'sphinx.ext.autosummary', + 'sphinx.ext.doctest', + 'sphinx.ext.intersphinx', + 'sphinx.ext.todo', + 'sphinx.ext.coverage', + 'sphinx.ext.mathjax', + 'sphinx.ext.napoleon', + 'sphinx.ext.viewcode', + 'sphinx.ext.duration', + 'sphinx_gallery.gen_gallery', + "sphinx_copybutton" +] + +sphinx_gallery_conf = { + 'examples_dirs': '../../gallery/', # path to your example scripts + 'gallery_dirs': 'auto_examples', # path to where to save gallery generated output + 'backreferences_dir': 'gen_modules/backreferences', + 'doc_module': ('torchvision',), +} + +napoleon_use_ivar = True +napoleon_numpy_docstring = False +napoleon_google_docstring = True + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = '.rst' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +project = 'Torchvision' +copyright = '2017-present, Torch Contributors' +author = 'Torch Contributors' + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +# TODO: change to [:2] at v1.0 +version = '0.10.0' +# The full version, including alpha/beta/rc tags. +# TODO: verify this works as expected +release = torchvision.__version__ + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = [] + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'sphinx' + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = True + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = 'pytorch_sphinx_theme' +html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + 'collapse_navigation': False, + 'display_version': True, + 'logo_only': True, + 'pytorch_project': 'docs', + 'navigation_with_keys': True, + 'analytics_id': 'UA-117752657-2', +} + +html_logo = '_static/img/pytorch-logo-dark.svg' + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# TODO: remove this once https://github.com/pytorch/pytorch_sphinx_theme/issues/125 is fixed +html_css_files = [ + 'css/custom_torchvision.css', +] + +# -- Options for HTMLHelp output ------------------------------------------ + +# Output file base name for HTML help builder. 
+htmlhelp_basename = 'PyTorchdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +latex_elements = { + # The paper size ('letterpaper' or 'a4paper'). + # + # 'papersize': 'letterpaper', + + # The font size ('10pt', '11pt' or '12pt'). + # + # 'pointsize': '10pt', + + # Additional stuff for the LaTeX preamble. + # + # 'preamble': '', + + # Latex figure (float) alignment + # + # 'figure_align': 'htbp', +} + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + (master_doc, 'pytorch.tex', 'torchvision Documentation', + 'Torch Contributors', 'manual'), +] + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + (master_doc, 'torchvision', 'torchvision Documentation', + [author], 1) +] + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + (master_doc, 'torchvision', 'torchvision Documentation', + author, 'torchvision', 'One line description of project.', + 'Miscellaneous'), +] + + +# Example configuration for intersphinx: refer to the Python standard library. +intersphinx_mapping = { + 'python': ('https://docs.python.org/', None), + 'torch': ('https://pytorch.org/docs/stable/', None), + 'numpy': ('http://docs.scipy.org/doc/numpy/', None), + 'PIL': ('https://pillow.readthedocs.io/en/stable/', None), + 'matplotlib': ('https://matplotlib.org/stable/', None), +} + +# -- A patch that prevents Sphinx from cross-referencing ivar tags ------- +# See http://stackoverflow.com/a/41184353/3343043 + +from docutils import nodes +from sphinx.util.docfields import TypedField +from sphinx import addnodes + + +def patched_make_field(self, types, domain, items, **kw): + # `kw` catches `env=None` needed for newer sphinx while maintaining + # backwards compatibility when passed along further down! 
+ + # type: (list, unicode, tuple) -> nodes.field # noqa: F821 + def handle_item(fieldarg, content): + par = nodes.paragraph() + par += addnodes.literal_strong('', fieldarg) # Patch: this line added + # par.extend(self.make_xrefs(self.rolename, domain, fieldarg, + # addnodes.literal_strong)) + if fieldarg in types: + par += nodes.Text(' (') + # NOTE: using .pop() here to prevent a single type node to be + # inserted twice into the doctree, which leads to + # inconsistencies later when references are resolved + fieldtype = types.pop(fieldarg) + if len(fieldtype) == 1 and isinstance(fieldtype[0], nodes.Text): + typename = u''.join(n.astext() for n in fieldtype) + typename = typename.replace('int', 'python:int') + typename = typename.replace('long', 'python:long') + typename = typename.replace('float', 'python:float') + typename = typename.replace('type', 'python:type') + par.extend(self.make_xrefs(self.typerolename, domain, typename, + addnodes.literal_emphasis, **kw)) + else: + par += fieldtype + par += nodes.Text(')') + par += nodes.Text(' -- ') + par += content + return par + + fieldname = nodes.field_name('', self.label) + if len(items) == 1 and self.can_collapse: + fieldarg, content = items[0] + bodynode = handle_item(fieldarg, content) + else: + bodynode = self.list_type() + for fieldarg, content in items: + bodynode += nodes.list_item('', handle_item(fieldarg, content)) + fieldbody = nodes.field_body('', bodynode) + return nodes.field('', fieldname, fieldbody) + + +TypedField.make_field = patched_make_field + + +def inject_minigalleries(app, what, name, obj, options, lines): + """Inject a minigallery into a docstring. + + This avoids having to manually write the .. minigallery directive for every item we want a minigallery for, + as it would be easy to miss some. + + This callback is called after the .. auto directives (like ..autoclass) have been processed, + and modifies the lines parameter inplace to add the .. minigallery that will show which examples + are using which object. + + It's a bit hacky, but not *that* hacky when you consider that the recommended way is to do pretty much the same, + but instead with templates using autosummary (which we don't want to use): + (https://sphinx-gallery.github.io/stable/configuration.html#auto-documenting-your-api-with-links-to-examples) + + For docs on autodoc-process-docstring, see the autodoc docs: + https://www.sphinx-doc.org/en/master/usage/extensions/autodoc.html + """ + + if what in ("class", "function"): + lines.append(f".. minigallery:: {name}") + lines.append(f" :add-heading: Examples using ``{name.split('.')[-1]}``:") + # avoid heading entirely to avoid warning. As a bonud it actually renders better + lines.append(" :heading-level: 9") + lines.append("\n") + + +def setup(app): + app.connect('autodoc-process-docstring', inject_minigalleries) diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/datasets.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/datasets.rst new file mode 100644 index 0000000000000000000000000000000000000000..af1b95db4bbd8dc6b0857e6d113b1f935aeb4c59 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/datasets.rst @@ -0,0 +1,260 @@ +torchvision.datasets +==================== + +All datasets are subclasses of :class:`torch.utils.data.Dataset` +i.e, they have ``__getitem__`` and ``__len__`` methods implemented. +Hence, they can all be passed to a :class:`torch.utils.data.DataLoader` +which can load multiple samples in parallel using ``torch.multiprocessing`` workers. 
+For example: :: + + imagenet_data = torchvision.datasets.ImageNet('path/to/imagenet_root/') + data_loader = torch.utils.data.DataLoader(imagenet_data, + batch_size=4, + shuffle=True, + num_workers=args.nThreads) + +.. currentmodule:: torchvision.datasets + +All the datasets have almost similar API. They all have two common arguments: +``transform`` and ``target_transform`` to transform the input and target respectively. +You can also create your own datasets using the provided :ref:`base classes <base_classes_datasets>`. + +Caltech +~~~~~~~ + +.. autoclass:: Caltech101 + :members: __getitem__ + :special-members: + +.. autoclass:: Caltech256 + :members: __getitem__ + :special-members: + +CelebA +~~~~~~ + +.. autoclass:: CelebA + :members: __getitem__ + :special-members: + +CIFAR +~~~~~ + +.. autoclass:: CIFAR10 + :members: __getitem__ + :special-members: + +.. autoclass:: CIFAR100 + +Cityscapes +~~~~~~~~~~ + +.. note :: + Requires Cityscape to be downloaded. + +.. autoclass:: Cityscapes + :members: __getitem__ + :special-members: + +COCO +~~~~ + +.. note :: + These require the `COCO API to be installed`_ + +.. _COCO API to be installed: https://github.com/pdollar/coco/tree/master/PythonAPI + + +Captions +^^^^^^^^ + +.. autoclass:: CocoCaptions + :members: __getitem__ + :special-members: + + +Detection +^^^^^^^^^ + +.. autoclass:: CocoDetection + :members: __getitem__ + :special-members: + + +EMNIST +~~~~~~ + +.. autoclass:: EMNIST + +FakeData +~~~~~~~~ + +.. autoclass:: FakeData + +Fashion-MNIST +~~~~~~~~~~~~~ + +.. autoclass:: FashionMNIST + +Flickr +~~~~~~ + +.. autoclass:: Flickr8k + :members: __getitem__ + :special-members: + +.. autoclass:: Flickr30k + :members: __getitem__ + :special-members: + +HMDB51 +~~~~~~~ + +.. autoclass:: HMDB51 + :members: __getitem__ + :special-members: + +ImageNet +~~~~~~~~~~~ + +.. autoclass:: ImageNet + +.. note :: + This requires `scipy` to be installed + +Kinetics-400 +~~~~~~~~~~~~ + +.. autoclass:: Kinetics400 + :members: __getitem__ + :special-members: + +KITTI +~~~~~~~~~ + +.. autoclass:: Kitti + :members: __getitem__ + :special-members: + +KMNIST +~~~~~~~~~~~~~ + +.. autoclass:: KMNIST + +LSUN +~~~~ + +.. autoclass:: LSUN + :members: __getitem__ + :special-members: + +MNIST +~~~~~ + +.. autoclass:: MNIST + +Omniglot +~~~~~~~~ + +.. autoclass:: Omniglot + +PhotoTour +~~~~~~~~~ + +.. autoclass:: PhotoTour + :members: __getitem__ + :special-members: + +Places365 +~~~~~~~~~ + +.. autoclass:: Places365 + :members: __getitem__ + :special-members: + +QMNIST +~~~~~~ + +.. autoclass:: QMNIST + +SBD +~~~~~~ + +.. autoclass:: SBDataset + :members: __getitem__ + :special-members: + +SBU +~~~ + +.. autoclass:: SBU + :members: __getitem__ + :special-members: + +SEMEION +~~~~~~~ + +.. autoclass:: SEMEION + :members: __getitem__ + :special-members: + +STL10 +~~~~~ + +.. autoclass:: STL10 + :members: __getitem__ + :special-members: + +SVHN +~~~~~ + +.. autoclass:: SVHN + :members: __getitem__ + :special-members: + +UCF101 +~~~~~~~ + +.. autoclass:: UCF101 + :members: __getitem__ + :special-members: + +USPS +~~~~~ + +.. autoclass:: USPS + :members: __getitem__ + :special-members: + +VOC +~~~~~~ + +.. autoclass:: VOCSegmentation + :members: __getitem__ + :special-members: + +.. autoclass:: VOCDetection + :members: __getitem__ + :special-members: + +WIDERFace +~~~~~~~~~ + +.. autoclass:: WIDERFace + :members: __getitem__ + :special-members: + + +.. _base_classes_datasets: + +Base classes for custom datasets +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
autoclass:: DatasetFolder + :members: __getitem__, find_classes, make_dataset + :special-members: + + +.. autoclass:: ImageFolder + :members: __getitem__ + :special-members: diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/index.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/index.rst new file mode 100644 index 0000000000000000000000000000000000000000..61cb573c96f72c78486dd949cab9c600263d6870 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/index.rst @@ -0,0 +1,60 @@ +torchvision +=========== +This library is part of the `PyTorch +<http://pytorch.org/>`_ project. PyTorch is an open source +machine learning framework. + +Features described in this documentation are classified by release status: + + *Stable:* These features will be maintained long-term and there should generally + be no major performance limitations or gaps in documentation. + We also expect to maintain backwards compatibility (although + breaking changes can happen and notice will be given one release ahead + of time). + + *Beta:* Features are tagged as Beta because the API may change based on + user feedback, because the performance needs to improve, or because + coverage across operators is not yet complete. For Beta features, we are + committing to seeing the feature through to the Stable classification. + We are not, however, committing to backwards compatibility. + + *Prototype:* These features are typically not available as part of + binary distributions like PyPI or Conda, except sometimes behind run-time + flags, and are at an early stage for feedback and testing. + + + +The :mod:`torchvision` package consists of popular datasets, model +architectures, and common image transformations for computer vision. + +.. toctree:: + :maxdepth: 2 + :caption: Package Reference + + datasets + io + models + ops + transforms + utils + +.. toctree:: + :maxdepth: 1 + :caption: Examples + + auto_examples/index + +.. automodule:: torchvision + :members: + +.. toctree:: + :maxdepth: 1 + :caption: PyTorch Libraries + + PyTorch <https://pytorch.org/docs> + torchaudio <https://pytorch.org/audio> + torchtext <https://pytorch.org/text> + torchvision <https://pytorch.org/vision> + TorchElastic <https://pytorch.org/elastic/> + TorchServe <https://pytorch.org/serve> + PyTorch on XLA Devices <http://pytorch.org/xla/> diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/io.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/io.rst new file mode 100644 index 0000000000000000000000000000000000000000..2e416469d1774dbc0a29ad960a1609e2b421d764 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/io.rst @@ -0,0 +1,82 @@ +torchvision.io +============== + +.. currentmodule:: torchvision.io + +The :mod:`torchvision.io` package provides functions for performing IO +operations. They are currently specific to reading and writing video and +images. + +Video +----- + +.. autofunction:: read_video + +.. autofunction:: read_video_timestamps + +.. autofunction:: write_video + + +Fine-grained video API +---------------------- + +In addition to the :mod:`read_video` function, we provide a high-performance +lower-level API for more fine-grained control compared to the :mod:`read_video` function. +It does all this whilst fully supporting torchscript. + +.. autoclass:: VideoReader + :members: __next__, get_metadata, set_current_stream, seek + + +Example of inspecting a video: + +.. 
code:: python + + import torchvision + video_path = "path to a test video" + # Constructor allocates memory and a threaded decoder + # instance per video. At the moment it takes two arguments: + # path to the video file, and a wanted stream. + reader = torchvision.io.VideoReader(video_path, "video") + + # The information about the video can be retrieved using the + # `get_metadata()` method. It returns a dictionary for every stream, with + # duration and other relevant metadata (often frame rate) + reader_md = reader.get_metadata() + + # metadata is structured as a dict of dicts with following structure + # {"stream_type": {"attribute": [attribute per stream]}} + # + # following would print out the list of frame rates for every present video stream + print(reader_md["video"]["fps"]) + + # we explicitly select the stream we would like to operate on. In + # the constructor we select a default video stream, but + # in practice, we can set whichever stream we would like + video.set_current_stream("video:0") + + +Image +----- + +.. autoclass:: ImageReadMode + +.. autofunction:: read_image + +.. autofunction:: decode_image + +.. autofunction:: encode_jpeg + +.. autofunction:: decode_jpeg + +.. autofunction:: write_jpeg + +.. autofunction:: encode_png + +.. autofunction:: decode_png + +.. autofunction:: write_png + +.. autofunction:: read_file + +.. autofunction:: write_file diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/models.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/models.rst new file mode 100644 index 0000000000000000000000000000000000000000..b9bff7a36e86a399ce453e646d52f93cf38e45b1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/models.rst @@ -0,0 +1,585 @@ +torchvision.models +################## + + +The models subpackage contains definitions of models for addressing +different tasks, including: image classification, pixelwise semantic +segmentation, object detection, instance segmentation, person +keypoint detection and video classification. + + +Classification +============== + +The models subpackage contains definitions for the following model +architectures for image classification: + +- `AlexNet`_ +- `VGG`_ +- `ResNet`_ +- `SqueezeNet`_ +- `DenseNet`_ +- `Inception`_ v3 +- `GoogLeNet`_ +- `ShuffleNet`_ v2 +- `MobileNetV2`_ +- `MobileNetV3`_ +- `ResNeXt`_ +- `Wide ResNet`_ +- `MNASNet`_ + +You can construct a model with random weights by calling its constructor: + +.. code:: python + + import torchvision.models as models + resnet18 = models.resnet18() + alexnet = models.alexnet() + vgg16 = models.vgg16() + squeezenet = models.squeezenet1_0() + densenet = models.densenet161() + inception = models.inception_v3() + googlenet = models.googlenet() + shufflenet = models.shufflenet_v2_x1_0() + mobilenet_v2 = models.mobilenet_v2() + mobilenet_v3_large = models.mobilenet_v3_large() + mobilenet_v3_small = models.mobilenet_v3_small() + resnext50_32x4d = models.resnext50_32x4d() + wide_resnet50_2 = models.wide_resnet50_2() + mnasnet = models.mnasnet1_0() + +We provide pre-trained models, using the PyTorch :mod:`torch.utils.model_zoo`. +These can be constructed by passing ``pretrained=True``: + +.. 
code:: python + + import torchvision.models as models + resnet18 = models.resnet18(pretrained=True) + alexnet = models.alexnet(pretrained=True) + squeezenet = models.squeezenet1_0(pretrained=True) + vgg16 = models.vgg16(pretrained=True) + densenet = models.densenet161(pretrained=True) + inception = models.inception_v3(pretrained=True) + googlenet = models.googlenet(pretrained=True) + shufflenet = models.shufflenet_v2_x1_0(pretrained=True) + mobilenet_v2 = models.mobilenet_v2(pretrained=True) + mobilenet_v3_large = models.mobilenet_v3_large(pretrained=True) + mobilenet_v3_small = models.mobilenet_v3_small(pretrained=True) + resnext50_32x4d = models.resnext50_32x4d(pretrained=True) + wide_resnet50_2 = models.wide_resnet50_2(pretrained=True) + mnasnet = models.mnasnet1_0(pretrained=True) + +Instancing a pre-trained model will download its weights to a cache directory. +This directory can be set using the `TORCH_MODEL_ZOO` environment variable. See +:func:`torch.utils.model_zoo.load_url` for details. + +Some models use modules which have different training and evaluation +behavior, such as batch normalization. To switch between these modes, use +``model.train()`` or ``model.eval()`` as appropriate. See +:meth:`~torch.nn.Module.train` or :meth:`~torch.nn.Module.eval` for details. + +All pre-trained models expect input images normalized in the same way, +i.e. mini-batches of 3-channel RGB images of shape (3 x H x W), +where H and W are expected to be at least 224. +The images have to be loaded in to a range of [0, 1] and then normalized +using ``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``. +You can use the following transform to normalize:: + + normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]) + +An example of such normalization can be found in the imagenet example +`here <https://github.com/pytorch/examples/blob/42e5b996718797e45c46a25c55b031e6768f8440/imagenet/main.py#L89-L101>`_ + +The process for obtaining the values of `mean` and `std` is roughly equivalent +to:: + + import torch + from torchvision import datasets, transforms as T + + transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()]) + dataset = datasets.ImageNet(".", split="train", transform=transform) + + means = [] + stds = [] + for img in subset(dataset): + means.append(torch.mean(img)) + stds.append(torch.std(img)) + + mean = torch.mean(torch.tensor(means)) + std = torch.mean(torch.tensor(stds)) + +Unfortunately, the concrete `subset` that was used is lost. For more +information see `this discussion <https://github.com/pytorch/vision/issues/1439>`_ +or `these experiments <https://github.com/pytorch/vision/pull/1965>`_. 
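+
+If per-channel statistics are wanted instead (closer in spirit to the published
+``mean`` and ``std`` triples), a rough, hypothetical sketch of such a computation
+could look like the following; the subset chosen below is purely illustrative::
+
+    import torch
+    from torchvision import datasets, transforms as T
+
+    transform = T.Compose([T.Resize(256), T.CenterCrop(224), T.ToTensor()])
+    dataset = datasets.ImageNet(".", split="train", transform=transform)
+
+    # Hypothetical subset: every 100th training image (the original indices are unknown)
+    subset = torch.utils.data.Subset(dataset, range(0, len(dataset), 100))
+
+    channel_means, channel_stds = [], []
+    for i in range(len(subset)):
+        img, _ = subset[i]
+        # img has shape (3, 224, 224); reduce over the spatial dimensions only
+        channel_means.append(img.mean(dim=(1, 2)))
+        channel_stds.append(img.std(dim=(1, 2)))
+
+    mean = torch.stack(channel_means).mean(dim=0)
+    std = torch.stack(channel_stds).mean(dim=0)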
+ +ImageNet 1-crop error rates (224x224) + +================================ ============= ============= +Model Acc@1 Acc@5 +================================ ============= ============= +AlexNet 56.522 79.066 +VGG-11 69.020 88.628 +VGG-13 69.928 89.246 +VGG-16 71.592 90.382 +VGG-19 72.376 90.876 +VGG-11 with batch normalization 70.370 89.810 +VGG-13 with batch normalization 71.586 90.374 +VGG-16 with batch normalization 73.360 91.516 +VGG-19 with batch normalization 74.218 91.842 +ResNet-18 69.758 89.078 +ResNet-34 73.314 91.420 +ResNet-50 76.130 92.862 +ResNet-101 77.374 93.546 +ResNet-152 78.312 94.046 +SqueezeNet 1.0 58.092 80.420 +SqueezeNet 1.1 58.178 80.624 +Densenet-121 74.434 91.972 +Densenet-169 75.600 92.806 +Densenet-201 76.896 93.370 +Densenet-161 77.138 93.560 +Inception v3 77.294 93.450 +GoogleNet 69.778 89.530 +ShuffleNet V2 x1.0 69.362 88.316 +ShuffleNet V2 x0.5 60.552 81.746 +MobileNet V2 71.878 90.286 +MobileNet V3 Large 74.042 91.340 +MobileNet V3 Small 67.668 87.402 +ResNeXt-50-32x4d 77.618 93.698 +ResNeXt-101-32x8d 79.312 94.526 +Wide ResNet-50-2 78.468 94.086 +Wide ResNet-101-2 78.848 94.284 +MNASNet 1.0 73.456 91.510 +MNASNet 0.5 67.734 87.490 +================================ ============= ============= + + +.. _AlexNet: https://arxiv.org/abs/1404.5997 +.. _VGG: https://arxiv.org/abs/1409.1556 +.. _ResNet: https://arxiv.org/abs/1512.03385 +.. _SqueezeNet: https://arxiv.org/abs/1602.07360 +.. _DenseNet: https://arxiv.org/abs/1608.06993 +.. _Inception: https://arxiv.org/abs/1512.00567 +.. _GoogLeNet: https://arxiv.org/abs/1409.4842 +.. _ShuffleNet: https://arxiv.org/abs/1807.11164 +.. _MobileNetV2: https://arxiv.org/abs/1801.04381 +.. _MobileNetV3: https://arxiv.org/abs/1905.02244 +.. _ResNeXt: https://arxiv.org/abs/1611.05431 +.. _MNASNet: https://arxiv.org/abs/1807.11626 + +.. currentmodule:: torchvision.models + +Alexnet +------- + +.. autofunction:: alexnet + +VGG +--- + +.. autofunction:: vgg11 +.. autofunction:: vgg11_bn +.. autofunction:: vgg13 +.. autofunction:: vgg13_bn +.. autofunction:: vgg16 +.. autofunction:: vgg16_bn +.. autofunction:: vgg19 +.. autofunction:: vgg19_bn + + +ResNet +------ + +.. autofunction:: resnet18 +.. autofunction:: resnet34 +.. autofunction:: resnet50 +.. autofunction:: resnet101 +.. autofunction:: resnet152 + +SqueezeNet +---------- + +.. autofunction:: squeezenet1_0 +.. autofunction:: squeezenet1_1 + +DenseNet +--------- + +.. autofunction:: densenet121 +.. autofunction:: densenet169 +.. autofunction:: densenet161 +.. autofunction:: densenet201 + +Inception v3 +------------ + +.. autofunction:: inception_v3 + +.. note :: + This requires `scipy` to be installed + + +GoogLeNet +------------ + +.. autofunction:: googlenet + +.. note :: + This requires `scipy` to be installed + + +ShuffleNet v2 +------------- + +.. autofunction:: shufflenet_v2_x0_5 +.. autofunction:: shufflenet_v2_x1_0 +.. autofunction:: shufflenet_v2_x1_5 +.. autofunction:: shufflenet_v2_x2_0 + +MobileNet v2 +------------- + +.. autofunction:: mobilenet_v2 + +MobileNet v3 +------------- + +.. autofunction:: mobilenet_v3_large +.. autofunction:: mobilenet_v3_small + +ResNext +------- + +.. autofunction:: resnext50_32x4d +.. autofunction:: resnext101_32x8d + +Wide ResNet +----------- + +.. autofunction:: wide_resnet50_2 +.. autofunction:: wide_resnet101_2 + +MNASNet +-------- + +.. autofunction:: mnasnet0_5 +.. autofunction:: mnasnet0_75 +.. autofunction:: mnasnet1_0 +.. 
autofunction:: mnasnet1_3 + +Quantized Models +---------------- + +The following architectures provide support for INT8 quantized models. You can get +a model with random weights by calling its constructor: + +.. code:: python + + import torchvision.models as models + googlenet = models.quantization.googlenet() + inception_v3 = models.quantization.inception_v3() + mobilenet_v2 = models.quantization.mobilenet_v2() + mobilenet_v3_large = models.quantization.mobilenet_v3_large() + resnet18 = models.quantization.resnet18() + resnet50 = models.quantization.resnet50() + resnext101_32x8d = models.quantization.resnext101_32x8d() + shufflenet_v2_x0_5 = models.quantization.shufflenet_v2_x0_5() + shufflenet_v2_x1_0 = models.quantization.shufflenet_v2_x1_0() + shufflenet_v2_x1_5 = models.quantization.shufflenet_v2_x1_5() + shufflenet_v2_x2_0 = models.quantization.shufflenet_v2_x2_0() + +Obtaining a pre-trained quantized model can be done with a few lines of code: + +.. code:: python + + import torchvision.models as models + model = models.quantization.mobilenet_v2(pretrained=True, quantize=True) + model.eval() + # run the model with quantized inputs and weights + out = model(torch.rand(1, 3, 224, 224)) + +We provide pre-trained quantized weights for the following models: + +================================ ============= ============= +Model Acc@1 Acc@5 +================================ ============= ============= +MobileNet V2 71.658 90.150 +MobileNet V3 Large 73.004 90.858 +ShuffleNet V2 68.360 87.582 +ResNet 18 69.494 88.882 +ResNet 50 75.920 92.814 +ResNext 101 32x8d 78.986 94.480 +Inception V3 77.176 93.354 +GoogleNet 69.826 89.404 +================================ ============= ============= + + +Semantic Segmentation +===================== + +The models subpackage contains definitions for the following model +architectures for semantic segmentation: + +- `FCN ResNet50, ResNet101 <https://arxiv.org/abs/1411.4038>`_ +- `DeepLabV3 ResNet50, ResNet101, MobileNetV3-Large <https://arxiv.org/abs/1706.05587>`_ +- `LR-ASPP MobileNetV3-Large <https://arxiv.org/abs/1905.02244>`_ + +As with image classification models, all pre-trained models expect input images normalized in the same way. +The images have to be loaded in to a range of ``[0, 1]`` and then normalized using +``mean = [0.485, 0.456, 0.406]`` and ``std = [0.229, 0.224, 0.225]``. +They have been trained on images resized such that their minimum size is 520. + +For details on how to plot the masks of such models, you may refer to :ref:`semantic_seg_output`. + +The pre-trained models have been trained on a subset of COCO train2017, on the 20 categories that are +present in the Pascal VOC dataset. You can see more information on how the subset has been selected in +``references/segmentation/coco_utils.py``. The classes that the pre-trained model outputs are the following, +in order: + + .. 
code-block:: python + + ['__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', + 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', + 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'] + +The accuracies of the pre-trained models evaluated on COCO val2017 are as follows + +================================ ============= ==================== +Network mean IoU global pixelwise acc +================================ ============= ==================== +FCN ResNet50 60.5 91.4 +FCN ResNet101 63.7 91.9 +DeepLabV3 ResNet50 66.4 92.4 +DeepLabV3 ResNet101 67.4 92.4 +DeepLabV3 MobileNetV3-Large 60.3 91.2 +LR-ASPP MobileNetV3-Large 57.9 91.2 +================================ ============= ==================== + + +Fully Convolutional Networks +---------------------------- + +.. autofunction:: torchvision.models.segmentation.fcn_resnet50 +.. autofunction:: torchvision.models.segmentation.fcn_resnet101 + + +DeepLabV3 +--------- + +.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet50 +.. autofunction:: torchvision.models.segmentation.deeplabv3_resnet101 +.. autofunction:: torchvision.models.segmentation.deeplabv3_mobilenet_v3_large + + +LR-ASPP +------- + +.. autofunction:: torchvision.models.segmentation.lraspp_mobilenet_v3_large + +.. _object_det_inst_seg_pers_keypoint_det: + +Object Detection, Instance Segmentation and Person Keypoint Detection +===================================================================== + +The models subpackage contains definitions for the following model +architectures for detection: + +- `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_ +- `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_ +- `RetinaNet <https://arxiv.org/abs/1708.02002>`_ +- `SSD <https://arxiv.org/abs/1512.02325>`_ +- `SSDlite <https://arxiv.org/abs/1801.04381>`_ + +The pre-trained models for detection, instance segmentation and +keypoint detection are initialized with the classification models +in torchvision. + +The models expect a list of ``Tensor[C, H, W]``, in the range ``0-1``. +The models internally resize the images but the behaviour varies depending +on the model. Check the constructor of the models for more information. The +output format of such models is illustrated in :ref:`instance_seg_output`. + + +For object detection and instance segmentation, the pre-trained +models return the predictions of the following classes: + + .. 
code-block:: python + + COCO_INSTANCE_CATEGORY_NAMES = [ + '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', + 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign', + 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', + 'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A', + 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', + 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', + 'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', + 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', + 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table', + 'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', + 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book', + 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush' + ] + + +Here are the summary of the accuracies for the models trained on +the instances set of COCO train2017 and evaluated on COCO val2017. + +====================================== ======= ======== =========== +Network box AP mask AP keypoint AP +====================================== ======= ======== =========== +Faster R-CNN ResNet-50 FPN 37.0 - - +Faster R-CNN MobileNetV3-Large FPN 32.8 - - +Faster R-CNN MobileNetV3-Large 320 FPN 22.8 - - +RetinaNet ResNet-50 FPN 36.4 - - +SSD300 VGG16 25.1 - - +SSDlite320 MobileNetV3-Large 21.3 - - +Mask R-CNN ResNet-50 FPN 37.9 34.6 - +====================================== ======= ======== =========== + +For person keypoint detection, the accuracies for the pre-trained +models are as follows + +================================ ======= ======== =========== +Network box AP mask AP keypoint AP +================================ ======= ======== =========== +Keypoint R-CNN ResNet-50 FPN 54.6 - 65.0 +================================ ======= ======== =========== + +For person keypoint detection, the pre-trained model return the +keypoints in the following order: + + .. code-block:: python + + COCO_PERSON_KEYPOINT_NAMES = [ + 'nose', + 'left_eye', + 'right_eye', + 'left_ear', + 'right_ear', + 'left_shoulder', + 'right_shoulder', + 'left_elbow', + 'right_elbow', + 'left_wrist', + 'right_wrist', + 'left_hip', + 'right_hip', + 'left_knee', + 'right_knee', + 'left_ankle', + 'right_ankle' + ] + +Runtime characteristics +----------------------- + +The implementations of the models for object detection, instance segmentation +and keypoint detection are efficient. + +In the following table, we use 8 GPUs to report the results. During training, +we use a batch size of 2 per GPU for all models except SSD which uses 4 +and SSDlite which uses 24. During testing a batch size of 1 is used. + +For test time, we report the time for the model evaluation and postprocessing +(including mask pasting in image), but not the time for computing the +precision-recall. 
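+
+As a rough, hypothetical sketch (not the benchmark script that produced the table
+below), a per-iteration test time of this kind can be measured along these lines:
+
+.. code:: python
+
+    import time
+    import torch
+    import torchvision
+
+    # Illustrative setup: a single model and one random image, timed on CPU
+    model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True).eval()
+    images = [torch.rand(3, 800, 800)]  # test-time batch size of 1
+
+    with torch.no_grad():
+        start = time.time()
+        predictions = model(images)  # forward pass plus postprocessing
+        elapsed = time.time() - start
+
+    print(f"test time: {elapsed:.4f} s / it")
+
+On a GPU, the model and images would first be moved to CUDA, and
+``torch.cuda.synchronize()`` would be called before reading the timer.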
+ +====================================== =================== ================== =========== +Network train time (s / it) test time (s / it) memory (GB) +====================================== =================== ================== =========== +Faster R-CNN ResNet-50 FPN 0.2288 0.0590 5.2 +Faster R-CNN MobileNetV3-Large FPN 0.1020 0.0415 1.0 +Faster R-CNN MobileNetV3-Large 320 FPN 0.0978 0.0376 0.6 +RetinaNet ResNet-50 FPN 0.2514 0.0939 4.1 +SSD300 VGG16 0.2093 0.0744 1.5 +SSDlite320 MobileNetV3-Large 0.1773 0.0906 1.5 +Mask R-CNN ResNet-50 FPN 0.2728 0.0903 5.4 +Keypoint R-CNN ResNet-50 FPN 0.3789 0.1242 6.8 +====================================== =================== ================== =========== + + +Faster R-CNN +------------ + +.. autofunction:: torchvision.models.detection.fasterrcnn_resnet50_fpn +.. autofunction:: torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn +.. autofunction:: torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn + + +RetinaNet +--------- + +.. autofunction:: torchvision.models.detection.retinanet_resnet50_fpn + + +SSD +--- + +.. autofunction:: torchvision.models.detection.ssd300_vgg16 + + +SSDlite +------- + +.. autofunction:: torchvision.models.detection.ssdlite320_mobilenet_v3_large + + +Mask R-CNN +---------- + +.. autofunction:: torchvision.models.detection.maskrcnn_resnet50_fpn + + +Keypoint R-CNN +-------------- + +.. autofunction:: torchvision.models.detection.keypointrcnn_resnet50_fpn + + +Video classification +==================== + +We provide models for action recognition pre-trained on Kinetics-400. +They have all been trained with the scripts provided in ``references/video_classification``. + +All pre-trained models expect input images normalized in the same way, +i.e. mini-batches of 3-channel RGB videos of shape (3 x T x H x W), +where H and W are expected to be 112, and T is a number of video frames in a clip. +The images have to be loaded in to a range of [0, 1] and then normalized +using ``mean = [0.43216, 0.394666, 0.37645]`` and ``std = [0.22803, 0.22145, 0.216989]``. + + +.. note:: + The normalization parameters are different from the image classification ones, and correspond + to the mean and std from Kinetics-400. + +.. note:: + For now, normalization code can be found in ``references/video_classification/transforms.py``, + see the ``Normalize`` function there. Note that it differs from standard normalization for + images because it assumes the video is 4d. + +Kinetics 1-crop accuracies for clip length 16 (16x112x112) + +================================ ============= ============= +Network Clip acc@1 Clip acc@5 +================================ ============= ============= +ResNet 3D 18 52.75 75.45 +ResNet MC 18 53.90 76.29 +ResNet (2+1)D 57.50 78.81 +================================ ============= ============= + + +ResNet 3D +---------- + +.. autofunction:: torchvision.models.video.r3d_18 + +ResNet Mixed Convolution +------------------------ + +.. autofunction:: torchvision.models.video.mc3_18 + +ResNet (2+1)D +------------- + +.. autofunction:: torchvision.models.video.r2plus1d_18 diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/ops.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/ops.rst new file mode 100644 index 0000000000000000000000000000000000000000..cdebe9721c3203c7b7d72ea8638ad38efe9f7a47 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/ops.rst @@ -0,0 +1,33 @@ +torchvision.ops +=============== + +.. 
currentmodule:: torchvision.ops + +:mod:`torchvision.ops` implements operators that are specific for Computer Vision. + +.. note:: + All operators have native support for TorchScript. + + +.. autofunction:: nms +.. autofunction:: batched_nms +.. autofunction:: remove_small_boxes +.. autofunction:: clip_boxes_to_image +.. autofunction:: box_convert +.. autofunction:: box_area +.. autofunction:: box_iou +.. autofunction:: generalized_box_iou +.. autofunction:: roi_align +.. autofunction:: ps_roi_align +.. autofunction:: roi_pool +.. autofunction:: ps_roi_pool +.. autofunction:: deform_conv2d +.. autofunction:: sigmoid_focal_loss + +.. autoclass:: RoIAlign +.. autoclass:: PSRoIAlign +.. autoclass:: RoIPool +.. autoclass:: PSRoIPool +.. autoclass:: DeformConv2d +.. autoclass:: MultiScaleRoIAlign +.. autoclass:: FeaturePyramidNetwork diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/transforms.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/transforms.rst new file mode 100644 index 0000000000000000000000000000000000000000..59479f238997942d9461e9845bff0b29e9047528 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/transforms.rst @@ -0,0 +1,283 @@ +.. _transforms: + +torchvision.transforms +====================== + +.. currentmodule:: torchvision.transforms + +Transforms are common image transformations. They can be chained together using :class:`Compose`. +Most transform classes have a function equivalent: :ref:`functional +transforms <functional_transforms>` give fine-grained control over the +transformations. +This is useful if you have to build a more complex transformation pipeline +(e.g. in the case of segmentation tasks). + +Most transformations accept both `PIL <https://pillow.readthedocs.io>`_ +images and tensor images, although some transformations are :ref:`PIL-only +<transforms_pil_only>` and some are :ref:`tensor-only +<transforms_tensor_only>`. The :ref:`conversion_transforms` may be used to +convert to and from PIL images. + +The transformations that accept tensor images also accept batches of tensor +images. A Tensor Image is a tensor with ``(C, H, W)`` shape, where ``C`` is a +number of channels, ``H`` and ``W`` are image height and width. A batch of +Tensor Images is a tensor of ``(B, C, H, W)`` shape, where ``B`` is a number +of images in the batch. + +The expected range of the values of a tensor image is implicitely defined by +the tensor dtype. Tensor images with a float dtype are expected to have +values in ``[0, 1)``. Tensor images with an integer dtype are expected to +have values in ``[0, MAX_DTYPE]`` where ``MAX_DTYPE`` is the largest value +that can be represented in that dtype. + +Randomized transformations will apply the same transformation to all the +images of a given batch, but they will produce different transformations +across calls. For reproducible transformations across calls, you may use +:ref:`functional transforms <functional_transforms>`. + +The following examples illustate the use of the available transforms: + + * :ref:`sphx_glr_auto_examples_plot_transforms.py` + + .. figure:: ../source/auto_examples/images/sphx_glr_plot_transforms_001.png + :align: center + :scale: 65% + + * :ref:`sphx_glr_auto_examples_plot_scripted_tensor_transforms.py` + + .. figure:: ../source/auto_examples/images/sphx_glr_plot_scripted_tensor_transforms_001.png + :align: center + :scale: 30% + +.. warning:: + + Since v0.8.0 all random transformations are using torch default random generator to sample random parameters. 
+ It is a backward compatibility breaking change and user should set the random state as following: + + .. code:: python + + # Previous versions + # import random + # random.seed(12) + + # Now + import torch + torch.manual_seed(17) + + Please, keep in mind that the same seed for torch random generator and Python random generator will not + produce the same results. + + +Scriptable transforms +--------------------- + +In order to script the transformations, please use ``torch.nn.Sequential`` instead of :class:`Compose`. + +.. code:: python + + transforms = torch.nn.Sequential( + transforms.CenterCrop(10), + transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ) + scripted_transforms = torch.jit.script(transforms) + +Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor`` and does not require +`lambda` functions or ``PIL.Image``. + +For any custom transformations to be used with ``torch.jit.script``, they should be derived from ``torch.nn.Module``. + + +Compositions of transforms +-------------------------- + +.. autoclass:: Compose + + +Transforms on PIL Image and torch.\*Tensor +------------------------------------------ + +.. autoclass:: CenterCrop + :members: + +.. autoclass:: ColorJitter + :members: + +.. autoclass:: FiveCrop + :members: + +.. autoclass:: Grayscale + :members: + +.. autoclass:: Pad + :members: + +.. autoclass:: RandomAffine + :members: + +.. autoclass:: RandomApply + +.. autoclass:: RandomCrop + :members: + +.. autoclass:: RandomGrayscale + :members: + +.. autoclass:: RandomHorizontalFlip + :members: + +.. autoclass:: RandomPerspective + :members: + +.. autoclass:: RandomResizedCrop + :members: + +.. autoclass:: RandomRotation + :members: + +.. autoclass:: RandomSizedCrop + :members: + +.. autoclass:: RandomVerticalFlip + :members: + +.. autoclass:: Resize + :members: + +.. autoclass:: Scale + :members: + +.. autoclass:: TenCrop + :members: + +.. autoclass:: GaussianBlur + :members: + +.. autoclass:: RandomInvert + :members: + +.. autoclass:: RandomPosterize + :members: + +.. autoclass:: RandomSolarize + :members: + +.. autoclass:: RandomAdjustSharpness + :members: + +.. autoclass:: RandomAutocontrast + :members: + +.. autoclass:: RandomEqualize + :members: + +.. _transforms_pil_only: + +Transforms on PIL Image only +---------------------------- + +.. autoclass:: RandomChoice + +.. autoclass:: RandomOrder + +.. _transforms_tensor_only: + +Transforms on torch.\*Tensor only +--------------------------------- + +.. autoclass:: LinearTransformation + :members: + +.. autoclass:: Normalize + :members: + +.. autoclass:: RandomErasing + :members: + +.. autoclass:: ConvertImageDtype + +.. _conversion_transforms: + +Conversion Transforms +--------------------- + +.. autoclass:: ToPILImage + :members: + +.. autoclass:: ToTensor + :members: + + +Generic Transforms +------------------ + +.. autoclass:: Lambda + :members: + + +AutoAugment Transforms +---------------------- + +`AutoAugment <https://arxiv.org/pdf/1805.09501.pdf>`_ is a common Data Augmentation technique that can improve the accuracy of Image Classification models. +Though the data augmentation policies are directly linked to their trained dataset, empirical studies show that +ImageNet policies provide significant improvements when applied to other datasets. +In TorchVision we implemented 3 policies learned on the following datasets: ImageNet, CIFAR10 and SVHN. +The new transform can be used standalone or mixed-and-matched with existing transforms: + +.. 
autoclass:: AutoAugmentPolicy + :members: + +.. autoclass:: AutoAugment + :members: + + +.. _functional_transforms: + +Functional Transforms +--------------------- + +Functional transforms give you fine-grained control of the transformation pipeline. +As opposed to the transformations above, functional transforms don't contain a random number +generator for their parameters. +That means you have to specify/generate all parameters, but the functional transform will give you +reproducible results across calls. + +Example: +you can apply a functional transform with the same parameters to multiple images like this: + +.. code:: python + + import torchvision.transforms.functional as TF + import random + + def my_segmentation_transforms(image, segmentation): + if random.random() > 0.5: + angle = random.randint(-30, 30) + image = TF.rotate(image, angle) + segmentation = TF.rotate(segmentation, angle) + # more transforms ... + return image, segmentation + + +Example: +you can use a functional transform to build transform classes with custom behavior: + +.. code:: python + + import torchvision.transforms.functional as TF + import random + + class MyRotationTransform: + """Rotate by one of the given angles.""" + + def __init__(self, angles): + self.angles = angles + + def __call__(self, x): + angle = random.choice(self.angles) + return TF.rotate(x, angle) + + rotation_transform = MyRotationTransform(angles=[-30, -15, 0, 15, 30]) + + +.. automodule:: torchvision.transforms.functional + :members: diff --git a/pretrained_model/pytorch_vision_v0.10.0/docs/source/utils.rst b/pretrained_model/pytorch_vision_v0.10.0/docs/source/utils.rst new file mode 100644 index 0000000000000000000000000000000000000000..acaf785d8176fce48160161d054bce902807bf2d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/docs/source/utils.rst @@ -0,0 +1,12 @@ +torchvision.utils +================= + +.. currentmodule:: torchvision.utils + +.. autofunction:: make_grid + +.. autofunction:: save_image + +.. autofunction:: draw_bounding_boxes + +.. autofunction:: draw_segmentation_masks diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/CMakeLists.txt b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3244efb392b6b7e4671f40b395b926945123bada --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/CMakeLists.txt @@ -0,0 +1,16 @@ +cmake_minimum_required(VERSION 3.10) +project(hello-world) + +# The first thing do is to tell cmake to find the TorchVision library. +# The package pulls in all the necessary torch libraries, +# so there is no need to also add `find_package(Torch)` here. +find_package(TorchVision REQUIRED) + +add_executable(hello-world main.cpp) + +# We now need to link the TorchVision library to our executable. +# We can do that by using the TorchVision::TorchVision target, +# which also adds all the necessary torch dependencies. 
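+# The lines below additionally request range-based for loop support and compile
+# the example as C++14.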
+target_compile_features(hello-world PUBLIC cxx_range_for) +target_link_libraries(hello-world TorchVision::TorchVision) +set_property(TARGET hello-world PROPERTY CXX_STANDARD 14) diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/README.rst b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..aa5427a6f1c34275035280d58f4444e42fb63204 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/README.rst @@ -0,0 +1,19 @@ +Hello World! +============ + +This is a minimal example of getting TorchVision to work in C++ with CMake. + + +In order to successfully compile this example, make sure you have both ``LibTorch`` and +``TorchVision`` installed. +Once both dependencies are sorted, we can start the CMake fun: + +1) Create a ``build`` directory inside the current one. +2) from within the ``build`` directory, run the following commands: + - | ``cmake -DCMAKE_PREFIX_PATH="<PATH_TO_LIBTORCH>;<PATH_TO_TORCHVISION>" ..`` + | where ``<PATH_TO_LIBTORCH>`` and ``<PATH_TO_TORCHVISION>`` are the paths to the libtorch and torchvision installations. + - ``cmake --build .`` + +| That's it! +| You should now have a ``hello-world`` executable in your ``build`` folder. + Running it will output a (fairly long) tensor of random values to your terminal. \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/main.cpp b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/main.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3a75bdec6cb3f02de0b30a6e515d54e81141296f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/cpp/hello_world/main.cpp @@ -0,0 +1,25 @@ +#include <iostream> +#include <torch/torch.h> +#include <torchvision/vision.h> +#include <torchvision/models/resnet.h> + +int main() +{ + auto model = vision::models::ResNet18(); + model->eval(); + + // Create a random input tensor and run it through the model. 
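+  // The model expects a float tensor of shape {N, 3, H, W}; a small 10x10 input keeps this example fast.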
+ auto in = torch::rand({1, 3, 10, 10}); + auto out = model->forward(in); + + std::cout << out.sizes(); + + if (torch::cuda::is_available()) { + // Move model and inputs to GPU + model->to(torch::kCUDA); + auto gpu_in = in.to(torch::kCUDA); + auto gpu_out = model->forward(gpu_in); + + std::cout << gpu_out.sizes(); + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/python/README.md b/pretrained_model/pytorch_vision_v0.10.0/examples/python/README.md new file mode 100644 index 0000000000000000000000000000000000000000..1e6c66b5219c5126b717b41d289449383ce4bc09 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/python/README.md @@ -0,0 +1,22 @@ +# Python examples + +- [](https://colab.research.google.com/github/pytorch/vision/blob/master/examples/python/tensor_transforms.ipynb) +[Examples of Tensor Images transformations](https://github.com/pytorch/vision/blob/master/examples/python/tensor_transforms.ipynb) +- [](https://colab.research.google.com/github/pytorch/vision/blob/master/examples/python/video_api.ipynb) +[Example of VideoAPI](https://github.com/pytorch/vision/blob/master/examples/python/video_api.ipynb) +- [](https://colab.research.google.com/github/pytorch/vision/blob/master/examples/python/visualization_utils.ipynb) +[Example of Visualization Utils](https://github.com/pytorch/vision/blob/master/examples/python/visualization_utils.ipynb) + + +Prior to v0.8.0, transforms in torchvision have traditionally been PIL-centric and presented multiple limitations due to +that. Now, since v0.8.0, transforms implementations are Tensor and PIL compatible and we can achieve the following new +features: +- transform multi-band torch tensor images (with more than 3-4 channels) +- torchscript transforms together with your model for deployment +- support for GPU acceleration +- batched transformation such as for videos +- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats) + +Furthermore, previously we used to provide a very high-level API for video decoding which left little control to the user. We're now expanding that API (and replacing it in the future) with a lower-level API that allows the user a frame-based access to a video. + +Torchvision also provides utilities to visualize results. You can make grid of images, plot bounding boxes as well as segmentation masks. Thse utilities work standalone as well as with torchvision models for detection and segmentation. diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/python/tensor_transforms.ipynb b/pretrained_model/pytorch_vision_v0.10.0/examples/python/tensor_transforms.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..7bb5741947c159e20da8f49289ba802a7b227e0b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/python/tensor_transforms.ipynb @@ -0,0 +1,388 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": { + "id": "vjAC2mZnb4nz" + }, + "source": [ + "# Image transformations\n", + "\n", + "This notebook shows new features of torchvision image transformations. \n", + "\n", + "Prior to v0.8.0, transforms in torchvision have traditionally been PIL-centric and presented multiple limitations due to that. 
Now, since v0.8.0, transforms implementations are Tensor and PIL compatible and we can achieve the following new \n", + "features:\n", + "- transform multi-band torch tensor images (with more than 3-4 channels) \n", + "- torchscript transforms together with your model for deployment\n", + "- support for GPU acceleration\n", + "- batched transformation such as for videos\n", + "- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 35 + }, + "id": "btaDWPDbgIyW", + "outputId": "8a83d408-f643-42da-d247-faf3a1bd3ae0" + }, + "outputs": [], + "source": [ + "import torch, torchvision\n", + "torch.__version__, torchvision.__version__" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "9Vj9draNb4oA" + }, + "source": [ + "## Transforms on CPU/CUDA tensor images\n", + "\n", + "Let's show how to apply transformations on images opened directly as a torch tensors.\n", + "Now, torchvision provides image reading functions for PNG and JPG images with torchscript support. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Epp3hCy0b4oD" + }, + "outputs": [], + "source": [ + "from torchvision.datasets.utils import download_url\n", + "\n", + "download_url(\"https://farm1.static.flickr.com/152/434505223_8d1890e1e2.jpg\", \".\", \"test-image.jpg\")\n", + "download_url(\"https://farm3.static.flickr.com/2142/1896267403_24939864ba.jpg\", \".\", \"test-image2.jpg\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "Y-m7lYDPb4oK" + }, + "outputs": [], + "source": [ + "import matplotlib.pylab as plt\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 303 + }, + "id": "5bi8Q7L3b4oc", + "outputId": "e5de5c73-e16d-4992-ebee-94c7ddf0bf54" + }, + "outputs": [], + "source": [ + "from torchvision.io.image import read_image\n", + "\n", + "tensor_image = read_image(\"test-image.jpg\")\n", + "\n", + "print(\"tensor image info: \", tensor_image.shape, tensor_image.dtype)\n", + "\n", + "plt.imshow(tensor_image.numpy().transpose((1, 2, 0)))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def to_rgb_image(tensor):\n", + " \"\"\"Helper method to get RGB numpy array for plotting\"\"\"\n", + " np_img = tensor.cpu().numpy().transpose((1, 2, 0))\n", + " m1, m2 = np_img.min(axis=(0, 1)), np_img.max(axis=(0, 1))\n", + " return (255.0 * (np_img - m1) / (m2 - m1)).astype(\"uint8\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 322 + }, + "id": "PgWpjxQ3b4pF", + "outputId": "e9a138e8-b45c-4f75-d849-3b41de0e5472" + }, + "outputs": [], + "source": [ + "import torchvision.transforms as T\n", + "\n", + "# to fix random seed is now:\n", + "torch.manual_seed(12)\n", + "\n", + "transforms = T.Compose([\n", + " T.RandomCrop(224),\n", + " T.RandomHorizontalFlip(p=0.3),\n", + " T.ConvertImageDtype(torch.float),\n", + " T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", + "])\n", + "\n", + "out_image = transforms(tensor_image)\n", + "print(\"output tensor image info: \", out_image.shape, out_image.dtype)\n", + "\n", + "plt.imshow(to_rgb_image(out_image))" + ] + }, + { + 
"cell_type": "markdown", + "metadata": { + "id": "LmYQB4cxb4pI" + }, + "source": [ + "Tensor images can be on GPU" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 322 + }, + "id": "S6syYJGEb4pN", + "outputId": "86bddb64-e648-45f2-c216-790d43cfc26d" + }, + "outputs": [], + "source": [ + "out_image = transforms(tensor_image.to(\"cuda\"))\n", + "print(\"output tensor image info: \", out_image.shape, out_image.dtype, out_image.device)\n", + "\n", + "plt.imshow(to_rgb_image(out_image))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "jg9TQd7ajfyn" + }, + "source": [ + "## Scriptable transforms for easier deployment via torchscript\n", + "\n", + "Next, we show how to combine input transformations and model's forward pass and use `torch.jit.script` to obtain a single scripted module.\n", + "\n", + "**Note:** we have to use only scriptable transformations that should be derived from `torch.nn.Module`. \n", + "Since v0.8.0, all transformations are scriptable except `Compose`, `RandomChoice`, `RandomOrder`, `Lambda` and those applied on PIL images.\n", + "The transformations like `Compose` are kept for backward compatibility and can be easily replaced by existing torch modules, like `nn.Sequential`.\n", + "\n", + "Let's define a module `Predictor` that transforms input tensor and applies ImageNet pretrained resnet18 model on it." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "NSDOJ3RajfvO" + }, + "outputs": [], + "source": [ + "import torch\n", + "import torch.nn as nn\n", + "import torchvision.transforms as T\n", + "from torchvision.io.image import read_image\n", + "from torchvision.models import resnet18\n", + "\n", + "\n", + "class Predictor(nn.Module):\n", + "\n", + " def __init__(self):\n", + " super().__init__()\n", + " self.resnet18 = resnet18(pretrained=True).eval()\n", + " self.transforms = nn.Sequential(\n", + " T.Resize([256, ]), # We use single int value inside a list due to torchscript type restrictions\n", + " T.CenterCrop(224),\n", + " T.ConvertImageDtype(torch.float),\n", + " T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n", + " )\n", + "\n", + " def forward(self, x: torch.Tensor) -> torch.Tensor:\n", + " with torch.no_grad():\n", + " x = self.transforms(x)\n", + " y_pred = self.resnet18(x)\n", + " return y_pred.argmax(dim=1)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "ZZKDovqej5vA" + }, + "source": [ + "Now, let's define scripted and non-scripted instances of `Predictor` and apply on multiple tensor images of the same size" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "GBBMSo7vjfr0" + }, + "outputs": [], + "source": [ + "from torchvision.io.image import read_image\n", + "\n", + "predictor = Predictor().to(\"cuda\")\n", + "scripted_predictor = torch.jit.script(predictor).to(\"cuda\")\n", + "\n", + "\n", + "tensor_image1 = read_image(\"test-image.jpg\")\n", + "tensor_image2 = read_image(\"test-image2.jpg\")\n", + "batch = torch.stack([tensor_image1[:, -320:, :], tensor_image2[:, -320:, :]]).to(\"cuda\")\n", + "\n", + "res1 = scripted_predictor(batch)\n", + "res2 = predictor(batch)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 501 + }, + "id": "Dmi9r_p-oKsk", + "outputId": "b9c55e7d-5db1-4975-c485-fecc4075bf47" + }, + "outputs": [], + "source": [ + "import 
json\n", + "from torchvision.datasets.utils import download_url\n", + "\n", + "\n", + "download_url(\"https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json\", \".\", \"imagenet_class_index.json\")\n", + "\n", + "\n", + "with open(\"imagenet_class_index.json\", \"r\") as h:\n", + " labels = json.load(h)\n", + "\n", + "\n", + "plt.figure(figsize=(12, 7))\n", + "for i, p in enumerate(res1):\n", + " plt.subplot(1, 2, i + 1)\n", + " plt.title(\"Scripted predictor:\\n{label})\".format(label=labels[str(p.item())]))\n", + " plt.imshow(batch[i, ...].cpu().numpy().transpose((1, 2, 0)))\n", + "\n", + "\n", + "plt.figure(figsize=(12, 7))\n", + "for i, p in enumerate(res2):\n", + " plt.subplot(1, 2, i + 1)\n", + " plt.title(\"Original predictor:\\n{label})\".format(label=labels[str(p.item())]))\n", + " plt.imshow(batch[i, ...].cpu().numpy().transpose((1, 2, 0)))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "id": "7IYsjzpFqcK8" + }, + "source": [ + "We save and reload scripted predictor in Python or C++ and use it for inference:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "colab": { + "base_uri": "https://localhost:8080/", + "height": 52 + }, + "id": "0kk9LLw5jfol", + "outputId": "05ea6db7-7fcf-4b74-a763-5f117c14cc00" + }, + "outputs": [], + "source": [ + "scripted_predictor.save(\"scripted_predictor.pt\")\n", + "\n", + "scripted_predictor = torch.jit.load(\"scripted_predictor.pt\")\n", + "res1 = scripted_predictor(batch)\n", + "\n", + "for i, p in enumerate(res1):\n", + " print(\"Scripted predictor: {label})\".format(label=labels[str(p.item())]))\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Data reading and decoding functions also support torch script and therefore can be part of the model as well:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "class AnotherPredictor(Predictor):\n", + "\n", + " def forward(self, path: str) -> int:\n", + " with torch.no_grad():\n", + " x = read_image(path).unsqueeze(0)\n", + " x = self.transforms(x)\n", + " y_pred = self.resnet18(x)\n", + " return int(y_pred.argmax(dim=1).item())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "-cMwTs3Yjffy" + }, + "outputs": [], + "source": [ + "scripted_predictor2 = torch.jit.script(AnotherPredictor())\n", + "\n", + "res = scripted_predictor2(\"test-image.jpg\")\n", + "\n", + "print(\"Scripted another predictor: {label})\".format(label=labels[str(res)]))" + ] + } + ], + "metadata": { + "accelerator": "GPU", + "colab": { + "collapsed_sections": [], + "name": "torchvision_scriptable_transforms.ipynb", + "provenance": [] + }, + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.4" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/python/video_api.ipynb b/pretrained_model/pytorch_vision_v0.10.0/examples/python/video_api.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..724de2f0a12e7829111af5744f1d16f8f0f3c82e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/examples/python/video_api.ipynb @@ -0,0 +1,772 @@ +{ + "cells": [ + { + 
"cell_type": "markdown", + "metadata": {}, + "source": [ + "# Welcome to torchvision's new video API\n", + "\n", + "Here, we're going to examine the capabilities of the new video API, together with the examples on how to build datasets and more. \n", + "\n", + "### Table of contents\n", + "1. Introduction: building a new video object and examining the properties\n", + "2. Building a sample `read_video` function\n", + "3. Building an example dataset (can be applied to e.g. kinetics400)" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "('1.8.0a0+7580962', '0.8.0a0+4db3dc6')" + ] + }, + "execution_count": 47, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "import torch, torchvision\n", + "torch.__version__, torchvision.__version__" + ] + }, + { + "cell_type": "code", + "execution_count": 48, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true to ./WUzgd7C1pWA.mp4\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100.4%" + ] + } + ], + "source": [ + "# download the sample video\n", + "from torchvision.datasets.utils import download_url\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true\", \".\", \"WUzgd7C1pWA.mp4\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Introduction: building a new video object and examining the properties\n", + "\n", + "First we select a video to test the object out. For the sake of argument we're using one from Kinetics400 dataset. To create it, we need to define the path and the stream we want to use. See inline comments for description. " + ] + }, + { + "cell_type": "code", + "execution_count": 49, + "metadata": {}, + "outputs": [], + "source": [ + "import torch, torchvision\n", + "\"\"\"\n", + "chosen video statistics:\n", + "WUzgd7C1pWA.mp4\n", + " - source: kinetics-400\n", + " - video: H-264 - MPEG-4 AVC (part 10) (avc1)\n", + " - fps: 29.97\n", + " - audio: MPEG AAC audio (mp4a)\n", + " - sample rate: 48K Hz\n", + "\"\"\"\n", + "video_path = \"./WUzgd7C1pWA.mp4\"\n", + "\n", + "\"\"\"\n", + "streams are defined in a similar fashion as torch devices. We encode them as strings in a form\n", + "of `stream_type:stream_id` where stream_type is a string and stream_id a long int. \n", + "\n", + "The constructor accepts passing a stream_type only, in which case the stream is auto-discovered.\n", + "\"\"\"\n", + "stream = \"video\"\n", + "\n", + "\n", + "\n", + "video = torchvision.io.VideoReader(video_path, stream)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "First, let's get the metadata for our particular video:" + ] + }, + { + "cell_type": "code", + "execution_count": 50, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'video': {'duration': [10.9109], 'fps': [29.97002997002997]},\n", + " 'audio': {'duration': [10.9], 'framerate': [48000.0]}}" + ] + }, + "execution_count": 50, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "video.get_metadata()" + ] + }, + { + "source": [ + "Here we can see that video has two streams - a video and an audio stream. \n", + "Currently available stream types include ``['video', 'audio']``.\n", + "Each descriptor consists of two parts: stream type (e.g. 
'video') and\n", + "a unique stream id (which are determined by video encoding).\n", + "In this way, if the video contaner contains multiple\n", + "streams of the same type, users can acces the one they want.\n", + "If only stream type is passed, the decoder auto-detects first stream\n", + "of that type and returns it.\n", + "\n", + "Let's read all the frames from the video stream.\n", + "By default, the return value of `next(video_reader)` is a dict containing the following fields.\n", + "\n", + "The return fields are \n", + "- `data` containing a torch.tensor\n", + "- `pts` containing a float timestamp of this particular frame. " + ], + "cell_type": "markdown", + "metadata": {} + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PTS for first five frames [0.0, 0.033367, 0.066733, 0.1001, 0.133467]\n", + "Total number of frames: 327\n", + "We can expect approx: 327.0\n", + "Tensor size: torch.Size([3, 256, 340])\n" + ] + } + ], + "source": [ + "# first we select the video stream \n", + "metadata = video.get_metadata()\n", + "video.set_current_stream(\"video:0\")\n", + "\n", + "frames = [] # we are going to save the frames here.\n", + "ptss = [] # pts is a presentation timestamp in seconds (float) of each frame\n", + "for frame in video:\n", + " frames.append(frame['data'])\n", + " ptss.append(frame['pts'])\n", + "\n", + "print(\"PTS for first five frames \", ptss[:5])\n", + "print(\"Total number of frames: \", len(frames))\n", + "approx_nf = metadata['video']['duration'][0] * metadata['video']['fps'][0]\n", + "print(\"We can expect approx: \", approx_nf)\n", + "print(\"Tensor size: \", frames[0].size())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note that selecting zero video stream is equivalent to selecting video stream automatically. I.e. `video:0` and `video` will end up with same results in this case. \n", + "\n", + "Let's try this for audio. Note that presentation timestamps are different so aligment has to be done carefully. " + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "PTS for first five frames [0.0, 0.021332999999999998, 0.042667, 0.064, 0.08533299999999999]\n", + "Total number of frames: 511\n", + "Approx total number of datapoints we can expect: 523200.0\n", + "Read data size: 523264\n" + ] + } + ], + "source": [ + "metadata = video.get_metadata()\n", + "video.set_current_stream(\"audio\")\n", + "\n", + "frames = [] # we are going to save the frames here.\n", + "ptss = [] # pts is a presentation timestamp in seconds (float) of each frame\n", + "for frame in video:\n", + " frames.append(frame['data'])\n", + " ptss.append(frame['pts'])\n", + "\n", + "print(\"PTS for first five frames \", ptss[:5])\n", + "print(\"Total number of frames: \", len(frames))\n", + "approx_nf = metadata['audio']['duration'][0] * metadata['audio']['framerate'][0]\n", + "print(\"Approx total number of datapoints we can expect: \", approx_nf)\n", + "print(\"Read data size: \", frames[0].size(0) * len(frames))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "But what if we only want to read certain time segment of the video?\n", + "\n", + "That can be done easily using the combination of our seek function, and the fact that each call to next returns the presentation timestamp of the returned frame in seconds. 
Given that our implementation relies on python iterators, we can leverage `itertools` to simplify the process and make it more pythonic. \n", + "\n", + "For example, if we wanted to read ten frames from second second:" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total number of frames: 10\n" + ] + } + ], + "source": [ + "import itertools\n", + "video.set_current_stream(\"video\")\n", + "\n", + "frames = [] # we are going to save the frames here.\n", + "\n", + "# we seek into a second second of the video\n", + "# and use islice to get 10 frames since\n", + "for frame, pts in itertools.islice(video.seek(2), 10):\n", + " frames.append(frame)\n", + " \n", + "print(\"Total number of frames: \", len(frames))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Or if we wanted to read from 2nd to 5th second:" + ] + }, + { + "cell_type": "code", + "execution_count": 54, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Total number of frames: 90\n", + "We can expect approx: 89.91008991008991\n", + "Tensor size: torch.Size([3, 256, 340])\n" + ] + } + ], + "source": [ + "video.set_current_stream(\"video\")\n", + "\n", + "frames = [] # we are going to save the frames here.\n", + "\n", + "# we seek into a second second of the video\n", + "video = video.seek(2)\n", + "# then we utilize the itertools takewhile to get the \n", + "# correct number of frames\n", + "for frame in itertools.takewhile(lambda x: x['pts'] <= 5, video):\n", + " frames.append(frame['data'])\n", + "\n", + "print(\"Total number of frames: \", len(frames))\n", + "approx_nf = (5-2) * video.get_metadata()['video']['fps'][0]\n", + "print(\"We can expect approx: \", approx_nf)\n", + "print(\"Tensor size: \", frames[0].size())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. 
Building a sample `read_video` function\n", + "\n", + "We can utilize the methods above to build the read video function that follows the same API to the existing `read_video` function " + ] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [], + "source": [ + "def example_read_video(video_object, start=0, end=None, read_video=True, read_audio=True):\n", + "\n", + " if end is None:\n", + " end = float(\"inf\")\n", + " if end < start:\n", + " raise ValueError(\n", + " \"end time should be larger than start time, got \"\n", + " \"start time={} and end time={}\".format(s, e)\n", + " )\n", + " \n", + " video_frames = torch.empty(0)\n", + " video_pts = []\n", + " if read_video:\n", + " video_object.set_current_stream(\"video\")\n", + " frames = []\n", + " for frame in itertools.takewhile(lambda x: x['pts'] <= end, video_object.seek(start)):\n", + " frames.append(frame['data'])\n", + " video_pts.append(frame['pts'])\n", + " if len(frames) > 0:\n", + " video_frames = torch.stack(frames, 0)\n", + "\n", + " audio_frames = torch.empty(0)\n", + " audio_pts = []\n", + " if read_audio:\n", + " video_object.set_current_stream(\"audio\")\n", + " frames = []\n", + " for frame in itertools.takewhile(lambda x: x['pts'] <= end, video_object.seek(start)):\n", + " frames.append(frame['data'])\n", + " video_pts.append(frame['pts'])\n", + " if len(frames) > 0:\n", + " audio_frames = torch.cat(frames, 0)\n", + "\n", + " return video_frames, audio_frames, (video_pts, audio_pts), video_object.get_metadata()" + ] + }, + { + "cell_type": "code", + "execution_count": 56, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "torch.Size([327, 3, 256, 340]) torch.Size([523264, 1])\n" + ] + } + ], + "source": [ + "vf, af, info, meta = example_read_video(video)\n", + "# total number of frames should be 327 for video and 523264 datapoints for audio\n", + "print(vf.size(), af.size())" + ] + }, + { + "cell_type": "code", + "execution_count": 57, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "torch.Size([523264, 1])" + ] + }, + "execution_count": 57, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# you can also get the sequence of audio frames as well\n", + "af.size()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Building an example randomly sampled dataset (can be applied to training dataest of kinetics400)\n", + "\n", + "Cool, so now we can use the same principle to make the sample dataset. We suggest trying out iterable dataset for this purpose. \n", + "\n", + "Here, we are going to build\n", + "\n", + "a. 
an example dataset that reads randomly selected 10 frames of video" + ] + }, + { + "cell_type": "code", + "execution_count": 58, + "metadata": {}, + "outputs": [], + "source": [ + "# make sample dataest\n", + "import os\n", + "os.makedirs(\"./dataset\", exist_ok=True)\n", + "os.makedirs(\"./dataset/1\", exist_ok=True)\n", + "os.makedirs(\"./dataset/2\", exist_ok=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "18.4%" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true to ./dataset/1/WUzgd7C1pWA.mp4\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100.4%" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi?raw=true to ./dataset/1/RATRACE_wave_f_nm_np1_fr_goo_37.avi\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "102.5%" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/SOX5yA1l24A.mp4?raw=true to ./dataset/2/SOX5yA1l24A.mp4\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "100.9%" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g23_c01.avi?raw=true to ./dataset/2/v_SoccerJuggling_g23_c01.avi\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "101.5%" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Downloading https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g24_c01.avi?raw=true to ./dataset/2/v_SoccerJuggling_g24_c01.avi\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "101.3%" + ] + } + ], + "source": [ + "# download the videos \n", + "from torchvision.datasets.utils import download_url\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/WUzgd7C1pWA.mp4?raw=true\", \"./dataset/1\", \"WUzgd7C1pWA.mp4\")\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi?raw=true\", \"./dataset/1\", \"RATRACE_wave_f_nm_np1_fr_goo_37.avi\")\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/SOX5yA1l24A.mp4?raw=true\", \"./dataset/2\", \"SOX5yA1l24A.mp4\")\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g23_c01.avi?raw=true\", \"./dataset/2\", \"v_SoccerJuggling_g23_c01.avi\")\n", + "download_url(\"https://github.com/pytorch/vision/blob/master/test/assets/videos/v_SoccerJuggling_g24_c01.avi?raw=true\", \"./dataset/2\", \"v_SoccerJuggling_g24_c01.avi\")" + ] + }, + { + "cell_type": "code", + "execution_count": 60, + "metadata": {}, + "outputs": [], + "source": [ + "# housekeeping and utilities\n", + "import os\n", + "import random\n", + "\n", + "import torch\n", + "from torchvision.datasets.folder import make_dataset\n", + "from torchvision import transforms as t\n", + "\n", + "def _find_classes(dir):\n", + " classes = [d.name for d in os.scandir(dir) if d.is_dir()]\n", + " classes.sort()\n", + " class_to_idx = {cls_name: i for 
i, cls_name in enumerate(classes)}\n", + " return classes, class_to_idx\n", + "\n", + "def get_samples(root, extensions=(\".mp4\", \".avi\")):\n", + " _, class_to_idx = _find_classes(root)\n", + " return make_dataset(root, class_to_idx, extensions=extensions)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We are going to define the dataset and some basic arguments. We asume the structure of the FolderDataset, and add the following parameters:\n", + " \n", + "1. frame transform: with this API, we can chose to apply transforms on every frame of the video\n", + "2. videotransform: equally, we can also apply transform to a 4D tensor\n", + "3. length of the clip: do we want a single or multiple frames?\n", + "\n", + "Note that we actually add `epoch size` as using `IterableDataset` class allows us to naturally oversample clips or images from each video if needed. " + ] + }, + { + "cell_type": "code", + "execution_count": 61, + "metadata": {}, + "outputs": [], + "source": [ + "class RandomDataset(torch.utils.data.IterableDataset):\n", + " def __init__(self, root, epoch_size=None, frame_transform=None, video_transform=None, clip_len=16):\n", + " super(RandomDataset).__init__()\n", + " \n", + " self.samples = get_samples(root)\n", + " \n", + " # allow for temporal jittering\n", + " if epoch_size is None:\n", + " epoch_size = len(self.samples)\n", + " self.epoch_size = epoch_size\n", + " \n", + " self.clip_len = clip_len # length of a clip in frames\n", + " self.frame_transform = frame_transform # transform for every frame individually\n", + " self.video_transform = video_transform # transform on a video sequence\n", + "\n", + " def __iter__(self):\n", + " for i in range(self.epoch_size):\n", + " # get random sample\n", + " path, target = random.choice(self.samples)\n", + " # get video object\n", + " vid = torchvision.io.VideoReader(path, \"video\")\n", + " metadata = vid.get_metadata()\n", + " video_frames = [] # video frame buffer \n", + " # seek and return frames\n", + " \n", + " max_seek = metadata[\"video\"]['duration'][0] - (self.clip_len / metadata[\"video\"]['fps'][0])\n", + " start = random.uniform(0., max_seek)\n", + " for frame in itertools.islice(vid.seek(start), self.clip_len):\n", + " video_frames.append(self.frame_transform(frame['data']))\n", + " current_pts = frame['pts']\n", + " # stack it into a tensor\n", + " video = torch.stack(video_frames, 0)\n", + " if self.video_transform:\n", + " video = self.video_transform(video)\n", + " output = {\n", + " 'path': path,\n", + " 'video': video,\n", + " 'target': target,\n", + " 'start': start,\n", + " 'end': current_pts}\n", + " yield output" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Given a path of videos in a folder structure, i.e:\n", + "```\n", + "dataset:\n", + " -class 1:\n", + " file 0\n", + " file 1\n", + " ...\n", + " - class 2:\n", + " file 0\n", + " file 1\n", + " ...\n", + " - ...\n", + "```\n", + "We can generate a dataloader and test the dataset. 
\n", + " " + ] + }, + { + "cell_type": "code", + "execution_count": 62, + "metadata": {}, + "outputs": [], + "source": [ + "from torchvision import transforms as t\n", + "transforms = [t.Resize((112, 112))]\n", + "frame_transform = t.Compose(transforms)\n", + "\n", + "ds = RandomDataset(\"./dataset\", epoch_size=None, frame_transform=frame_transform)" + ] + }, + { + "cell_type": "code", + "execution_count": 63, + "metadata": {}, + "outputs": [], + "source": [ + "from torch.utils.data import DataLoader\n", + "loader = DataLoader(ds, batch_size=12)\n", + "d = {\"video\":[], 'start':[], 'end':[], 'tensorsize':[]}\n", + "for b in loader:\n", + " for i in range(len(b['path'])):\n", + " d['video'].append(b['path'][i])\n", + " d['start'].append(b['start'][i].item())\n", + " d['end'].append(b['end'][i].item())\n", + " d['tensorsize'].append(b['video'][i].size())" + ] + }, + { + "cell_type": "code", + "execution_count": 64, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'video': ['./dataset/2/SOX5yA1l24A.mp4',\n", + " './dataset/1/RATRACE_wave_f_nm_np1_fr_goo_37.avi',\n", + " './dataset/2/v_SoccerJuggling_g23_c01.avi',\n", + " './dataset/2/SOX5yA1l24A.mp4',\n", + " './dataset/2/v_SoccerJuggling_g24_c01.avi'],\n", + " 'start': [2.9344678384893816,\n", + " 1.6827470772443045,\n", + " 3.9380918322335887,\n", + " 8.400625043794742,\n", + " 0.9696198736175933],\n", + " 'end': [3.4367669999999997,\n", + " 2.1999999999999997,\n", + " 4.471133,\n", + " 8.9089,\n", + " 1.5014999999999998],\n", + " 'tensorsize': [torch.Size([16, 3, 112, 112]),\n", + " torch.Size([16, 3, 112, 112]),\n", + " torch.Size([16, 3, 112, 112]),\n", + " torch.Size([16, 3, 112, 112]),\n", + " torch.Size([16, 3, 112, 112])]}" + ] + }, + "execution_count": 64, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "d" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Visualisation:\n", + " \n", + "example of visualsed video" + ] + }, + { + "cell_type": "code", + "execution_count": 69, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAqgAAAKaCAYAAADyCqv6AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/d3fzzAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9aaxlWZbfh/32cM65w5tijsg5sypr6O6qrmYXSTdF0aRJWyQNWIQo2hAM6oMMEJYNeIAswbJkQwSkD5QhgBBEG4IgWTBswwIlyLRlk5TZpLtbcLObbVZ3DVlZVZmVQwwZc8Sb7nDOHvxhrX3OeS9evIjKyozID7GrXsZ79557zrl7r7P2Wv/1X2uZnDMvxovxYrwYL8aL8WK8GC/Gi/FFGfZ538CL8WK8GC/Gi/FivBgvxovxYozHCwP1xXgxXowX48V4MV6MF+PF+EKNFwbqi/FivBgvxovxYrwYL8aL8YUaLwzUF+PFeDFejBfjxXgxXowX4ws1XhioL8aL8WK8GC/Gi/FivBgvxhdq+NPe/Ov/q/9Ffvvtt5lOGtq2495ey+6q4+bN26y7jul8ypWLF5hMLD985/u88+4Pee/DD6m8Z3NWM2kq6srTOPAYsjFgACzWWiCRc4asLxswGDIGjMFaA4BxBmPMcBzI38i5qqrCG5jP5ly4cIE333iDl6+8RNNMcM5ijMFap5+TH7AYW4G1YBwxQ9sFdg/2sa6Rz1pDjB2LZcu9Ow+4e+s2D+7c4nD3Dr/w1bf5I9/8FXY2tyEbcky8/+F7/ODdd7l97w6RxNu/+DUigel0Qt3UuMpRNTXOWIw1GGupqob1es3h4YrJZMbO9lmmk01+4Vf+MNV0zt7+Abdu3OK1l88TQoetKpyvMK46uliGk8eTijQ87nPHTpGf4rjvf3Cbf/l//1/y//kbf+Upjv58x1/7N/+X+fz581S+pusCu7uHXPv4PnVTs7m1wbkz20wnln/8e/+I7//wh9y8fYeUIceEt5naWyZ1xdbGnOl0KnJKxpgMOQLlXzDWAo4ymcYarMqsKRNsiswayAYLOFOeA3DG4H3FZDLhjTde46tf+Spbm5vUdYUxBucczhmMsRjryIjcGufAOMASYmDdJdZtS4yRmBIffPAxF89eoFuvaCrHN77+Zf6Lv/Wf45Phy1/6Mr/w9V9g4itijNy9d4+f/PR9vvO97/LwYI9f+tYvMZk2VLVnOptiKrnfnDMpZUDuq+sSzlVMJ3PO7JzjtdfeZPv8Sxhfs1otWS8PmE8n4ETmXVXr/Y/E5CSJeZoCI0+QtKeV3e+98wn/0r/2/+C3/sv/2XOX3X/73/hX8sWLF4kh8eDBAZ98ss/m9iZbOxtsbsyYNp6rH7/Pf/Z/+3+yd7gmZoCMIVI72JzNmM+mNLXnYH+fuvY0TU0zqTBZdG4mkcmknEkAOeMMWGNEJo2BbAf5BcDirOpTA17fstbinMdbj7WWra0tXn3lZV5+6TIXLpynaRq891jrsM6TFBfJxoExZJP1PDUxRJarFfcf7vLRR1d58623Odh/CDnwq9/6JVJo+dH3fsDHH33MarHmn/oTf5qz587h64qu63jw4AG//Xu/yw9/9ENeff0Vzl86D0Y2jkAiZ0gpE0JkMpnqHuGofM1LV15lPt/k5VdeZ2vnPPiGq1evcWZng8pbrDVka3D17KjslvGzyvBnpHvfeecB//q//nv87b/7Tz9X2f1X/sd/JZ85c4a2jRwerphvb3HmzDY7m1vEruX69Y/5P/0n/ykRR8qiG73LVMCbb7xK13Xs7++zv7+PtVDXFVXlqaqK6bzSvTthR3YAKWIQ3Zr7iTJgLGBl/7flVYszYgdYB0W6izzXVcV0MuHs2bN89Stf4bXXXqWuG7CWjOh1Y7zoYESXoXYF2XBwuOCTm7dICTa2trl+4yoxtPyZP/XHcWSIiZs3PuH9n7zHj77/Ln/hv/MXuHLlJabTKSkl7ty7y2/91m9x/eZ1rLe89aU3wBmSSaScyTkTQiYCVVVTVzV13TCbzXnt1TeZzzfZOXue85dfAtPw3nvvsbO9wdbmDJDPeT8hW33uEPkyJp0sij+n7VA+/jj5ffcH7/Fv/Kt/nb/16//RY890qoFqXFln2ZhtbzSa/v7K5pnIxJSIEVKMmLymXbc0tSNOKmZNI+KQDeRMThHrIKUEOWMy4Kx+LVVgMWOdGKalHJbJxSDI+s0zIQSyNRwsDsm3ErHrcMZw+fJlmqaR+0sZq8q1/GSTRdicwWCxzpLSjMU60XUd0RqsyazXazKwtbXJxqTGvnyBqx/9lK+98SVmkzl1VeNqx5mzZ7ny0hWW7YqrN67z8OFD5ltzNcLlqco5k22ZO4v3nrZthzkv95fBpEzsIqvVgowIpa8rrK/UKCkfOkWW8uPlqAhOzifo29GL+bHnz4zPPouHvHT9N4G/8ri7eWbjiOxa0xuC/fvG4L0npETMyE/MsimnRIiZtossli2T2ZwQOyDjnKX2FTF2JBzoRg9yHoPBJvGCRPGpstSN0RjIOZER+bMpAZCdJQdgBdev38BgeO3VV3WDr3RjtYhfV5y9BFkcPUzGGMNkUoOB9bqlbVuWyyVNU2OIOAvWWf7Qt3+V3/2vfpuPr11lY3OTr771ZbxznDl/jpe7lgf7e3zne3/A4nCJdRbnHTFGUg5i+OpPCInpdIazFc56rDE4pyrFZKw1uGKAey/PmbW4yiMGvYwMjxVS8xjBzqN/T5Xdx57/qOxuuwt8I/y5ky/2rIdFvrjNjMXWZNlUnXF47xm+nehDgyOnLI5WhpgyISRCWBGCOCx1XY2cJRg/2Vn1bMoRYxzkqFcYAQRmWJNoBpAhZ0hWdOzu7gNyiiwXC2JMvPTSFYyxYkLnAMZirSORIBtZKmPIKeCsoXKOynmapmI6aVgvK0IUZ9A5y+tvvsFqueL9H7/P93/4Dt/+9rfZmTTMZjOMs3zpzbf46KMPWRwu2H+4y+bWJjgwep9ikRvW6zXOVQKKODG0rXX9PoEB5+Rv7zzOGpI1uMr3hns/e4+T36NTfGQkTpfd/rgnyO6G2+at8MsnX+RZDiuqzpjcfwWrP1lBIochGwuIgRQTWJuJWeQsYxQogNW6o+0Cznd0uWY+E8DJGnkOcs4klT95NsqzXp4LMeyIYI0Fk4kklV8LI8MsW+iiIS+XpLt3gUzTVFy4eJG6aUQ+g8E6J3rXyD7Rj2yYNDUvXb7Cw4d7TJuaxnu6HIBMNmCt4ey5M3Ttq/zonR/x8bWrbG5uMZlM8FUljt2rr7K7v8vu/i77D/fZOb9Dec5TNr0N5qxVebWklHHOjZ5pwBhSSuQ0fEfnPK72ajuY3vmxg5Z8ZJykf3MRP3PsxROctqMO1lG5nfvXeT3/Sydet4zTDVTrwFox4hTtOeJRjx5i78RDzhlizqy7SEqQUoSU8NZTVwNqUoQqq2dgcoaUVZElTJZNPiGGWjGJM2Cyvl++tCqcnDKHeUG+D/Pr1/G+4uzZM8xmM/F+M+RsjyrnnMTzshlrLHVdEXOkC5EUIwlYr1
ainJxlMp/y6uVX+OTGx9y49Ql13XD50mWcs2xubnLxwgV29/e4fe8uD+4/YL45F+WdMy5DygkiJGOwGdn4UxoMcMNgTGXUmJf3rbMYK578IAxGDST6xT+y9xzZhwZpM70HNSiTfl6ObfAF2R4+fPTkpZTu1GTezIOx/TyHsQ7KfKnxVp624YF1YjgZS86m/x4JQ0wQIoQkqEvbduSc8d7hJ7Vs4CSd0SxoFCqhGUwSYyxjRLYxOq2y0SczetBBlV2GAAcHB9y6dQvvPDlnLlw4x2TSlG+GMZlsUm/sGujv32ZH5RzRWWKMOGtpmooUW0yOZJM5d/4cZ8+fZblYcu36dd545TWm0yl13bC9vc2VK1f44OMPefjgIVXtaSY1XdsSrTiDKSZCjHRdxNqKurbU6jg6p3rimIyQM4aiS4x+D5FdcTOPye7446fJ7ui1cp0jsjt+r1/8R2V3bR7ygfm7wFefIFmf/xBkxso+2D+cjKxtBMlEnsuCHokMGlLWTR5DTIJ2x9wRYsJXHfW06hFBg8WQxGjI4myJHOVH5zJnUkpYI89IWbU80lEpGXIy7B/sA/K8TKcTzpw5Q1XX4jSaNHwfXUubLdgExuCcYTpp2JzPaaoK5wwp0jth8405Fy9dZPfhLh99/BFf/4Wvs7m1SdXUTMyESxcvcunSJe4/vM/ug122Njd1Co1sxkaiF9kcgVoU+NA5RRzKI3XCjcEaK06nKMXR54d/joyTZFdfs0/Su+Wa/e/lnaOy25oVH+UfA2+ccAPPbhincmuHPTYrumEQ58J7T0oKQOk8ZHSuMb0jkwXNIWVIMcG6U6Tbkp0VQ3FkJRV9ILMjnxVdWzT0YEz2S5KzmjBGAYBEyJnVOnP/wQOuXruGqyq2t7eZTCaAJacE1omeN0BOvTPjncVOJ4QQ8XWF944YR/uOgaapOXv2LG+8+QbXb1znlZdfYXN7i6mT6OrFS5c4d/sm63bNw4e7nDl3Bu88KWeizYIeW9m3JCph5ZlU58oY9RIwxJgEDNHony3IrylOQlag7Nj+3k/UyXI7mB76ADxG5x6xSsyjcrsyK97jXeAXHytTpxuoxvU/1sbBs2QQjWxML3jeeTEqsyGkqPZfJsVWUEZfY40Rr0a9IDCQkxhuWdRl1q3HIt6B1S/cm2JRVchYeWPlc10m50Ou3/iEytcYK/dX1zVFXiUUkEgZjE2QjC6ceCKTiYPVmraLhBBYrdfMJjO6tbz/yquvcuXKFT65+Ql1XXPm7BnqeoPZbMb5c+fZPzzk9t073Lhzm5deeVnmICUxRAOEHMTgtGIYhRAVSUY38cE/N+hmlHL/oB91avIJvz/GnX/Mp3obovzXjF+U6+aRgD16Dflk4zyvb20/8drPYojMitOULOpcHTkAay11VfUOQXloxYtX5ZjlQWu7QAxBnDHvcLZ4/CKxCdl0cwmZpiSGMUZcyDwYZWSrEQmIZEXMi9Ua6IAHu7sYYwl6zfPnz9I0ZSOV+3PWQIqjtTGkGPHW4J0YidNJQ11VtM6SYiDnRN1UvPbma3z0wUfcvH2TB7u7TGZTrLXM5zMuX7rES1de4r2fvs/m5pyNjRltjiSbiVHkOEb9XX9yLhEW11NzUAdUlFsUmEKADbBlayrjc5JdynVOl92WW9yw/wfgf/LE63/eQ+TWg01YM0aaB4PJjiJBsvyWrIhnkV1wIsMYQpdYrTucs8zNlNxUVN4JElMMTgUL9AkAOIagJHISFNEoKmQAm9GogMoyjrZr2dvfJ4TA1tYGVV2xYTapKtElmRJdKN8jqZFtsBZm0wk7W1s0tcdbSzSiswGct5y/eJ52veaD9z7g/oP77Jw5QzOZ4Lxne2eH1159jd3dhxzsHdC1gWndQEo9ailO/oCQjiNrg/gkoiK3Ze6NRkOOiukpsptP/rM4/sNrJ8vuUTfs+DXkmEN2ecf+BvDfevT6z3BYJ3JrTFIjXvb/rI6ItVaMNglGqXsv2jPmYiyJcZUo6wHZGrqUWbWBECB5h60N3hUbQBF85Dq9cQr9WpUoVxlFT5d932RBHLGWmBLL1ZqPr15jMp3Kd7OWqqrJSbT+4DVkMMWSUQdqPiUxyFYx7NDXNjbm/MIvfp2//+v/gLv377G1vSXGZuU5e/4sly9f5vDwgBs3bhBDYlI3JIPQBACrwIpVADHGYqBaBRWd6t5CoUy9yPbgcnEue0fx9LU9rnPz+J0n6NzM6P3RKy13+cT+p8BffOx1TzdQsxiJzjqS8xgTSTH1CEhKSYzMXLgcuiFjiNmSsxqBxrK7v6SZbuAwhBDwlUxu/1WzhDyj8oTEq5bzS8TU0JNJ+okoKEzC5TLhYvDu7e3x/k9/ynq9Zr1qefnly0Al4f7cOxmYLIa0EaiWnDN11fRzvlqtWS4WXDhzltStsCSwmW//0W/z9/723+XDqx9RNQ2/+o1v4aqKM2fPElLi4PCQ9z/6kMXBIVXtsN6QVoloEjFE9W7AmD0mk9lgmPaCJgtpDWr4K6qRMj1m3q/54KUdFwJGhxwfGUbw++OEQCW4f96LuJlHjjGNYfra/AknfDYjx4zDYSWohDVOEE0N8xkMJhmcFcUyOIti/Bkk/BQjZBwxGdoukdtAiInJpMbXXnmh8jnxk3ovSJ4VGBmnVuSzmLXFK81iHFsDziaIATrL3fv3WCwOWSwW/PIvf0NRKCfGrMmkGHWjFeTJYMFEcjZ4a9iaz1jtbFM5gyUh+Jo4WV/60lu06zUPH7zLD3/0LucunGM6nbKxsYHznq+8/TYfffwR6+WS5cEh082ZRp0VYcP0vMKyaZdQU9FguWAaWackJYwtimswqvNIvnT1ji7m42T3SULwM8juG8z438bHe/LPdCTVvUaQosL7zQnVtQ7vxcD0zkIyxCRWf0pCvehiYqpc5ZSToKq6q+7tL+lCYlJ76sozrb2GYa2ul0augIIylTWT+xAjsqxiSqbf7EwuOlmNgJR498c/AWu5cuUKZ86ckY01J7JVBKuAPsUUMxZbe7a3N5nNJtR1RQi231hzhvlsxisvv8wvf+ub/OCdH7C1JaHSummYzKa8/MrLfHLzBtevX+f2rdt8eftLOA+5hIStwzqHr2rlQ8p39d4Lhcr5fvcuvFVjEi4VvT3MydGRj/56kn41jxx5wjF6bT3+NNltaHkt3z3tbM9kVLbCWYezXnjyWe46anTIGtuH/wsKnXJGsBlxvHGGqJ+zSBTMeYd1gniv12u6dUfXRebTCb4qG3n5Rx0JehMBGC1DMVhz0QsyryFnTLR4pR7l0LG7u8tPfvIey+WSEAIXL17G+6DKzGJRmczFoMg9SrpYrrBWdCQ5E0dIq/OeV156iS+99SYfffQBq9WSP/Qrf4jN7W3m8zkvvXSFxeKQ27dv83B3j1e2X8J5R9JzWO9x3svcxkwInXxH8bzEQA1JzSsBEKyzqj+G+cgwAjaPyW0ZJ/hG+YTDhmOOPhODbj9J52b+2hMMkFOz+FP5MYZsLcZZRaaK4vT9fVeVo6r86N4kcSRjCRFiMvp7Zrlcs/twj7ZLojStE
8K8siESgix1JFIKpBRFyaZIDIEYoirKTAyy8YoSzoruREIKHK6WfHztKu/88B1+8pP36bpIIVLL8UGQnZTIKZKT8EVi6HAWGu+onKWuHBvzOU2Pwkam05qvfu2rVE3Nj3/yIx7u7Qlfr/Jsbm/x6uuv8dorr3L75i0We/vkLtKt1oRVS7duCW1Hu+5YLleELmrSie2pEvRh0DLXhpQTOamR+hmMp0keGR+XTxbJftxZ1/xfPjz/c97VZzNCSmIsKsFdQk9Gwxy2NxqbSpPW8hjRF5pKyIkuBDCOGKGLmTZkdg+W3Huwy4O9Qw6WLSEawOGsk3MjIdZimA2jWGq5V97yk3sZjjkTUqQLLSF0HC4Puf7JdX7v//d73LhxneXigJ7enhOo/BKT/JsjZOGbzmdTzuxssrWxQVPXWH2+IGMsvPLKK/zCL3ydd370Djdv3eRwcUhICV9VvPraa3zl7a+wWqy5ce0TXBbek3ee2tdM6ppZM2HaSCKDRE6SoMu+wuAwWecaldlednWef97xFKcY41Cnjesc8O/wuz//PX0GI5FVPE1PiXDWUSnaH1OHMRZnJbmjcPIsBQEsXN9KQv5YkrFk48FVmKomJDhctezuH3C4bIlq/NLLyJBEcbwdtlHAoMhu0v9l/Qk5iCynREiR3b09fvSjH/Huu+9y7do1YlSdnkT/FvR1/LBYYGtjQ5LsVktiCDI3qqONMWzM5/yRP/yHMSbz8bWPuf7JdYnEARcvXuDNN9/k0uXL7O8fUFU102YiP9Mps+mU+WxG09RU3gmKnKKEp12Fcb6X3RQ1TyLlI5y+U8eTVPTTir95suw69tnkv3rKE35+wzgLzpKteB3WeirvZV+0jqpu2Jhv0FQV3lmcExS0x0yV9keMSmnzOF/j66b/sSq7e/sLHu7us1x1hATJDMmihTZyfORhI+vtypLwKdzrRIiBLnSSY5Aie3u7fPjhh3z/+9/no48+oG3XKrdB5TiQUyCnlpxbcu4gBZrGs7O9yc72JjGl3iDMSZwda+Cb3/gG0/mUO3fv8MMf/hDIVM5x9sxZXnnlFd544w3u3btHipnKVzRNQ13XeGtxxkiSrZHob4wBX9fUkwac7XN7CoWyJ6aP14sTNPHPYlo8UYZPP9kt9viP82+cesypCOpw95pEZGyfmyNoihFOBrLxO+XLFO5HRuYlZtk/wZJypA2JEALsHTJpanzllFxd3KDU0/4FpYUco9xHlvdz4Z0q9y5qiDHrOYRU37LKiQcqeN47XnnlZeYbc1xVUfidmCyh/jzw+2AINZ3Z2aH2FmcgGVlw5zyvvfEqq9WKDz/4kPd++j7bZ7ap65rZfMaF8+d58/XX+cd/8Psc7B0wmTRMZo2gc9n0pG5BTHVOzTiZJ+umLx6SQQwAmwRJKetQDA5xh45JzAkOfn7kmONSdvTvUmWhRwZPPoyc4eJO5i//6cgXYugGX/6VEL9S9gX2kflWfrVBMumNIvlkQa06pV7k0YklXxrWbadOUWZSOSa11xAVEg3IaeQBZgXzTIHr+8S9gtKA8K3k+vKZjCPnlnv37vPBRx/RhcDly5mNzQ0KX9AYxRusItkYjCrrzY0N2rYVDqlu7iVoOJ/PuHzlEq++/grf+8H3+Jb7FS5duoSrKubzOa+88gp3797h4e4D9nb3OXfhrDqpYgRlY6mbRhIdzGC4WOsw1hQfQPmJJaELCiw0mOhlwU4fj8huQU6OLPro+Dw+9+myO6Hjyzx44j08k6EbrCAirqeFUFDprLx/X1E55Xxno+igIlEaHg0hSJa/Vn9wlceqYUCOxHbNwf6C0EUmkwrvrVzLWjVR0yNQlDw+Sflu5Z08HJYhxIgtUZ2c2d/fB6DrOowxXLp0haoCcCPQRfIIjFGkM0WMyRKt8MrjN6Y3LDJQV5a33/4yH1+7jjGGi+cvsKlo6uXLl1ksFjx8+JD79x5y8fJ5rPNkNGG2VHBBEnxTSlSVx1ROBBoBOwTEEKNrHKjqDccR7e1xYyy7/dFPI7sCSz/uMJXdNV/O10+5+rMZPf2v6NRCnUMSdKbTGZcuXaJ+sMf+4YJ12wpoFaJEopTiZxVSd84NXGtjFJGssBmSFfrTarGi8w5fe5ETJNN+rLMHpDDL85GV/1vsDVD6RtFI8smYIjlkFotFvx5N03Du3AWm0wmQsM6Q0qATJbs5igM1mxAnHlLU/VPzEAQxYWtrgzdef52rH1/lvfd/wre+9cvM53Omkynnz57njTfe5JObN7l39x5wVpL9VIVa6wSZVupfzkYpBRKdsGpPCTha8nyGuRiDmqeZkSfK7dFFP3r8WOc+QW4rIpdYnnL1J3JQCzdHoXkK94kewElRLHNrjIZRDTZZ9aZlJBJRSdA5G4GlIyyWLSEmLSXhqBtRxpYhflSy+kSUlEidS0qKrJaEZE3/SsKIAQIK3khyx9WrH9NMajKZzc1NrHOycdoIyapxMfCvrDE0dcXmfEZTeyrviEGyAQ2Gzc1NLlw8L2T9qx/x1a9/lS3ncM4xm8+5cuUl5j/5EV3bsVysmM2morQ1iUQMVIdzVg0N03NJxg9OjLF/aHJZ3SeO4+ryMX748RfNCa+XvekRA+Ho8E3g7OsPn+LePv9R0EyDw6jC6EcWL5acVQmW0jqSmJKxwimFHn0VOTSKblmwIiUhQRcixIgFvKLdaIbzeB2KYWjKLj9coXfoUD/AkI4kySxXK27fvqNKyOC8U+J+Ua6KEKQsSAbCE2yqihgiGOGsFo4hRs6xsTHnrTff4Pe/8wfcu3eX+cac7Z0dqqri/PnznD17luVywf7uPleuXMJUo2xRK5tI4R6KUtIs57GjNfrn6Tz0p5Td4+c7SXbLW0+Q3QzEzwLV/QxGn0g2LvOUcu+Dlo16Pt+gbROmDUjMiZ6bLHQRCflnCl3AYn2RHzVEU6JbLDGrVrRprYiXkyRA0aFJd7beb1J5yydMuR5oska9ZFm6ruPg4EAoVHVNVTWcObND00zI2ZAUcDAC4iI4sjzBk0lNirYHE4RbV+xmx0svv8zNW7fZfbjLxx9/zNe//jUq59nZ3uHSpcvcuXuHe/fucfb8GRov8ivfb9jE5bmQKhRm9I1SjCMk6qlW79jfn7/sRhoOee1pbu5zHiNMLhsFkeR1a22PAm5ubmCdZ9Wu6ULH4aLDWTFQRReLHrFlj9Qfi6CyVIbsLOuUCF0gxo6YrHBf64mCDqPbyqjTX0yskTPVP2FHP1AcoJQETFsul9y7d4/r169jjOXMmTPM5hPhfI6UmzEDxaCuK8BJZEudufJ8ZMB7z8UL59nf2+P27dtcvXqVt99+m8p7ZrM5586dZ2Nzg4ODA2azqQBrR+RWjCSLAIVJ+abEqM/QMed/hAeMXM6jx4zn4MkLfmSOHx2ny61liyl/7NRjnpzFj2MgkKMoDX04PYXUk8edkzp5JZRvRpMRUuoN1JxkM123kbbr8F6QmE03wSuVQJDYRFL+FbnYrHJS8Yg0bzohxoV6SEkvGkeE3ZwSN27eFAgcqKpqIEATe7fCyLM1EOe9Yz6b
jGr5FSNWjIIL588Tu8hv/sZvcef2bbzzzGYzEb7Ll7hy5Qr3799ncbDg7NkzVFVFtIPn7XxF5TwhFgTKihdfSkFk+gSrAjQMHqE+Xse9k5PW8thx5jHHHXlRjeZ8/PXHnH+RAz9Y3eHPnn7oMxne1jjjscZpoomGLo0GJZNwOL2XMFTlHBEltFsgSY1IYw3WO7IR1DQlhI7ivNB9rMzPul0LmlF56sph1QDIpZZvHrRDcUbK+pbXDcX3MCQjlS1KKZOIYW9vj5wTIXQ0kwkXL16UDdXI+ZICXqgRY0wSDq4XRytYDfeYRFZErqoq3nz9Dd75wTvcuXOL6WzK5sYmtvKc2dnhyhUh7N+9exfnK5x3g8NiHaZ48oU2YcBVlSCoqf9Cx4Yo0h51ePRdhgn5DGT3+HvHhsGwouZ9Lj3+oGc6hB5VrLVS+km+kxiPTSP1GmMyuMMFZrUmpkBKoqOdNaJLc8Zmo6ipRsEUdHDW4mpDWnfEEFkv16QYaCYNVVXRwzXF+YHeSO635Jz7epKYoVJFj0gpupPSYKR+/PHHVFWNc46dncHhKSiZyIzwZo0xTOuajCPHQHalKobcWM6Zs2fO8NKVy1y7dp3vfe+7vPbaq2xubjKfb3Dx4kXeOHyD3/jN32S1XFFVHle5UaSqbN76PaxVKooYw6lY2OXb98imKGBjj8rvcdnlBNk9cZwgu/2fT5DdJdu8Y/7E4w96RkN0yoD0C/osz3j5PqvVirpp8M2EeYys1itibKm8wySovMVb0bU2JaEJZdGfDrSSDfJ6DLRtS4yt6KAcqZzHWf3RCKTcXKIgpmVZRv/o/eu//d9idyRF0pfLFR999JHKd8L7i6pnzfAMZDlR7hNzTQ+gMTpvzgmcY3Nzi4sXL3Lv3n2++93v8sorrzCfb1DXNRsbcy5euMBHH3/U82BddbSee7HNYoqE0BG6jhwCxldKRcj9MaMbHOaBo995GCfL7SPHHXvhCEc6D4/AycddJvE/eOTK43EqB1U4pE64S5q446wSnI0UPo450YaOnJN45r0hO5STAeWyKs+jYKuFa9WlxMFqxcO9Qw5XK7qulfJUmD4bcDwX/Vavm76YHFGK2TJiReXUG3cxJ9q246OPrvLjn7zHBx9+qNB9Uj5UHEKRJDCJUvt1Op2yXC5Zr9d6X/J9cobZfMZLL1/hl7/1Tf7h7/5Dbt78hC50GGc5d+4cX/nKV5lMJuwfHLA4XDKdTplPJ8ynU+bTKdPJhLqupd4rYjw4p5uD1owlZQhJMvkLudHAaSTSMkfj2sXHh3ncT0ZoCGjY5dFCfCdf8z6s//YTROoZjVIXTsLdYihWzVCiyygZv55M2NreZmd7i9pZfGVxTkqi9OfQDTclSTPCiQHqqgpX1Rhfg69YrtYcLBYsVitVdqqwjdWEQAnDQlmf3HOnj/5PS/ZkCT3KtSNt17K7u8vVq1f5/ve/z507d2jbtcrwoF1NUZI5EZPQEJrphNnGnJiD8msjMQu32znDt771y9x7+ID33n+f/f1dvLHMN2a89tprvPnmmzRNw90790lRkh3suKQUSUu/Mco6z2Q0PEqxQwWnFJ7s6R5P/4wPKuSRcar8Ftl9fBHfI6PhCpfNv/jkA5/BSBFysrLhGym7hLJJxeIzVFUtVUPOn+XKlctcvnSBs2d3qBuPr0wvw95KcmAp/WMYbWxYvPFsbGxQNw0pJVaLFYf7B+wd7BNj0Ds6+kwXP6i4VT3Prbxffhe4l6xoqtTOFTTqpz99nx//+F1u3LhG27YIFzWqvi5Ph0SOrFaJybnQVwZEKKsx+9Zbb3Lhwnk++eQGH3zwASllqqpiNptz5swZvPcsFodS03p0r8W4sFoFpus6YteRoiDSoVvpnBUhTH1JrceNI7r3FNX5eNlV/as/TxpTDnmL33/icc9iCN0uEQm97kHLnrVdx/3797l9+w4HiyXWV1y8eJEvv/UWG7MZtXfUzjKbTAZOdc6YnHBkKmuonaWpHHVtmDee2cSzMZuytbHJ5uYme/sHLBZLui4QtSoEir8Colt7wK1EYo+u5VBFRz6bkiQj5QzL5ZqPP77GT3/6ATdu3NBndaAaCvgm+7TVuqUmayMWDG4wZYldwFq4cOEcX/vqV/nwww+5du0ay+WKpq7ZnG/w+muv07Yt+wcHLBfL0TyLlAkaHFmvVqxWS9r1khRaifBF0bXGSHJqKkCcRUpvmDwIXP/l6YvOPEltPtF+UCrj4060wYpv8sGp1zidg9qX2S0eruy1xglnUzhGAoFb66irmqpypGAIKQmKAj38bpzFZEc2lkgkZquIgNEaaobDxYqudUyqmtm0UcPNjmyxUmhXoC5JeBlM15Rz7x0nA+SopS7Ek0irJTdv3pTFbFu+8pWvUDeNVCoAyE54iEkWUh4Tj3eWSVNhbSndIMkzYKibmm/+0i/x4U8/4Or1q7jK8/qrr1NVFS+9dIVbt24SoyQLvPzay8QUxUEx0plEar7RK/rJtFGOaeoVd/F6Sr3YUzVfP8pGAfmE4wfEjn59Hx1DrcoTz92fCS5svco/90/+q09xX5//cKbG5kqz+IfyIjEGyIla68htbm5x5fJlYkzcv/+Qh7t7xOwIUergYiJ15cR7zzKPXvl/1krhZq+1aZedJFXF2BJjpqosk+lMlJKBIf1v/DinI+tQeFByxwmDEN6NE60RYma1yty8eRNjDG+99Rbnz19gY2OrN6YFbc8aVXJi0HgxdmJMWD9SwBrmeOnKS9y9c5e7d+/xO7/zO/z5P//nqV3NmZ2zrF/q2N3b5eOrV9ne2cT7mYSLlZKyTllrWGqDi9DhazGqO00qgIafafQ83J9Rdo/wTp9edjOBaB78bPf4OY2kG55EcwqaGBUhFfc+pcitmzdZtx1VM2Fze4ud6hw7yzN0q0OIkcZbppOK1Vpk3imf32VR/N5JCT6XMj7VBCvJFplIjpH9gwO889q1phpt2nmAUntXSzjXhSMKqkNJPd2joGopJZbLpW7GSxaLJW+//RUqRTaNRucsnpKPYI3FKgfVUpZZEmW70OKc5dKli3z57S/z27/927z22mvs7JxlOplw7tx53nzjNe7fuy/lrrY2SCmpA5oHKmLOrFZLVqsFVdPgqpp2vcYkaVxgStegJ4xSggsEPT5RrT5RdvszHfv8SbL7Gon/zVPd2+c58mgPK1Sioca36MqU4XBxyOFK6nzWtefS2W0mtWUymzCvKyZ1za2798U5txmLdJuqrKEyBm+FwmUnNeSMbyom6oD/zu9+h73lIasqMJlMqKqKpqlQTgqlTnVxHoyRvJJhxgsNqsw+ve4OoRU9v15x8/ZtFqslxhhefvllqqrRiiaJUs1IaHyDkTpW+9YUpF6Qe185Qgj8xm/8Bn/yT1oBtpoJL7/8Mr/0i7/I/sE+q/Wqn+txfWRb8gJyJqdITB0uaYJ5SUA8Pk4Syn4T0i99kmF5qtyOXxzqs5vjH9RXFiz5AT884SLDeIKBevxbDFur3FfCeclebpqGjc1Nzp07y70Hh1jTSQ3JnLDK5bXWSEU
AUE6qxfoKW8kG72shrIsXuyTGwMZcwvBGs7DFY4+9F5KPGF9D4Wj5K9PnXGYhPYN0EClI1HQ65eKlS8xnG1SVFr3tM1kTGIm9GwzTaUPdSFcqZ4aH0WQJlX7lK2/zySe3uHb1KufPnmPn3Fk25htC1l8u+OTmJ6yWK3zte5L+YBjLlzHGYr2XbFE3+mYjIxyyVB7ow8dloyjLdoL05SIsRbBPkr7x50bXO3b6x9nGt/0D/uPtv8Nf5esnH/AMx3hbOOIgmuHhlva3RsJEIbC5ucl0NmWxWrJYLlmtVqS4xhuJHDjdGUXhiBKSzdNQ+wo3m9GuV8SuI6w7YhCHqWl0gzcA7uhSMfozj5QnBamSgvw2C8JjMVJRoOu4e/cu1lpWqxUvvfQyOztnlc9sINkCpVJqrhorhcqdsYIsFmWdEs5Zrly5zHKx5OOPr3Lt2jVeeukVJnXD9tYWFy9c4ifvvcd6vaae1NS+7mk/os8EGWjbNW27ppoEcioJfj2mRCk9ZKwfieAJsjuWt59ZdssJ8tE/TzpMxwrPh2yd/OZzGkftldwbZUZ1XohReJ2HSw6XayazGU1lmTYTvMnUznFuZ4e9/UPaIGikzwlvLLUR5e+soXYVjbPk3Ejd6Nrh64obNz5hsVjhbGAynTCpK4xzsomjIIzey3ivKvzUbMQ1HFh6JUHPEGNkvV7z8OFD0CjVSy9doa5FxxtFULHD86rTIBxUKfAq3LuY+uz9yaTh7t27/PjHP+arX/0aZ86eYXM+5+233+bHP/kJVVX1hc37YbIaqmqcpAgpkIOl61rZN5T0mpC63IOcHpXd/nkue/2nkt1ygjz8+rjDgBX3+Cl/i+ddB/WxYzxVxmKcIwIxBELsePAgcnZrhqtrnDHUzrA1nbAOndD0DHgHXnn0zhoqb6grSXhztRio21tbeFdLB0rrSdnQtkGO8U4/q1VF8rhWajFIdaqPqaCxjk45EUNgDeztJX764U9pJg1nz5xjMpnq5zQJ2hoyjhQjnQGTMuv1iuViQVhKNaKUEqv1ivv3H+CM5d69+9y4cYOzZ89x8eIFNjfnfPnLX+LO3TscLg7JhRrF0KpYylOW5046aZLFycz5GEKc6feZYX1Gz++RAxkQ/KeW2zLSKe/J2GPJb/P+qcc8wUA9eRgrXyjl1JecqpuG+WzO9vY2XWdYrpYELRHVBfmxdlQ/L2VKb+bSL9f5CpsyXZRJXi7X1M5hvcN5o12BxupusNaHMZCUy/sl6G+ShoeSoFD379/n6tWrgjqdS2xtbeN9LQqwYNxGuC0Go20CNbFKhWFYUcvrr7/O3Tv3ePDgPh9f/Ygz584wacSD39vf49bt2zx8+JCz589KsosiUD3KXpAHoC9aZgrfdSxCY2P1NCE5yeA8+tHxw/cEPXjsVI9+sg1LPtn78ZM+/QUYZbMXcvlyueDg8JC6njCZTTHe4dXzbleSHd9UntY7uhAxOWGzpTBbrcnU1mEnDZ5MC5KB7CGFQGeKvEuo0ui1RaH0rtbwX12bHoXIhdM3JDnFFFksFty5c4dSVaJpJpKI13ci61MGe2/ewCjIRG+gkjM7O9tsbm2yXq957733OH/+gvR6nk45f+4cGxvznjJTRjH4S+H4EKT8Sk5BOhjFgMsixxL6Kpz1J63Rp5XdJ5z4MbJrWVLl05Xl8x4F2ZGvKEhlGwJdbFmFyLoLbMxq5o2nrj3eGrY3ptJ2dNXSxYgzWRo5QO94TbzDmAqUb91MJzSzGbdu3SGljpwiXRuxxuAzkiNgjK6pznnxd0bG5LiUWr/h6XeJKWKjZblakh5krl7/mOl0ws7OGSaTqda/jpiEyrM0bUkpEtqWdi3l+kw0dG1H17VKozqga1vee+99Lly4yPbONs2k5rXXXmNvf4+ohmwxosdyaG35ElHKDhpJwMlJO2zJIih4YE+RtM9Qdh851Umyu0/Nbz/2br4Io1SfqOpaWvRFMa5yShwsDpjVhtpIEfqcOurKknGyXjZjvcEZ0bXSY0EiRGJriW6sqgbvpXWt8xXWObq2JbUdPiUq78BbARpQZwpO9G1zb8BKhHOwZZWXHDvW68jtO7fZ2dkRNN4amrqhwKQZqQUvvR4Sqe14cP8+d27fYXWwInahd9T29w8wQLtuuXbtOtvb25w/f46mrrh8+bJSI4euUDkP1BpjBLwzRhHU0Gnpq0jJVJMkMW2m8dhVGr1znMLytHJ7zKcaXn/0k5HMrjm98+SnMlCdopllo/LOU/mauq5p6gkXL1bs7e3Rho4uBhbLJWmxEsEwBm/E+7ZWw6OaPW+xuKahwtCZNevVisODQ6qmoZ5Ib2/rPdkMRYD7Xs5lEox4rQlw+rp4GVkNQSsZ+yTatuXGjRuEEKQvs/dsbTpyVv5XTyoRxeu83GtO0k+6n3sV2jPb27z++qu8995P+e3f/m2+9tWvUdUTzp45y2q14u69e9y8eZPtHSlHBabn85aHLqVE6DpRhGJNyL1b6DkjvbKkPAscf8rMiZJiHv39iYaCnOPEMNSxz15eTPgX3vnyU5zwOQ4jD2tCNrsYM8vlioe7Dwkxc/b8eWbzDba3p1izxXpxQGUdG7MpKQQWyzUxRTxVYWjjydQWKuOpJw2xksYK080pD3d3WaxWLJfS93s2m1FpJv44ojJeR3FyJVxaJnnsimQSMYliWK1WykVtaSYNr7z8MlVVy2YbNetZykUUuEs32WIwQgpidPrKs7kxZ3Nzk9//zu/z9a//ImfOnKGpGy5cOM/XvvoVlqulysOxxTfl9FLPMIeOFCKx6/oi7KV3Czk/8p2Pjjxy2EYXOP7708ju+LBTZHcr3+TX+PeBv/50J30OoyAmPd3YWJyvSFbqSS/WK7r1gnVl2Z41zM6dofaG7fmEylmW6zVYcLJP443+WOm0l6xQrSRBY4O6bghdxhhpqtK2gRhFTnxVycaYJaRZ9PAjWijTo6r6Ug8YxCxNKfJqybXr1yRhz8A576iqUdKJESSnCx2Q2N/d5d7du9y/c4+4jqwWS5arFetWmrI0VcMHH3zAG2++wcXLYqRevHiRlx++zO7+Lm1o9cSJpOXkyr0Jpy/JBm8Rh0q5sNlI0mJK0himr612bJXgOOD06WW3P/QU2d3Olj/B5OlP+IxHyT3JJjPb2ubhctV3kcJIO96u7Yi1J5lE1y414qp6xml5S1vWDEIQkCimhI2Rqm7IGa0ZXomhql2WurAidC3BO5pJTV15KUkpd4Adrcl4mzsKCmkuQpFebeyzWkU+/OgDMCLTF85f1ATrUs5QuJ8hwmr/gOvXr/H+e+/TLjrQDnxZedcpS87DT3/6UyDzla9+hZ2dLTY3NphOJzhnCSnQ7wlqSFulmJVchRQ7CJ3SA0tOg1aVUQP3SNOj8h3Vhhxk9+eQ23J4j9Y+KrezXPN1Xj71NJ8OQXUG4yBHfRiNpe06Hu7ucu36VeYbm5y/dAmMQNr7B4fs3kvUztHZqJl6YHLEpij0NQ0/TowYqck5FkAbFoR2RdutOFw4tjc31Ut6dIIfRQCH34oRaZ2UwJKivB
nWcPPmTRaLBfsHB/zSL/4im5ubKPeeIpgxBUqnJ/GpJHNZ9lhD6gL7iwVXLl/i8PCQGzc+4f333+frX/8F5rM5ly5eogstH37w/rFwvYyCSMQYWC0XhLCmqus+GYURivok3PRzG6ckBgB0m3Dz104/5nmPsZEnPDSPsV5WNCfuPdzj/sN9msoxn9ac35rhbObi2S02Jw37Bwse7O/jjThMTjf6Cph4i5vMwEk93guXLtJMp9y6fZcH9x+SU+TgcMmkbqjrCl8VbrX2WB4V0qfYksi0x6RcPrUCDJmQOkw00BruP3zA977/PQAuXrjAbDZHKpIYqVOq370NLcZULPb2OTw4YLVcUuWKw8ND7j98wIMHD9l98JDdh7t873vf45e++UtcvnKZ7ck2v/qrv8pPP3ifVbsaHKXcY0tHEqFibIldIMeOvv6EOYqmPdPxBNld8CV+wL/3jG7m043C37TWkgxMN2YcKJ2EGMlJElcX6xaXOy7sbNCtF6QQcSbRVFbj+iKz1mQwiZgCXactTJPD+oDB4qynqhuc0jna9ZouBkJscV1gvjHTFtVD4iyo32FKdnN5hd4hGhyWqFSvRG4TP/3gfVKKrLs1Vy6/ROW1GgSmjyLEdeD27dt8/MGH3Lh2HZckEoXq96B7UgiJ3/tHv8fBwQF/7s/9WXBWi8NbzLEcvTz6rST2pRSwSUpcyTuKohqwTxG+/EzHE2R3zZwP8reezb18ymGsUtcwvVNgECcLTcIWHnNg1a2JehxGS1OGjCVQUk0h0u1KUf35fI5vGqFfOKdVfQR5n84a7BrWywXrdUvbtmxtbhAtVL60uda6qPmoFSFGaikVKDKc+giQUYPYsru7y3vvvcfu7h7tWy3nz1+gaRpytkTo26g+3N3l7r373Lt3n6mbCAChBpxBKA+FE3P37l3+wT/4B/zFf+afITNQTOwjhqXcf+FT9+2VcqTksKSUsF7sFHNCUtiRdTr13Z9xPEFuDQ+Av3PqMZ/KQC0hwiGb0WgPXk/XBa5/8gn3H+4xnc2YTmdsb21zdmOKM9Bah9kK2JRZrFpsTnjEA2hyprEWbxBP3hpMtcF0PmNv/4D7uw958GAX7yomkylV7fG+lClRfmxpEI05kolWzMpi3WX15GPqsMlysDjkxic38N7x1a9+TZAuXwkaZJJuvlZrqiaclj2RK0kqVeUcq7Yl54y3lt/8zd/k8uWX2NraZDZtuHjxAt/85jcVPdU2f+UBMAUgTcTYEdoVvp6QYyAF7XilZZBOpIPAyPA9wbGXGz3+iUfeMOVpeYIJbE444YcfRP61f3mPv/BPnfrR5zuMPqiFC5UkqxPvkTCiIpSrQNcuict9XrtyAWLC28i0gS5WoqQ8WJtxJgm3U9sgG2cxzmsmcU1d1dT1BO9ruhiFwrJe44KhbhotrK7PsxluNCtaZqx6vOX90XEpR0KCHBL7B7v88EfvsFi+xuXLVzhz5vzQTdhkSVCM0AE3PrnBJ9dvcO/2XUyAHCNd6AhBNnnnPP/4H/9jNrY22NzcYGd7m43ZHO+kGcE4+3UwUovRmiTBjIS1eaAEGJQe9KgAP2/ZnfIxX+KvwhPKnjzPUWokW2vxTcPG1jb7q0AXF4Qg5XYkYUI29pQT63ZFSEoTMWrsxUwysaSY0oY1bZDOT1VTc0Fb2HrvsZ00d3DO00xq1ussvMzQMZlOSW0nJalKofWch+pUmf73sryZjBtt9qU0YAlXXrv+MYeLQ1arJW+8/hZWC7Jba3FG2kbu7x1wcLigbQN+FGlIWhoudsLT29vd5dbNm9y5fYcrL1+WOUT2gthHmMZRitRHL4osmzyuOiHh1OPjuOw+In2fUnZPUvMnyW7FXS7wnwH/uxM+8fxHaSphXcdy3fVATwbt7Idw0p0kxa27jsNlK5FSY7XZT5Z6zhTKW8IpcjjJ0jTEOot1pbykJG1aY2mqim5tCEFQ8BAiq3ZNXdU0dU3T1IJE9q3akbrWpV65FQRYDDwt8wSQFGCwWSJZt2/TrTrefjty7sJ56frkPN55YgyEEJSO0uHxCMoHIO1eU0rizGfYPzjgvZ+8x/3799k+s6W2jdayxmI0wlu6UuWkVZSczEPo1sSwpq98keSzMUalVj5e55pHfinjs5bbxEUWjxccfiYD9djdlv1o9FZpz4dxtF0gLVZ0IRNCy/a0wtWepnK4jZlkTZkDMMJLdRbhRim531lLU1fkynD23DmwlsPlioP2gGig7YLcQsr4yg/8ofGdjmfqMaS3UsrHhI7lcsHNWzeZzWdcvnSZzc0tLSQdIUvpFqnpJ0bB/u5DDvYPWC5WmE4U9+Fyyf0HD1kul6zWLdeuXeP1N15jY3OD7e0t3njjDfYO97SrT+9AjaZUFHUMHTlK27QBqpcHT7ivGmJ62vU6Ph2Pe+Pkj+rLA4J7Iopr90jNPzzlnp7fsFk6nyXdCAsn1HnPZDZjOpsTFytpnZtkrkMU7l4MgRwDMXSkGPBOqi9gTY8UYTJdlIL92QgVZUsdN+c8zjmqusbE2Jc1k9a7a1zpMMYQbipZoKKYig4QzyobTTbRkFMxALsOdncfcv2GowuBlOHsmTP9HEjNSkuKid2Huzx88IC93T2M1nWWKAN9PdXlcsnHH33M9tYWO9/4Rl/ybWxg9nKZyz2V17Q1cQyUzNge/XiCZ/1cZJcNDH/4Cff1/EaJAGWyGvm2X2OM1kw1mhinXaMkfBpZrTtChqSITtAqD2VvMUlpA8Yw00zrvrOf7MSAPi/WEhBEP8XIqu0IMeG9gAWVG3qUFyqNSbK5l2SkAmpkrXNqrRThz1bqZD58eF+anUw32N4WOpRzHusk6a5tW0LQDOVspFOWfp+cBow+xsSDBw/5g+99lwsXz4lxb2zPlDKUzoO9ndBvuClL1vZytZDSgjmXqVXONo8RpGcvu54NdvhDJ3/4OYyhiPzwt9WmHtZJk4RcqBVqFGZjpK6ysSRkTROldXkWJ6s0rDEZaxLeCN0kKeBgx20uR/WNpFZpKXspu+i6DYSYCUmaCE1qL3WDpUWmmAuqs1LOmlQKaOS0r7KShI8dQ2KV1jyID7lx4wbGwvbODrPpXJ8HMbBjjMSYoFLq4chx60eGGCQB8vf/4A/4I3/020eSe02xs0AfYPle0sjAYpFkVTGQpbBVb0f2xsaThe/zlts5Fd94Qu3ppzRQ1cssWYplk3/0bYzzVI0gfV1MdMslq+U+bDT47U0mVUXVVKTZhHUnBXbFS5FwqUMsfe+deALOMpvNWa5bJk3DcrES3lVGN3jBcupK/HJrhhIR5GFR+99H1n7JKM3a8aQLmYe7D7h2/SqlyPe2dVSVZHunjMD12RCiwPC3bt7k4f2H5HWiXa9ZrFa0IRBDpg2B995/j83tTTa2NqSF2fnzrMOK1bqUfxg4ZZRWazn1iSY5FfRUFP44I3qo7VY+f5KfMhqPSNzTQFWFVH06KpWBqlpz5cLVU497VkP1TP9jsmQTl5aGKYmB56uKejKhmUxZrjUpQs5ANoYQpX5jCh1taKXmLxp6IvetTg1Zn
DLt6OG84yLCs7Zai9I6R+0cIXR0nZTcWa1WTKcNMSsyYGVzLwbCkZqPjH7vw0xiTUpCEqzXa+7evastJS1TLbUiXCUn7XpT4uDgkMVCiz9npRZkMxhCyWCd5eaNG2xuzHn7y1+mbjz9XZQagyWRIBePXLmmuYSOWypfDRt8f5x+laK5nrPsZraI/Nqpxz3PIRt11FagYgC061Y2u0I9str9STmjGFnXddfSxkzEEUKijUE6U+ncW2PwxuErT0wya6UA+XiLEZRJePNBuXNtF+i6gHOBuq4xTY030gVIqpMkrTBa5GYwUFGXLCfJ+M9ZWv0ul2vu3LnLbHoNY6Rj36QxYL0+O51219M76zffMWoiv+3v7/POO+/wh78tBlwpx9Nv1uYoSFDuKudEF6UpRmknWeo7kgqXTz/3nGXXsEXFHz/1uGcxhoY+5pHbNtYokupw3itQIOFoixXs2pZOig7jHKU1aMpa9SfRA96l7qjoEwM4jeKWCM1YFjR/xWq9UmQfEOM0EUMCJjS+UlqUfs7Jutus5SuxaKtBOY8xfVm1lBMkWKU1t27doqqk2oUzDjex5CglCEOIvf7rxUL/Hih8ct0uBL77/e/xla99mRCkG2LJpxkLVQ9iGHoKQNe2NE0jiWF6vt4By/rs2+crt54pZzk9Z+WpDVSpRmaARIpClh+/b5zBVhXVZEK7bqXDVFLCeUw8fPiAmTe42YTKWkhrJpXTPtEG6y3WKhytXyGmQLaSTOSsx/sG72umsw26rtOMtci6XcDGjMrZ0acHIRA1WHh4x1L4DIL25AwpEQLcunWL9XrNweE+b735Zc6ePSvCoQWkrfHkEPjkxg0+uX6dg/0DXPIqrBlbuhBh+c53vsPZc2c5d+4MZ3a2taXmSBiL635EGtQBiJJNmlIcnALVrKWAy9hoGS3Hib8/jhpwHFx+bLjVHPnnkWHXO0w++pOPeffZjiZCHYVrR8pay3AoGF64fNZL2bAQdGNHSnMYY8mmlDaj79TRhUDAEIOli4EUg2T1Rn06VJ6bpiEZ+nJixlpyjlgr9VdBCO3L5RoSWqw8M6lrLQnlivtENvRIlJxeX0tp4PpZSwryd9uuePAg0rYdk0Z6kjdNg3eyUYQgiYvrdSuGsXpIRhVXQZDB8uDBA65evcrHH3/Ml99+CyhGQQmHGUpdrKzWglATpBxLu15SSTyp3+TteP8wfDFklwOm9juPeff5jtLueKgpidSNNqaPWhnrsDYp0pJlg1ejNarzkqyhS5k25KEuI9LMIBFH7WlHzlHvgOTyRv9ememuE6Oxi6KnZhOrLStlMw0k5aPKLjIsUdHRVuUOshUKwnrR8cH7H2KBK1eu4M+eZ1I1kJKWMutIatjC49d13bbcuX2HH/3oR5y7eLbXo73azCV5L+GMlGgrIdEcE5ubm5IUnDTq0gtiiRyMhZQTf/+8ZbfFcOfTsfU+01H00HEEFXQPtgZnLL6qpLFOcliTqZwlpE4So7zUU3dtEHQcSgFq4WgagzQ/gRQzLlhCVKfKV1TOSQUVA5TkoJy12opQAUBqpMcYVa9LkvTGfM6sqagr1++5UrYvq60atXWvIL3WWmKUMpRWncGUEnt7B1z7+BrtuqPrAi9fuYJJmdVqLVVgjGPcma03AUb/Zv3f7Ts3+fCjD5lOJ/0Bg2EuTqJ0U809SJBzhpjE4C7UMDIph8G+Gpscz0luH7LJb/BH+Kcf8z58Sg7q4P1YRWbA1xWT2ZSN7S1W9/eIsSVnabWFteTshUcSIyYFFqsF4LDC1pemOznT5Y522RK6wKpdY5uGS5cuSUvKqlIvTEKmEiZt6VaB5XJNqj21d+KJAQmHLZYpjFAe6L0KCvHZkJLBORHM+/fvSfmhgwO++Y1vMZ/PtQ+6JWsk4f6DXfb3F6RQDEZ6LokY8FIj9p3v/wBy4s/8mT+NMRIuGO/LfZkWdYUkMCaJJil0mDwQs/uHP2V4urrRz2yk1zdZ/Jv/ted9GycOm4T7ZlImxURMsedChRhpQyBhJdTkhlJmxiYJ5VvDOgQeHhwQkjhWQesvFpFyqqi8d/gshiAmaTZqpvCIUWVZWlGGlFiupVTOcrlmNqmZTCYSClM0SuhKQwhdFFXZZa2iumJcJgyhixzuHfL97/6A0AYuXrrI1uYmxhlCu6JrJXrRS24+isdnEOcoZ27fvsOv//qv88abrwnvC0EgSr2W4gxKsgo9Ulr6WI8VlM5qD50et0+f13jAHn+b3+KvPe8becwoG09B7F0lZW0qX4GthN/cdRg6SGtpUuIs9XRGdbgiGnXAtABJUooUXgzNLkiC0KRLYKRs4NEsZhljw9VYK3WrcybGRO5EDy+mLbNJw7SpqHVD9d5j3MCrLnUSDZBtJFtIIeNd1iiYPD8/fe+nHB4csnh5yRuvvUYOEYw4byFGal9Rck1kno7OmVwp8Tu/94/41re+yXQ6oZQ86jsJjVDdcedCm2FzNse70t430yehpNgbSs971Bxwnt993rfRD8MJBqqW2LMWmrrGudIUMeHpMDGD8RhbUdUN6WA16DRTAuri2Nsie21LmyIpZnK2OFtx7uw5QoxS0WHdEkLqS4UZEKfNFMhKwvuRhM2JsLvPelozaSpqJzV1U8p4Z9B+J33ESmRF0N6UjerFQviH/YOIqxxV7Tl7ZofUBslT8R7nnSTo2nJS0+t0C8SsOKS2pv693/tHvPbaq1y6dFH2IgTh72PYqdgyihAnqOtakse0ViyoA1G6U/bowPMbjg0m6dunHvPpsvhRQ0kNPvHepTDuYrECPMYEQYGSWF5m1AJRyPtrugTJWEJOhBjFPgvSPafA5hMrhm1G0B9RcuKRO2vBezorxXBDp15WjDRNQ11bHFYcMCPlVUgFZQBnFCkrWYVGkp2yIqHtes29u/f48Y9/zKuvvsr29jaTZkKXOkJoxbA1qrKS9o4u6GdGUInsuHv3Lh9++CEfXf2Yza15D8nTw+yKTosF3beJFcSuJZPkoUylBpyiZwx68wiH5SRU9RF3KT96zIk83ZMR2vFGUCI652PLX1rceLzgPOPxSCkk6L3JwlUr9Tutc1TVkM1PTpgcyCGAdZiqwbglYHB1Q2gDkUxMpYyZ1nXIiRwNIUl5nqau8JUfhePpJ88qOpvIxJzpQqLLYizPushk0giPyBqMLZimiIyIShRuFRqpsOATfaJgSJHDw0M+/vBD1qslFy9d4vzZ84rMq6EeI9b4viaGEA1yv8YpZ9Zdy70H9/ju97+rEQcEvcvjzb3Mt+lpCpV1bM7m8pzqcb0RrLxuCSc8f9mdk/hGWvJFHrkHBkY1GtWwclUtTU9yR1xnpKWsxbsKSeENJGkDqGaWeBGiowVNCjkSgiBCKWS2t7ep6lo2/PVa+dlDW13ntBKw6s+UxUE6XK4IIdJ2ns1Zg2gnQSclEUUapgDqsIsDaZ2BZHqZBghRIgLtekUIHbHVNqSKJsesSVcZddgU3U2llFUkxcC9e3e4evUqFy9dYHNrk54Tq+0Y+8hb3+dR0MDZbNajgmVYlX2TklAp+vF8
ZLfjgDs8f/R/3N1o7HkOzAvZ60LMYBTsKehm9mAsqed36pomqZYXMn0CERi8FT0dZcvsHacudFoTusE6T+giB11UTivK0/SS5AyUij5RbZnlqiXGQJpUdF1iMq2pvMElicYVLmjR96VyHtmSjdAMjYIUs+mE+XyG94LyrtcreY5ihLpmaHotQ9JKjcgtQj0wMXP3/gO2trfY3N5kOpn0AIrp9anYRhra0jnPj8itUdEuK/GIbD5jua24wwXzfwb+uyecR8apBupQrPbYLcgqjBBJUSogis4YJwapGR9rS2EzwNB2HesEMVtCEn5GBkzp9a3fwmtBX0zhp9geiTLQF7vPMWoWZ6Jbt3QpM0lQV5XwXqxySRieH9tvruVVMYpN1tIWMbJarrh98xZ1VZFiZHt7h0k9IYZO+YzjB/Aocg4FReq4e/ceP/zhD/nGN39RNxl6g7SvYZsH6L4YoaSk3S/6Vek3+yJkT8f6ePpxomH3FKM+dFz6/U34739GN/JzjKOIc1Eq+nBkekeiD5Ebg6s8lSpNciLHlpzWEuo3DmM91nmSc32iUlFyWKvolpT26DrJlnTOMWkmzGczQow96tn/d6REUpawVUiBnCEkaGKiqSomeo8lOYtY7l8yoUu+XIzivGXjtLxTZHf3IZNpw2w2Y3tzG6kcLSZASlKDSuz2weCROZR5TDGxWC1598c/4tVXX9GGFbqp93LIaBMSZemdYzKZYHpkYRDynNNnJq/j8Wll17LNBv/1z/huPuORszoWysfLUkMyJnAekSWU80wmW6colNBVsm7ywh22iqFYrBNJjmlIuCudmZxzkqBkLF2ndBalDljnFSsrNVL0PDGz1uiZtVJnFSy5clSevgNVWaohqW54Tq12dnLOUVWVtNBOWh4wdD29BVQDajZ40eY4gy1zYWucd6zWK7rQPaIxc/+fR0dVVZyoWR9z/M8zPq3sNmzzBn/qM76bz3YU+o/UMI1a6UTf08hV4bQLl9pqJ9Dc77FSJCn3aGxrEC616pWcM/cfPKDyVV+o31rLRMPjQZMKrXFYldneMTciPTEl2pBwIRJWS02ElXJUmTxUHSj/K1EjityKPSItVhvqqhoQX43a1XVN5SupHhAz1sg+cbBYiXNopHxVPanJJKbTmmYy0blSA1R/Tzlrvffc21tm5BwM8z8YrkXKnre90GB51WycesypBuqY43b8hmRORobSyHA2zmGcl24gWjeUHIX/4TyQaGMkJDFOQ8q0UQjyDjt0izCGLkr43Vkn8LjTTk4MGs5Zra1mZINeh8g6BLqQmE4mIijeCkcrS2kKa1AkoCg68e5SSpgEJXyTY2R3d1fatEoGAf7MOek0orykrIkFMg2D0swMpU/29vf4wQ9+wJWXLhFCYGg0oOhX4QGq9y9kfO2QoZzFITlttNmj7sgJ4naUHnX8/SOu1JPHsePGIlGehbiYsP/DL0ah/iOoXi/DwywU0M72tUUlC9RLwTggkQOE4ClJUbIpV0Ss1u8rXviQiRz1x9sO6zw5Z+qqZnNjg1Xb0q6lfd+Qke3UubNiICIO2LoNdDGz7gKzScTaWpIHhcVPSZyjtGhUL1WUqKHQAUBQha5r6UJLjJ3wUBgU1oC+yxwNqXipn5uYEh9//DHnzp3FuQ3t/jYkKgzSb3qBsNoCOcXca83MMUVZHFiOitizll3LGSb5zz/lCZ/DKI97zCN+P5rhnKS+mVKbUIdKnBQQS0CoKDHRr6mssKH20skvx9IlTPTM4eEhXQhUdaP0Kk+MiRClmKlzHoPDqCtkFN2VyiiSNMgiUXvhwc6oBdHFDs5jUb0guhDRzZLU4vruaJPJpN98hRsozo83ntpXwgHMotPbLuGdJPn5ylNPaqrK0kwmfd3LwQkbdKkhDxu8jnL8OLFFdp+Rfjm+VM9Ydje4wLf4557yhM9+lCk2+rsk9kXZ47Oi+VnmzahcQUnY1D0vKefdCN3KeY915eSp/7l9+zbWeJrJlMl0ymw21WShii4Eqf6T5Dq2vynAlgYpAhSEBIfLFZVyUm1diV1ibVFZ9JxbJDnJWacVW3x/TWcHIM05y2w2JecdKluzvb0NWUq4eV9z+869Xhc204az586QTGLaVMymE7yXcL3kopi+AYIaLAwlnwYdW/SGACfHFuaY7D5rua3Z4iX+iVNP80QE9fjJ5e+hZEO5GVksCfXXXsJNKUmotBRfNNYpHcBinZRAiDGTQ4JkCbHV/rUSBkgpglmDsUymNVvb2yyWS0FbM4Im6D167wkxKB1DkYXlmnUXqFdrZtMJmzNR1N45vFePKI2QNpOlR3ASfqAgETIP9+/d0zpjho3ZjKrUCbSWLna9IJYHi1wQKgk1taFjd2+Xd955h5devoKvvC6WcGEKLUDKWXkNhwn60NT1eFWQzMexJ9Uv2GiNnkaSyjmGY4fUB3PsqCfL5vLClPf+e196iut+/qPrOt1s3UDcHxmpOWcpJJ8hZ0tMEAkYV0EunrDH+QpcJbxr7R0ldfmsviaogHeenAWdyZoZagzs7x/QrluMcdR1zWQyY3F4SFqtSF3Qu3W60ctmT0ZCPDERYqsZ+ZnaW+qqoq78EG4qRqXJpBzJAUFZjcE5ydyfTBo2NzfY3JjjrPBHW82ELgZ6ztJOkGzImhnuXdVzpqy3zGeNGuBRm0cc9Y3E7hEHC7VJi+Gu8TVKAlVKacR5Nb3sZp6P7J7jkL/Ad5/ius9vFFAgxSQ1TLMFpWokG6XCCKUckqypq8SpNzmQY+p1jMWSNNmunjSsVwuFqbR/t818cvsTdh/u433N5vY2586dY7YxZzKdEkKUc9saa5Jct+CprnhLUnotJzF22dliNp1gjcMYoTCP2+RCqeZiqbyjqhum02nfvtICxlvOnTvDbDolpURlK1595VWsdXRtx+Hhgt3DJSEGptMJk9mEetJgLVRO9DdksskS/s9K6VG96vogXx4QsrExW6JeSUKsMnL5/3OR3cQ91uY/B/7MU1z72Q+ptyv7ujNenCvJbEKQ/kK10uSnlCQiGoXCkYqj7AxqzoIxVHVFTCsoYfrUsVytWCxbwOC9Zzabc/7cObY3tmiaKU1jOTw40KYnsSyaghhgcDgr0bS6aVitWmaTCc5VQsQz9A3ErBUdaZB92juPryucq5g2Dd65/pnw3nH+/DnOnDmDNY7ZdM75s+eYTKZCLViu+OTWXZbLJYvlAussZ86fUZ2qjpPOVSBgsiflSEodhgqTg9hZKSoAYZSbmiEnAQnqQT57x6yYcM9BbgNTHpivnHrMz1BmajRyxiR6HqV4N+hGJ8dKprLcbiYRcyBbSzZeZK2qJfsuZinGaxIQhSiaAaT+WdlEpchty8WLF9nb36fTDLkuSwkK69wIKZBs/kgmhkyILW0XMGaOIVF7L9xAB47Ue/HFe04ZSZgymhmLo649s+mE2XSC84bQtaxXq76IdMnoNAVxkseKbDLWCSq3aluu3bjO2QtnmWuDASvfmt50Moah1bmhqio2NjZ02oelF75jFmPkCzIe3L3O3/mb/y7/1l/+G8/
7VrQnfCRGK44OBawTI0wy9xXJDFLcu02ZurH9s21yonKeDOrUVDjrIWSy1C9nXOi7aipIUucupo6UA4eLA27fvsvu7j6TyYSds+eYTafUzZTptMVXNd5UEo7s+YHgRi5ISom9gwU5BrY3NzizvYV1lSpJVZYM2dXOSsTBeWlBPJ/Nqata60BKJuvO9hazyZS6aZjWEy5evMC0mWlL3gesQ6QNHTs728zmMyKJ2kHTVOJEGqnHm42RZhfaSs+CNhYQC9VaeRZyikStt2qsY7FeMK9qrH3+2ccAH1DxPzdn+f3nfSNPMUqpmIGHKqiUVQ48MZJM7rPycymtFqXQOSCIkYGk6fzeOan3myMZqVixWq04ODwkpgW7+wfcvn2H7a1tNuebzOZzXLZ9SNPqtVLOve43SP+LylvWpnS7sVhLH8US1DJLIXfAKy3G1xVNM6X2XspWQX+Ny1euYI2lqmumzYzN+QaVr9nbP+CTm7fZPHeeW7du4WvfG+gKypFtHmpYGiuRsCzzkmIL0sNIkWij5biyRs8kROuNYbVe47RFbPmuz2tELrBrvrgIaqE+jSOyBe0uJciyzRhlAZKkzGIcdfwa8DDhMoeYmTQTFl1LthlcwlamL0GGcaSQiHuHrJYrPjG3mE7nbG1tM51MSJJFiNQMzhhnyTarDAqFYLYxx+Qs3dSsxxJV3oWC4qyh8gJmOee00pBjOpmJsWodlaKpMUZ2zjSAoK3NZMpkNsVbTwiJbBz1pCGkSFodSgUj+bb9PJbGQ2JraQveLNVpBuR0OD7EiAvSGjXESD2Z4BD7C81peJ5yu4vhNzD8t0855ql2h750AWWDF6vcZPEiY4jEkDQ7OhFDxPuRlW3AWS9CYJwS9CVUnyhVATLJWJIih1IdIWnh8Mjy8JC7d+9irMN5KXruqgrbOroYJTO1FdPNKrqlZqMqmMxiscKaTPBSr3J7c9rTT/u2yqaUpJLELmelq0ozmTCZTKjrWmqNaQvSpqnxxjObzmjqhslkQgiRvYMFSZ425ptzqqYmpcDmfEpdeZRx0yNgOtP9z1Fyc+kgVcL8kqygizNC0o6OEdovh5pj7/VvnPTBkwTh6OHlFsvhabbH4S/+fx/z4Wc7YgqEFHBJGis4Rf3HxeJzzjjrtPA5Io86ryZnbE49p7NEDTKFp6cln6zBZyk30tQNyTqSaTGqlI2FEAMHhwv29g84OFyxubnBpJlQ1w0b801CF4XbVJD8LLLXO0454T1SrDplYkjYqhaFfsSjt311AO88vpJWlZX3mmWqBqy3vPzKy4ChqRumkxnbm5vUVcPe/gHrkNh0jlu3bgkvt67wZCqhhvWKUgLEJVlK6P1G0f0i36URRimRZDFUjaVtV8x61HTggpXxrGV3zQHXzHce8+HnM0rFDmutdrIBGKJGfRJEkbWs1A5FR4wVPRZDUOOTgR5ltSRfFoqS854U2r5osHNOOk6pkRBCIsU1Oe5yeLCkrhsm06kkTkk7HQm9YigRNamSAdY5ptOZdP/zNU57jQp3WvnfTmpKOmPw3lA56QBUOac5CVbr+Sa8ayQUrE5Yb2yrrFVeql+gz62zRvYqq4FQXXRLUhghDfJwJBaZhTeYwiAo0VBZKw0okhSOPy5yz1p2r9Px181N/tJjPv48RymFlJTSVpyMrLF0WQFZm/J9hK4xJrKJTFnnsFGVT8r4SaW0ANNXEEmaQJ2gr5uauoQlkNKCECLTyZyoSaWi+a383xaASS7hrMM7S13V4nSlTE9nKYiroU/edt5SV56qtlSVFxpUAQ2cw3qpp16oASWcH1MkxCD2jB8SamWvMv0sZArP21CIWGPrQadJj1fdoJZ9CB3ZGTV3hNT7vOW24z638v8L+NOPOcGTQvyUEGbSzkf5yGzIFyrepQRhRJlpdnFRBAZtR0cPUzrryamlNEUS9FHbnznhRNGCIKKJtms5ODxgtWyppxNmigo555lOZxKyidCi184jLxgxSpLea06yUZrNDUzPd9Uwg4YbRGl65b16mrqR8lZWez9by86ZHTVMJ2xubLG5IYbHcrXm7r0HZGdZLBZsn9lmNp+ScpROWoVH2M9y+UseADuSlD40naVFm9Akcp9AZawbLUn5ro9d0GGYxx15/LWnL7zrsuVMOzn1uGc1pG1nh3OGqG3gBr7piIvKKMSYCy+5JNBJN6SxAyCc47KxFdRbPieIuyiunEVujSm8OEX8u0TXSX3SSTNlPp9TEgJEeVm9RdOXtzLZShvVWvtHO6GfSCu/8pWy9BpX9NQ74Ww32m7PGSmCIrJn2DmzgzVyzGQykzCWrajbQD1pcFXdO44YcRqL4s4m92FQMdQLB0yR02MaTJC7kvQX9TzpyDHPW3ZrEi+xPvW4ZzWOJPiNfuQ9ejV8NEtXKUJ9RQTZmAz06FWJUI8ns5QDc84TndZoVrkTAEG3xaxAwrpl3QbscsW0bWnqaS8jR3Yg/bvwmZ0Tg9JaK6HIsrmKKpU9QoupW2epKqmq4b3Ha2dBq8lWzgocKgawbOYxC/Uk5ahtNHP/HB2xOZGwpimyrOH9oUvakEWAGRyAMvF9yR431EgulsHzkt1gHrDL/xv4Z0899nmM0vijr54zMrnIhe+cRnNUEFZxfHIh1xt1wAuinRLON70MGKsperms67A2UYnyUkoQsnHgbF9FpazJ+N6SttQzfpycrRUfzMCTLgaq0BYllO+9JPZJOcyRkdrnG5jBUKckZnV0MWjrVKm728thFkrK8GwrSNUrA1OmqBeXHtywcr0Y9LlQnV7oKY8Dt8ql+/E5yG1iReCDU487PUlK/xdSp5mTUoLEaHZcjEMBeWetci40o94MBGIRxlLaR7xzXzpFpAxJJlhC5Um8DeUOFk5UJtGGljv37oF1TCdTtre2OXNmh6ZpwBraVcCatlfaVgutF+PDe0sKgZSkL25pj1Y8eWell28pYeW19FBTT7TdXuFOSSLXK6+8grOO6XTKxsYWZ7a2scaxt3dAPZkSgWvXrin3cIJVdEKjzGAyySQJZZRNPkvoQI6zg4LNiXbdaeF3TzSa4Z/toG1PkpmxF3Tc8ymvH5OlT1t4d7bY4evv/KnHvPtsx2q1YrVeofoArHRyGnppG1GazvQoJJi+K1TJkDTKMxWjSnnRKRb8VF83KOaON46oVSVK913hXGbIhhAiBweHHB4ucXaPyXTK1tYZ7Yom55Ebyn03KYuElapqwmQqSX+SLFhQKOVDeSsolLN4Z/HeMqkbvNJJpPyJPPJWS18VZZkKtw60k4uVcNNIkaWRxz6oyuJh9rMqJslQCZ4Ug1yrNJ0wGV/73pAxBnp79TnJ7jZT/nT+YiX4yRi1Aj1yEBIVGH1GfRx0BSTjfmzAoktV0B9kw41J64mqpWidlHXoAQr6DyJodyJHWLcdGEHnjTWKWuZ+c8RIX3QJYSatu1vKNOUBfTIi30YTm1xl8bWlrqW7lVdDtSBPw0ave4Y6hCmnvuQbaNnAkshnigE/OJYDojwgTvaYQFknUYlivDgrKFQ1qftn6ETZfIaye4a7/Dn+Jv
DvP+aIZzeOcxmlK50mAZvSJmf0wCsIUAwsawUkEofK9orGqF4LWmIsRkkuMip7vbOQS/IQcj0tkZOUcpSM1O7NIei1DMaBw2l/e5H7EMHkQOWUQ58L/1TAAldoKiW8X5wob/qa7XVVU1VV70iKraGovurv3kDtOg4OD3sQxYwMWMbOfJHvEkktFYGK49WvQR6eMSMViQz0JTRzTieXUX+GcuuAreNAxbFxuoGaA+t2SUod7WrZt9vy3uO8pwujMDPy0BeVWTLwDAabpcZeRls+WskKzilIWGYkhAYDkiBNVTvIgRBaQuik40MXgEC77jg4OOT27VvMZ5vsnD2DVPtxw7mcVSNZFE/lHa4S7l7tRUjIGVdQVCvcWW+t8Eo0hDSdTqmdl8QoTY4Cx9lz017Y6roWo9dl2hyJyLnEqz+6CLkUMzVQEqRKeDQrB8oUQ7aESHOi69ZUTYUnkZMiH6cu77Md25fP8Mf/h3/2ed8GALfu3cJPPGe2t5nPZ1T1DOe9KDIz4NZGs4WNtSQCKPJuyH1nJZAIgElZHZysXZ00+x5RhDmJc1X5ijZ2OC/8VbFpJQEP0F73Sbmq4qAUL93aUo4t90o3G6kHKckAeeTxZvWSR968yrJzhqryVNXQ2KIoVGMMXkl5thitRoznddux7jrmk6ZHgMXbj4+GdsjDjxq4BUjr8Qstx2WtFUdUXmU2nfYowhdhbLPDP8F/83nfxiOjR9PHk6rDe993itFCTcLryxGjtcPKuoPIICCveylBlbpA1yamTa1leRy2spjC2eTYsmcpYZatBeskmVXpB9bJHiCVrDXakDOx7XAmM5/F/lzF4CgbfSkn5b3w+Kx3fUKT856qqY8kO5afqhKAASRJMMSWxUJCufJwGAKyOUs/mNyjYKJ+1YCyxZNlNM+itwe0a8jqFzrNF0N+Z/l13o5/9XnfBkDvQEhnu6zUktQ7UN5LaTKUjlIMK2BET5LWzMUISxkq76nrhjZEYtdBNtodzzM4JAVUUAcEQ9S9N0dJ1crJYI1nsVyAA+cN3nipPKT3krU0JDnijXaQNHJGS+6R/mIL9Q2EKuE89xxr7xQUKY6VpXSgEidOHLaUA11Y8/DhQzY3N3s5T70zhTxVY7mFkdod9rLBQRCjVAIBqvetUgXVSH4apPPzHNss+GPmR6cec6qBuru7y927d6mcI4ZOwpFaYLfw3YDeqpdFU6UUByMLkKLGlPClFMEPMUqPXUZKOEvPe5clQahdr7HGkiJ0rdSYTIiQ55BJMdC2uxwuVzT1REpUGKnRN9gPwunLQEiJyhms8zhfk0LsN1XX/wybfFN57dFr+/CuU6EroaYeSbNWubhiXEg4QYW1ZLbSsxcHNCoHRrV6gCTtJ0vWcxTHoG1bptqi0yvC3DsEJo88nrIu49XMR98rfx8Ba04S1hPONXyV/mMGuGXf5/84/Z/yL3wBikbP53NNkoqKligCpI5TzzXFDI5R4T1lRUhzxJksFqV66tYM+KFBlYL1xK4jhEztHVVdETo7IFhGA+AZhORfyqQ5nK1IqgSN9i0XJ1qxBlVCKUVyjISmkrCrQYxpq2F9ZxU5lWQuX3lBoRoxUAYUfzCE+2ocClRkI1UDDg8P6GP6xqjpU9o8DhuARWW8R+8G+Smh5BgFOS6tZY1z5F7+C01IDKwjCvNZyy4r/iPz/hcwSCqjn6dCuwBFEvUL9ap4KEMnrXy1WkiPYgE4qTqBoW07QorMNzZIYYUxUhm9j33lpAklwqdHnTajZOSUkpTuMRlXO5qmwmvIMacScchgxD3zXgwBn9KRWqdimNbqUImTZ7QrYEGDMGAr22+2wjVMg+Hqpaf73fv3BQDRvro99lu4uwiQOo7eFwpDibZlhaPbtqOaNP05rJXWyKXw8Iko0zOW3QrPJXP2hHM8+yGh7lKCTuhMUfvPj3n8ZT1NNsTY4SaNGv9lXgs5BCBrXotQ7mIIkLNS/GwRUyyCSmIHorzoaNG5MXcYddpiiqSYCNGSkKx9tRaVwpU1QU6+U1VVpBAkUcra3qkqBqdzAto5hVflOxbaE5KP5XSpFc3NVqXKSrTaGDhYHACZpi4doBTWUO6CtWbQk1noY1L1CMhCB8tada60a7X6jGUFVYqpYbLYDcUWedZye5O3+Q/49/nLp8jTqQZqIev2HCa97mBY5X5zMorGyMYahYybs3D5oG8JafobLsk+ZWsy+KoidGtikqQR5yvyslj8AhXJOsnvpZCvNZkQIzZE0YVGIX8zSkHKkbYTI4BsqauirAqfpED2SmFwkozlK4tXpSiIVNVv8M4W0rOEjwtftw0ti+WCqm7kPmwxSgux2fT/jtGnvoxJWURd65TyiFKhXnw+RqEYRGM0TtKeTx6PlJx45DTHrpgBk9kyjl97QuHdZzU2NzfZ2NiQWomKZMo4+gAZjhlKPWp69Ev36KYxQ090pYZUVUVoO+Xy1XiTWZlR7Tk9b9JIQ+4fhKzy67REWpBCFjiqWu/KKBIVIwlFbzUEJBSVQXbFi6+lMYX+mCLThSelG0gxUCXslfFW+YokuthxuFwwhHjFRC2kfPLwXBm1vgv3q1AZxBkoc6d0BKuhqxEK3MvvI8L7bGW3IfOmOT3c9EUZfemjE8GPfASRGhAXEcRcnAFDj553bUvla82k7/QUJQRetBWUCwrnU5w4p3IecyJpnUg8vVOYU9KON+LSWGMlCVDlsm8EYJ06WSK31kkUoTe4dVctur3QyeWLovt1JqRI261pQysbNHlA8UfTY3ISQ6LUsS4OGUUn0O9TYzWtDzCjWzp5DT71up52mpNlN5C4b74Y/OmCFtpiB6RU2On9+72to7oxdh05VYOjpYcY5WKSxdFJKap8eGLoyEBVCfI/bKODUVt2XFMcD6W2OCe5LuJEiRHtWqWO6I2Ve+upAhrZEn0rYICU1XQj2S3Ov36HIrM9gjlG/xmSc0GeH61c5J3q/Sy0Pzu2Y7R7WW+bKdjRN1gZAShFRxilIcQYMc4PevuR8WzldpMVv8ZHp577VAO1rmslqVtKVLo8sMVYLUjU8ZvN+t8hG2/gqpUFKgIlIUApSB+7tdT1M4aq9v0mV5CunKVMTgkRyHKIh5+KB6TZxdZZrRcoAhBjwmQpG5VS6jvzjMOihVNincd6L6Emb5Xj5/FV1T+ExYs3xva90rPJhNixWC2YjJyOrMleGiEalCAjo6hf2EFZDkLIEU6JUcMYMxx7XOY+08K7x44dPwfltidLx5d/ss0XIVI6n8/Z3t5mOmnw1tEl3Xz0GRYRMgOKWB7ysaunWs9QyuJIBEDq+5aN3vQh8pSkxq51ujYjx2MwfHW+shp+iizmLElwmUyywn3DFoWuYdQSbSiGZrZYq0hZryi1WLSGl0aW5NBZVBH9/nsXOkFW+U2BsF6JUzkyUcvNF+8bK3LYl9sbK0oGA6KEmIvMxxAxpQxdf/hRQXzWslthOcvsZzjhsx2yT6qB38+XuhA5HzEkpe5smSPT6wyBTACSJpl46qqmXS962TG5ZOXr5qzn6L0Io00hVKtZLRyak
1CRCg2sn2OlzKQsD52zDpJQEMZyW57DUge1UFbGe82wwY9Bi/KvgCBd6AgxEGKQTG1NwCmoTe+I9obmaI8q83RslJlWmBXjRtGvY8fBs5fdJQt+wg9/hhN+fqOfP9O/ABydpz4KYARgCqEjjnioZXKNVc5qEmOtbVusr6h8Reg6YkjiVJWGKcVAHS+0PhPGoEmnFle5IRyuTkjXBagMo8vL3gtgRE6zlkgTHWt7w7QYrL3cDpmrYIZkxiLfw3uqfxEjuQtSLSI7acwje0OZtP6biMwyAFoFGBtP+3G7yTmpE9xM6tEaPV+dO+Ehb/L34BQM9VQSzWw2YzqdMp/Nmc/n/YKWIaH82G/W5Ny36uwhFSDnwl/L/YI5hd2TpvFbKwYxRgt556Ro7IDIyLlUYemGnzDSRxenNRkz67aj1VakxWgEqQvWBaEVACOvZ+CUCBeq1vZ6stEX8r51AyHaOIP18mMcZJPAJbIVdGHdrlksF5JoormwPQqVpY6sTfog9AiUfE9rBKYv31mI5srfKYbPaP2PGrpDKOuIdXLiENWf+9848lt/VDFu+lE0yPj0hvXNyNX/8MFjrvVsx2w+5/z582xtbeOrSjegrEkggtKAGnuKRI47bwhCUhRpMWTL2XNvCBhFUJ1Xp8e63rETo9cMM9q3EtU51SRE6wS5DFFqqHZdR9sGuiDy2sWkDSigyEdBQIfuJcWbt5pJquEmRe9zEbKxsapcb03s11CTdCZq27WgZEmSFMu3MEeMbvliBSXBSMasKMVhLbLyrK0xmAyr5XLsgulBw3nR7/ksZXdBzbtceMy1vhhDOvRJ7cYBsdboVum4EyMpRt3wyy5levS6hFGlbWlmMp1oK8kkFBFNqJPEa020KIYwo3aUGsatao/zVv42hpCkW08ISbnzub9X4etXWu3Cj+R20MF94ooZUVEUHSnUqiMbbG+kSl7DarWi6zqVJ+WXHmstXcZxf1QM1fFeo58zg0GQYuwRLTmI5y67++Yuv23+r48Tm+c6+koTI4VQZDUlsR9CCNqGNINW/rRaLacARpBZrpZ4LwX0czYsl0sxUK3TROshO97AgFJqGbKChFZ1TdPUNJNauow1tSYPyZqnFHrZFQS11DmVFu52pHN7G6Kv6mB6lF//kuTxXm6P/hgjNUoXy8XAnS57RC4RK6B/jozWaS8bwhDllg5THBM73TWMYblYDOfs3xvL3bPWuXf5IX/zMdeScSqCurGxwcbGBpX3hHbNYrGA/qFWDwjdABkUZoxReiGb8nWGf0tLMGO0Y41WmAihhFErcmqx1rExnxPWc91oh/aLwqXQsFPOUrKKiJH0IbooodJsDElrQJqibPuFlg0+o0XNXQk3Vf0GL3woKR8hk55FWdkSZj3iKOGsI5igeFNmHVo1QEXojcmUFqpjdyKODJCsQmyOa09KoevRS0kyvXsZeM5jxzX8N6avPe/bGMYI5Qa07azMb1c2+GKpaZKT6YlCum6x7b1QWWvdqnUTDyGwXq+Zzebs7+4CAx3EVfWRjX7wB8WdTEYNZA3H++x6fmlIHTkoOsqIn6mok8NgbNUjqBImLcpSi0n3G79kZ2dNAjTKWT3iyWtptq5r2dt9wGS6gZdegsKBdnr9Yw5qvylrR6Ou7Y4pp+HYslFZI/xVUziR5vmLrzORDQ6f812cPkKWhNJsXN9THArlBMiJ0K2xuaN2lUablA5iLc5mdEVpuzWu8mzUW4BhuVyRs5EGKkX1jHc7UzS4vJQB4yw7OztszDZYdx1tbOlrW8dODOWc+ucH5SCTJLLljHbsU5DA+YF+UgxX4ayCzVA70e/lGbbaAhtrFImLtOuWxcGSuip1K9G20apfrXyXkk8+gsuEkxg0qXWM+By1YocIVnnpc1jrn2Vs5iv8sfg/es53cfIwRiKe0twoI9HLqJFXQ45DVjoFxtGkKqH+O2onSckYSZaqPEwnNd26ZWtjhreQsyVncYBySApaZY4vUjYSqb1y5TIhBroQWLct2Ui+QtdJpBXtFGYcgpj3eTdZEqiNUT0r+nbgUWuVFGN7GZHW02hdVqXXeNlniu1kEAMytB2N38YZKwnQJUBth8+WeZVEJ2nwErpIiPFIRLtEp42RetzCu9bsgWwHxBqeiwKesM0b/JOnHnN6HdTinfalQOyo85OQi0Mute4lS25UxVuGfvEcxPN0iqLI+yV0bUgxsl6tqauKNkkLSF9VfaH8QR2oYtHJzWZI4nC1x2HoQiAgmXEpx77LQ+HDxiy9op31WOO0K4Qowx6Z8lbCpLaUN5G5KGGg3psvGLzSBTBSg3Nvf4/ZdKM3jvJYGnJ5EI0YDiXEpEoy5ayK9MgU4r1ntVpBNsw3zxxZq5E+LRdRAv+xk5y64MMmdEKk69jZj14b4MblNf/ev3idP/EUl/q8h7HS5aXySpxfdVjrsVbayIaQ1HAs9SGNoNZWDNOYg9T+DUEUlp4TJ0iqs7YvDN62LWfOnGP/4Z70LveeqpogxfYVngSyiVKDD5ReII+L856dnW1FFALr0NGzlnMmh07oKRpSH4djiwcvPwPPtEemjO+5yjZLl5SiLKU4thgOOmvEmFmtWlI8pK68crTLBtIfJiPTU39AvPgQSum5AiypkWAz5XnvqQWM7IMjq/cpZPdnGCfJbp3v8FL+T4B//rO92KcYQzjQHvk9Z+maFzV5A1PQE5RipIk+/Tcsm37WaFWmqidkIr5yfQmyzfmcFDKTasK0qbDJQdZWz6YU92aQE2N629U5h5tIM4cu1iyWCyazhtXhgmVY9npP5ESbOTipilFKCg1c1FGESjdfQZlUVi1SLaPM04gzk5M4+l0btItc0ARJicIVRI2RaWmyweGIGXUkxeEsNJU+UbFkKsqHytUfY5k+e9k1HFB9gXqgHUG5YUCXM71jn9VgLVHY8fviVECOUonBOU9dVfiqErqh82xvb9MtD+R1Z7C2kudgdF2hH40RcwWYTCJGKfvkXSb0RfQF3Y3agtooUGRLVR1rcSaN/HozgAKjDHqjyVrOGXwllK+cjhSL6gGHmCCGRLcOfUJZDJsqtxItzUbvRWsmitErXG7RwR7nKt1rRoYD9HtMRBMftUyiPMvjAPqzl9uOHW7n0/mAT+wk5QrvMidWqzXFHM2quMSjzVqIv6CaDMaO3lkMLTkLx8uMEcR+IhLrds3GxlyPLQJrtd6Z0/OPZk4FpfCQmqbCuwpvHeu4JqRO0RlDzlHCjyU0iRoc1oqHYcrmbkZ8w/EmMSBx41CT0S9UDFVjpbzDarnEZMt00gyAaV90WI3dXskVKH8cJnp0GGM0M3Y4vuzyeZjY3mB4ZDwiJePjH4cEPH3hXVtvMLv8rVOPe1bDWKMOhtJE0OQyowZoX0Fi7E3lvgi/SRli1vkeDFSrPeid88rRFIOwbippdapr20wm9IlsPQFngAqL3BaOp/M1tbXkXMNKsqlzzoQoHn5/f3q/xknZpuPK0o6M1MI7HJR2Hp6H8mf/d+4NyhgTHR3euRGSRn/jfVG5Ydcf5Bujhv+AsJZatNmMAqh9
yKBsHD+f7HLCXz+L7AaW3M9fHB7fSUZqTlKMvhSkt70+iLJBllBzkRGjDkJxFlLGZqic72tZkxMb8zklQ13K9rjeWAD6TbLX6aMlL7zTpM0wjEV4rX5I2smZXs5TjngNhWZ9HIT/b4/I7bBHZOVMW4qf1IMC5eSl546xlNa5KSqqPII2dHaRxJmyeRQDVnmGI8cKlG+u/OwS3Sid6Mo+1z9e/XN+bHzOstsCN/sids9vlCjJkCNRwBd90DUCmbS+suzLGprWPU2a5NhhL7SSK1LK4qWYqCrDxnzGQViLs64JywOTI0NZ337uh3vJCA0Qra+ecsKbmr66Spn5oq7L7Rdlq++5gvqPonR6B2SSVPDR4v1dKAmyBYiKfaUUa6Uzpin3WCpuMNxORn2jrNHcLEWvSsTOlO5bat8MalmfvJRwthqeq6IXThOvz11uAzd5cOpxT8zid1rny+SoyQ7DIgk5VzPpyz0VQ1W90IRk8pdC/8WQMna4htUOUilHmklDu1qSuo6UMt55rBPuZUmJGitJQLh01lLXFZNmSu0bmlizXC/0vInQtqScsHm4buE2FZ6KNYUHZY9mPffcLbls6QRVJrqcr/ydUqJdt5Ass0ndC7lRr5HeEzK9MI03kaNG6mA8mV5I1FsrttVxnViO+zm8oL622mPOcdLL83bCNz5569Nf9LMcRrimMHyXUn9OastK+LFsMEYTSPpSPDlT+nD3PFR1XHLOeOtIXpSEJCQZJtOJcFuNo2mm5DxybEZyB6pwNPSTVElabN/EohRyhswqDzJQFKokNfX2MFYTqoYEk+LRH90cjuhsin+kxrPc4RFFWbw5QRNGH+o1d4luDAZC24UefSu8vlw2d6M1ZUdy/WhPvU8vu+Nn52eR3ZaOW/nOp7voZzzGBmoxUgvKlFIkaFc/qw5O0kz5I5QL3VCLkUZS2c4ZbyuSgUwixo75fEa3XqhxoM1WxrXBKU59FuS/R8QyISeWqyUxRnHQyL38FaO26NmUpR02tR0lcWl9Ulf4g2NZUJkdJZ6Uc7ZtS9u2Uj5tugEa1arrun+uZC7VoDyC6MnlrfZ1H2RXkmMlkiHJi9I/3vZ5BPIAjTmt5UIMvzxz2XXcYOfTXfQzHMOeMcxBX62HQvWQ8kcyNA+lr4pCry+FT53UGRHDN8ZMCIEmN0wmDetFBZiRUdvfifoOg07rb8gohSp0rNs1kCUsX+myFgKllmoavsNA7SgmkFWqVKm1PrjXeTCG1YbqonC+1+u10mkyO2fO4XyF9xWTZqKUnUJvEKqMLaUnRv812rFTvs/IgVVwIWluxJCgLjZJXZdkgwwjWevn6Dno3DucrnNPrzSsnqtzDm81QQcVIpxY8VnBkD4sYvqbzjGRQqRrW7quI3QaVgWZZDMUuq2qhmnf736C8zU5ZzY2tjFGOCi270MRpb7diFGAlQBSNla4VRsb1N4zn83YmG0IRy7pg2KzFIgm6/cbJ0sVTsnYMNVwFKIzy6SlbEi5JMIYQkrELhGD8FhCCMSgWaRlk6YYBfJNDFIouHTViiFJAkOP9BbStV7cHBOC/rk4wVodv5SPW7L6d86PEdhjFzh2+kc+ljOzm/f5+n/wX5wqUs9q9N07oDesnPb1Ro3UpBt9QSDLfBQCfwkPlc2ZnIXLnMQpk9CTxzlDCIHpbCqtSI3Buapvm6qsH0EIRvM15gut2zX7hwc82N+li53Qa3RTLmVajDXkFAlde9SAMQZnwFe2R43HSAZG5Fx4T3K9npcbpb5qyZqyVuqoHs301LlRl7sXJ0qyjsVoU4t6MmFv/5D1uu3r0ILpw9LGDt2szKgQ9fOUXXWpMU/oavKsxnEDFYZi8SklYujkNVeiPWhrXt0U1bE44kinTOgiMWaMcaQoTnTbtmxubVDVkg0tm3PpNpNHekjvDXpRgAQm04XAcrXi4PCQLibWbTtskvo5p2h8iBGHOlLFuNA9ppfXbCFrGbRs+2exrM5iccgHH3zA7/7u7/LDd36kSVEG76p+/6ibRqMd5ekpbL9xxoRGAVWGsZbJdE5MmbbtVH7VQI2SxFhaq/aTMbJ9hsnmmcouTAl86THneT5jbDT1BiglJC6lIU3JJ8lSCkw+41Q/R40WJIqaTEk47iFoCN4aKdofwWhN3qM3cfzH9Cioq52Ug1ytaLtIF8PQUKdHSeX5KYlcfdWIfs8Yyg8a65Qu6BlqBOt312O7ruPDDz/kN3/zN/j1v/f3OTg47OXW+5rpdCpls7zVByxyLMOnT/juJdgY0M5/UW2QoM1kZN7VRktJk6zpP99P0ZEoGM9MbhNzDsy3HitD8BQh/nKywh+1Rlt9ugFVHLhlCV9KcEh/vL6GoymbnFhr6kALv8T7mqzZe23bMp/PYDYho5n9OffI5QBLm8HYALLJZJNZrhc8uL/LfGNGzpnGOW3/6Egxaq2zgiw49YKMhgnkO3nN2Cst9UwfQoJCKZQ6b7aH67MxGISz6l1NU0tRdKMp0kd1WREIJQKWItjabq/Vnu3GDN6ldRIO8c7jvCHlICWz+nDG06zk5ztSfp1V/ree920cHUba0lU+42yUOSt1kSjITpT5FDxKjNRCSkvC8yzOBECOidh22MrK9tcFVssl58+fo1utyF1L7StqV/fK01gL2pNeVls8WaOybZylDR37B/vMZzOk46kqXX10vJOya10Qw9o5jzFS1mfg7hlVkGWTlw0/RfGui8748IMPuHbtGvv7+/zKt36VV19/EzLUvmFjY1PQqUmlG3Ic0VQLxQeGSr5DuMlYy3Q+IwFdiKWeOd77I4ZzceS/AGILwKts8r/m28/7Np44soolCIJTOUPlJAkudIkYIiCVF2xvIATpUW+yUluE00yWbj/LxZKu7TDW0HhP5Z/cny7nLJ1ytGxPDoZ1J93TkjrsZa2dAY8ldoHlcsF0skltDNkmiX4VDWuOto4Eg7EejKdQaVLO/Pqv/33effdd7t9/wLmzF7h06Qrnz18ihSTdDQ34utYObFH3JJVVNayFCz6U55HKKULNWa1WdGHNVBHSrBbGuILFCJB77kL8Buf5t7+wLSY4gkA6g1CHFJUXaklJ8pT92HtxYm2SqGsILcZUhJCwNrFarjEZ2tWa3Aa8VrUwj4lXG7UTxnVIm+mUZr0mLBYcrhdsbm4CaXAMrdyrAZbLBQ8eGra3N5k3rgfGjsitGdO51FBQYI3s8B6+851/yO/8zu/y7rs/4tzZ81y8eJlf+ZVvY4xnsVgCti9taZxk64vaFatOHKmktkbhqNreqdvc2qKpa/Z39zizXZOidnhT5V1Q5hL9eN6yG2nY56VTj3kqA1UMLQs2a9awdpXRProS+ZAdJ3YdPeZTujLkrP24R9Y6CLwfxRCzzkqrxVXL5uYGlbOsDg/UKJVaplaNUs1loeAewgkaeT/GcLhcUHkNNxZlop8rgaSck7RlM+mIJy+JUUI4zsmIjKFJKk5CDjFG3nvvJ9y+fZsYI7/yK9/m/PmLpNDhq5rJbC4UhVo3eUpfbPGEEgP6lTQMKvFPKba+WK0EPes5p4J
ADGU35P7LbH7WcnaUU3P6KIhCe+ket/75/zvwRz/ju/nZR9SMRqtly8Y8zL68WXlJDVHpQ2NErlU2bRrWLZVwlJ7D5CyhUzNkEMeuk45PIWByJgXpWHLEGe0RSUGYrJOmFD6BdZ5l2zKZTjWsmY7ILdqGb7laMW9KPT9ZA3csLGytVWRIavjJJl+xt7fLb/7mb3D9+g2s9Rzsr/hnL7+EwUsmaIZkjLbpK3OW1ai04sVnQWaNeogKUGCMYWNri8P9fdquo6lqrBXEDi/KO4SINxkTozhozn1m8vuzyO14LJnzrvllvvUZ3cdnPUrYr3SnyTnpeksY0Fee+XTC3sM1JhXnxarzLesYU8TE0JelMqrHloeHrFcr1jFCzFw4d+7xN2KKo65GhPPMNjexVcXh4ZKU1djQ/cDmUpwqEzrpUlZ52Nmc4W3pZiYgQOH9D6580XlO6116jMvcvn2bh7t7ZKT6wI9+9BOm001iRlr1hkjtFArQEOkQ7izGpnDNRaxtT91xztFMJnShY7luxakzRg17qdUZY8Bj6bN9+sosP98oiGNZbyjQxuljzXU+sv8uv8Tf+Plv4ucYUR3wHgmHHkFNWu6rb8TgVU7mUx7eXQqVb1SSxlcO07V9RCDGjpRk7tdrkaWwPoSUmFT+MYheidbKf4psGWOpvWdre4eqmbBcrnBVDSlICbGk9kTO5JhZr2CfhLMwq3eEDjKS2zFXXHS6aGvvK6qq7q95585dDg8XNM0EMLz77o/48pe/xmxjk1XbsuxarB3Wnl4+FQJUD0sAztRH+Aqf2jkP1tKmyHK17psMlES01PPVve4lkWyl3fun1ZtlvT/NOE/HX3oCreqpDNSeaKtISp+NydEwpUxEkPdySZJShLEk9ugQfn4mEIT3ZJ2GxSO5KIUQabuuxx97LuAJ/Akz4o362rNq15goGa9DTTLBaa1uuqv1io2J1sM0YgD07QNHmGfJnpNC5OIdLRb7/MF3/4BPbtzEVxXz2bZ48Vn0lrGOTNQyR0fh8lx2dEqIKffXsc7RTKe0IVCFMCRFjZQXCGRvXEECj81HeSrNoy+X//aKb3zMCQ95Hwo4dq7x3+Vj6yZy7c29R87xPEYfXjZau7D08wQ05kFJdhAFI6WZUCScFHuldiQhTk4gxqtLiihaYoisVytS15JCB7p247jGkQxTUyIBSHao9zTGMYuJ9XqlFQfAmIBsodJkIqdA17YsFoc0biYJL30HtyE5oST2lRJaJQlEOook7t9/wMMHu9R1w9Wr19jfO2Ay3STGTBcjISa1OIuaL5nj5V9Rkn3ClBqqOWeayYTF4aHW9GuZTqdH6nIWI182geJulqgCn052TxhPJ7vlzC0dt04/4XMfBfnI5Bz71rZV5WkmNU3VsNz3YJImO5Xe30YSOqwh5khOQrvIWVpI7u3tk8IaUqJ2pTnKY9Ao/W+fsWwsdSW1WWO2hFZQVGO0pWKSGtfkSI5BWgJ3nTo1peOQodTsEwOSXq6g1P2VihilPJFzHmdFlm7evKmJhJYuJtoY8Xmgs5TkvFQcw6xzeQKlw1hHU1WY1rBYSz1tslTriOpoxhhxNmtXwlFizWcou/3JnkJ2LR0zbj/phJ/7GNsCR6h+xUjNwvecTWd9E4jQrvGanDdEZIfWzTGK3ZGzVGRIKdK1iS4nUmghpp6f/TRmkmzt4rRXjVYiimC8x0ZZn1wcq5whZkyWUpWldJQ1EtLvS0to9Les8Nhu6JO7syF0Eec8s9kc5zx3795jtVrRTGd0IdB1AVfZvkJGjzIXC7vIrSmh++Ksql7QfcR7LzSbEU2n57DnrM04FEQco6jPWOdOsLye56ee7/QyU2PvJ0U1PiU0UjZdUXaDcOQsvAkhptveoBxCPnKcdVoeKkYwgey9GGMxCn+tM3Rty7IkamjYdaAK9F9dzqe1/nzlmcymtKU2oLHaXhH15qVkRCZysDhg2mwWYIeSZDJk7evvVvi2ScqjYa1jb2+fd37wQ+7cucvW9g7efoc/+kd+jdAF2hAIMRFz1EXLDP3XhwSybMRgTiNlba1jMptyeHhIiEkLb5dyXCpw6gmZlHCP44IcJSmddMATDnl0jp80IhP245tPdeznPbpOuUoGLTdj+nB6edgLN3VIiFOqh/dYMl3qICpFpCTLKTITU8Cm2HOUY4wsFgsxULuOVguHFx6prF2PK9AjUVqU3HuPrz3WV+ztGZyvMEiihvBoRbGkGAhdy2plCdMJNIUHJYj/EEI3faesnE3PEXVOymw1tfC9rfUcHBzy8MEu59yUmJFC67EbKd2Sgw22tNZDogrjRARx8DPeV9T1hNVixeHhIbPZ7EgpGeHmSm1gm506Cgxa7NPI7qO//EzDsWbbfPCpPvvMRvFr1REwRurtTpqa2WyKyVoU3GpL5lF3Ju8ddeVYhsKEVycpJ/YPDrAm4TKESTjVQD3pHWOlJfRkalhmJAKlz4VJCYvDRKHWeGMFVR+hULJPOIUPRmWtih62dhTRSsymG2zMNqSbUEw8fPiQEALW1YQQ6GLqEdOCOonxq00kdP8q3f36eJyRZ6lpGqyxtMs167WgeGUfBMkkT14cRlLUAu2OoVPXadP3s8huMVBOl+maKVfS26ce8yxGoaMNzstxDqpQfeazOSFG1us1q+UC7zyVV9Qd+n3QeY/NUQEcrRRkxDCLQSJVVp+FPFboZYzWYsB0zajijnaMdE4qtKA1sLN2nNS8kElds7O9xfbWplIRGMmtVeCKgbLUV4gwDFxqcL5mMpkSQyQlWC4WtFpaKoRIUPsm6yl63ZsNpkSrZKL13CUBTf5njKGqaqaTKfu7u1IGMSataCGyJDXZpdSU00i30XOVuTp5/Gw692ji1ONOOsHx+uMuCDzBQG3bVh74riN3nWTVGY/N2k85gcumJ0VZTYAwykUqsH1uKg73ViOPXjkR1mj7xgGJjUFCThISWpOspeuCKIVRVvF4akRBiIRUVcX2zgRXNbSrNU3dYNXLMClhk4EuEFZrls6RtjdQ7BSpW2l6pV6ge3BHOrEYY1kslpzZOUuKslFcv36DxWLFsg2s1h3LtlVebBE2ue+I5jTnDLhRizKD5JU6fNWwtV2zVgEOIzg/ZzGSTcrY2JFNM/JORl5OjxYefevpxtgfPDmP7qTTXknn+Svrv/izXOhzG71zZQSh7pIWbx7AGroUtZ6vfB/rxIDbmM8JlWdloVtFvK/peUxAXXmkgVrhVmVS7Lh/bxeTJQlkUjfklKhqj/WWkpGf84jPrJt4No6M1OqtGo+rJORkciR1QbxjgJiwCRrn2d7YpGmkaoAxTguyeyniPjbAtdRV6ThlcBzsL5nNNtnaWBJDYL0O7O8fcOacbOQhRDoSUT3uAWWU//ZoqaJQxSnNIMgrcP7iRXLK3LpxnUuXLmiSVCIb+QntGmzAG2kxjPWDAu4XUf99WtkdH/8IoqIPRY/Oqe4oSA9v0qZ/5ykv9PmPk3S6Hb/oKkLMgKWZTNna3uGT69dwztL4CdPJpHe0ZbOv8JWhQgOQVpJJq6YmdEtyAK
EgqA8v1lgEoOWOc8w6rHONnfSil8/PFHYMTYgNksgy+6qvQFZW0BXMR10s4SZ9ClGXo0xFSKR2nvSm4hIIHMK3i8lcPemIrJkRxn4jxSy4wPnXAJG8q3jGejSfx5P5YkWp5zba1mpY3JxHl8q9i6nIX947rOCcslknb5wiqQyDVSapLiAems2iLatKVk4jRRq8W5HmOFW2qdx7uOaZL9VNgfkuSkbAml4o3Ynxct+AF1eGoxq4W+lcFrYxowhChb1Ivt9/JVV+mIjePIp58+4+mTx2xWgwwUVYNV7CmlzDTPWOOp0yT2tynTOQG0mtNUpWKdx+ZMIbd8k9v7Owqiye2SY9gO1Km0Mkq+/62MTl57SYViMhZR07BOE1gUIWvFX9IBKlhkOgE2qzXrYRBLY7/SYVjPcB24WnX8J89/SPUXiMQl8/8z9J4lObWyZ5VaFv1fm8Xp17tKtpWSu8+N23cmqMfTHcfjipwT43jg7nYm5qS/lCXhaWhNs73z3uOAjLpp4KhZJ0iN6Jp2oeOwP3E6RUGkfEeOYpHqfGC1Gthut+QS5dCslXEWInMXOknerAErE3HLIEXjTVlDcALseyPTxcaLDI6tKpVTK1e7K0InXunOqvaX1dtSMzVlvBMXKuMDLvTC0zNGTttqMMXgbaALA/OUAUPXDXz9w69zf3/LcToSU1rOVOZCdaLLapwephe8upLE9UgWvMdWizUwzxnvCvIthRSTou8OUz3OekoRfThbK/N0otZEjicgUJ2X4AURfV/qF/2b9rtrNTRBdFuNIlAS9IUiwxZFLDefv3zDDz6O3B8Tdvc+f/Ev/E/eHXFf0DWnTJjFDrRbD4I4a3LqNEFdUEYd/gJk06xF48MwTwVTndq8Oayp3L6+peSRJpMWk1IfaqHve4ahp+s6UkrinZ3EN9l5T+eD0jScJoSWZhFotBWENTiqijOLTrCO7OnGI7a/263ErlMLuuAMbjlQJXatEzHmwViM9xTj9HeJFEhNYDtH36+AWZP4wOObx0Q1GJjjtHRFSIDTglR8g6W6RpLikiu5yvuttS7fl3Tzz1XcW8Q9RdrPYPC+08LIQCmkaeTjj35IyqMIVleH6QI2iH+7DN60I73tci3B0A1aM4pzD0WRjZy1eM7cl1tKPQCPvzSxi9EukXO4C8k8GUwTPrw0LVUeyjbcPWGw1CzDPg0G9M4zTmKHWkqg4rRNLkXu/nik7zv6oQNWnKaRXK7EHlKVUbCOzlQZhrtA/xf+mYHWMjLyYiVJRroBbbCjYnjw4AHDsFHJIKsKGhVvW4tYkGArloPSgvQdm81G2rBWUFBSxYXAetjQhx5jhBIAEIInx4kURUDfOqPcQhVibxbHtiFlZallcpY9/EzNEuQyg8S+yv1VBWk637HZbSXWEa51LYkSjxyPe8Zxj7ORmirGdzoLIPSKn0aQyjJl3fjUUoe0qe26UO/EOnIiqZTgl+a6yFDNW/8kwdEFj7cihWSq/okXG3IEbRQ+m6fgidlRY2GOB1IuOOvoOmkdj6Pw6vu+FVUFY6SQtU2ysnUqL7K7M+pXF1RQnAYFd5fixyFdY0FrP/jgQza7LddX16xVX1voUlmRdckrKhlmSHGm5KyFlcVXs9DqnHMMqzVedYhrqfzoRz/COIutYhd/HEey7quuOsk3lv1OX/tyhAvXVZJsJx3YZtlbZADRAomK8QJM5GpwLmgiK21+awLBeQxSKOzv7yk5qsHFmd5zvtre6j7zb29fIi2XsTURfKVYKDl+bii9M0F9c3dLromcI+NpotKRSz7D6LydWDVZms1mTc0bxlOh5BkqHI8nrA10viOoE4YxVuWdJGnNsjOzWq0YhgEfLHOUpNFr68QYy3pYsb3a0Q09zntKMbx48ULkrQyYIK1Ca+2yQHKWZK44gXBCZ+j7nr4PovfoVGbIiM4nitRUZyU4QOEfOeSHYa3+ugKf16xtjQKdC6y6jg/e/5CnT55SaiblyOl04rDfK7Qt1U8uUVG2JBqSUloTfJCJU610qiJVWStAtFU/TdKWDN4zHg88uPaM93uO929I8xHqTJqjIAa+Ww6Kti7lfV0E1LI3CkpVjZZ3RQMUnaMqMBVDpuP17WvGqfBkPfNrq8PnBt0XcU3TyDwdJY46aVt/dtksU5tIpR28JwRP0kJILDhnhm6NDQHng1TbSOJgjbZKc6Ja2G53YonoBX303ollXW28V8/19TV93+FCh7GeaZrY7+/EscmLRW3XdfJMFN2JUXhZFjDW4UJlGAb6IeC816KKJYHGgK2W4hwOq2ioTGtXG+i6AWv8ErtGuxoUEVAPoedb3/o2pSRimolpZp5n9vd7QWvQJFDbTdIrUVmTKq1Uq1yokoSqI3EjyYCxQuMRExAx0QjOkuaJ7By1zEyHPafjG4x6TudssbHgdCiNoij3wjc/I6lVuzYVGaBc9nLlNZpqFl0ASuJ4nClmz699+OWIXe+dDnw6GYACWa+0g/+MYp+TKF2UGGJM1FTxvqPz4i5jiDqNrvSmnIXnmWG9GlhvNqpbLUNt0xSpxuJDh3VBhzYMLog1tXOe0+kgSacVsMJZg/Fi5NJkz7SzKPSBUvAYVqsV/aB0KieDXN5KsugqCzIoQ0YVnNfiyioFS1q9RWafkGRSUEzvKt/46teBzP60Z5qFLjFNJ0wRRL24gjeehX8KS3JTotCnUKSzcf/ycugJqmqtUH5y1IFGHzDGyUBimukDvHrxKdPpjponqsnkJEVoMWrdqwi5MfZM5gaB9017eYKiXnaoq7YASpqJkyFNE+Q/fpmpd11y6yRGhZKg5UsRACDPIgEpl0hJ7Q8HcrYUPLU6UhbzlJx1ELJUNpsNITj6VcewFuOfECyes1pK5syVfqstbYzw06uBkptspyhX0BBDkZIK3vOVr3yF9WaNDwFvNW612G+UEWpd3ColhhyrlQw5Cj1FWv+dH+iGnqLDjdM88ezZM4L3wsktkVnrDivQrwz1aeHU4BRNp8/LH5k5QLmqRgEnG0V5Qib5xRaVCt53rNcbhm4lhRtG5y0z8ziyv3/DPB9wtsMYGar8w3RlLzJlBbtaEaAD1ZpwiJpIFGm8Uj43bt8t1D9PmL0cwDEl8ZJPZ3mpt15ea0cZcN7R9x0peqYUSVECK/gK/twi8t6xNF4M9J1jtero+g7v7dLOc060N70RLtZmvWa72YC1THHmeBwZTydSSlgDORV8kmEs7yxe0daGNLSkxHu/aGNK61elhAxSIas2ngBEFucEZu8wDMOaWmRCurjz1Lf4VFtWw4onT97TCU7hII3TiePhQFrcTeRhxThxOh11Mjsxz7OaFOgOr4dAW3AFIWU3rktrieVpprOV14c7ptMdhhlLJqeirQuZFhfqwhkz/Gxbox31TU+zKmZfi9GNRfjEczLc3Z8k5J3DpIr5+P6dAfdFXfM0Yy0YM3E6HkUOZmlPv/21zT3EB0/fd5Q0yZBTLcvkubVnaaXQBZnW1w1qIIDzbLZrsYfVhN8pUGONwRmL844HNw9koVaYk7SN5inirJWEoUih59z
ZVCAhlnOJjFMB5hCCDklYidu2+RqjCK1dfMxNdThfqcbRG8vQr5Gp+kK1wuUWM4mCsZYu9Dx8+ERbjImcE/M8st/vSSmqgoZUxSnNjNNITCK55YxQCZxRlEtJ9sumj8hQVafEfn0YxhhKiipFFInTkZonrMmKzBqhtdTEIr+mz8/oNv1WCXL5kE2TxJLTKitUJu5aE3d3EzVZHpsvR+zGGBlHURhJKeNru0+yx7bCyqmMlHMO7x0lKtIXJfm0tpMk14mTjPde9kFjCEg8187SDwN93wN1cfJpVtDOOfq+Z7u7wgcHTqb+51mmqlss4D0i9WOWddKMSWoFbF2EwLuu073X6rCJxHdzsJH3aNWnvCoy6xiqpe9XLEoDbeBUD2drDcEFHj96inOWm/nIHCdyzuz396SYlr0NI7SRXCJZpW+oVTr/rv1MHXAxSlvQX2aNCJyX3JyoJN4bX9sCxhn2d6/JcQQypiZKMVACWG2/woKOAnqon+k06OfOnNQzR1IE1sXCM8WZy2/50l2N2wnLOSSnvtzPnAw1iRqN81L455KZp0gujbdfSTERZ9lfBImVQbtSm3QRYKwAWbYNoSLmFK5bVFLQ/7eNqmLRdaEDnVUG5ZoJScsXdrsdw2olX6tP0CJOYsv5WaUorqUSSqWvkLSLIXQ9SQBbZwO80BpIOOu4ub5mf7TENGOdVU3SqtJ6WqzYZjkqSZ+p0plqeWDRfKYd7KWq1J4VDepiGv9UpCW9C5oXGHLMeAs1J8bTgTevn1PyLOegkzxg6cu/lafKawPtuBoB0RqXt+UZsq7yRafk3aHzzgS1lMQ86+pXzlpM6byA6vmAOb9M4U/64PHOM1U4jbMgKtoaksnDTAiShBVdmD5YVutBeSOSeZvO4GzG2iIDVIhPdfCBw3jk5atX3N3tyVHtH41htgnnLMFb+q6jDwFCoAuexgNpMlVFW0mtSXauqWU7sBrAopdnBD2rhvVqSy6CAoVOB7ZSohQh86/XG7764dd58/oN1AgmkcuG/OBafi+SFOSSGccTb9684XSSQ2k8jZSiLfxmx1bqgg4JqikJoxj0afsqJYKpzKc70rzH2YTRVnxGDgHjO95qvph6btMt71wDqZXx1Wjrw1CrWLfl4pgjvHh5T+gGjDOMOfNbH/+AX353zH0hV4yZvtMhqeOReZqWeP2pDz0AvHMMw8B0PDDHKIe8kY2lDVZhYBh6TuMsMevFS7lfr+n6TjhhsgPhnbQtnVbPXfBcXV1xv7/jdDyyP47c3d1SsxQ1yVlicth5FH1L5wjOYWsQekpRkj1qOayH50IPMOf4NSBDJraAUkSckdhdrTYyNZ2yDB5oclpyAWfou56nT94X/2+bMYiTTdTDvknrpJxVK++OcZrEaS6JQYZQQOTgqE3gskjsVlMFkddJ3Lb+ak44igwvTRK/tWSJuWwoiLuRjgvIqbAoT1RNqM9p61usPkWsKsgBUcRYY54z+/2e42nmOH85Ylf44/ccj0dijHRFuHkNhRKaQsEHFPEO9H3PmCZSTOSU25TdkqCGEBiGXmlWiDd4L4f5arXWjpEgdd6DbgA46+j7gccPH1FMYU6Z+/2B/X7P6ThiDYRQCDlTqsdRCdY2XEePDoNxstasUsAkkW2GFuf2r6QQelbU9t8Obzw9lvVqA22Sv+gQS2nJpCV0HQ9vnmhnTmIn58T9XjQ6S8kKDAgQME0n5jgzRz2ESxG1ACOavm19VYoiUIBFJXPOrmbWGHKKZOfojKHGwulwSymtyEoyYOkbJ09juAnHNY43oAuF9g8Kmp/PKOVr5jlCrtLxyT8NGn1ZriWPqRKvDRTKilrPcyLHLMPH6nQWqyTdBnGeKkXWukGE+fthYLNZq8yaaPWWUoRP7zt8kO6SMQbfd3jfKU80KvIv8WKtlba6AVtVc1ZVCHIp2NJs1aVrFTQpNqZlEjJ3syQPtS6a58Y5jO8oOLquOyP/CggYrKpySDJ3c31D6Byr/cA4jUDleNzLvqwjfk0qElS1vHV6m7IFOjBl3QUbQNm+OWOdJKg169eVNuwkPyOlSBcgxYnT/o7b18+hRmo2UJzMTRQjKGo9kwQbQoqCPcuvbsCB0rFEHUE6ADXnz43bd8tMmcp4OmCdIYSOaqryHkXX8Kc5HSLVcXd3y3YlVUwplcP9kccPnmIN2qqX1qYMRVVp4Qdp91gmDWpFkrCYmjA1C5/VVFarjtv7O97c33M4HEhppg0dNQvTlCFGgdNrP8hkpe+0ne+owPF45Hg88v7Tp/j1qnUsae5CkpvqoJAepFiDrY6r68fElJjTTFc9tc6U6qk1slrt+MbXv8EHX/kq/+Hf/FvS5mHWatxhbBvqOpcgH37lQ+EsqvXXPM/cvnnD/d0d93d3HPZ75hSpZOGxFKjFaAtO0GZvIM9H5umOUo50QbiRKVZqjqRk8UmrJd6u+ipL5NM4fPIpqfaFmC+baKkyHDSnwHEqmNBhTOX7Vz/mf/dr/w/+K/y33hl0X8TlvFsqX9ESbHFbf6qwqoquNWTSWEucRTd0t1lJUaUHfa1iwzfPE9XIGrG24t2sA0HCb6wYggWr2n7eWa62O3KcefHiJXf7e+aY1HRBgEZpMcnm55wUV5thxapfi5WkclSNtYtE2jD0F7I+erzXM0qBkecnFANwxXJ1/YhcK3OK+M6Ti5hppDTRd4Grqyt++Vd+nb/9H/0G87Sn1hlwoGvaqDyWFJdFubaCbAz9wDzPnA4H7u/vefP6Dcf9nnmeSSVRNH6FDqPJfT/Iv+VIno+cDq+4v/sU52YwwneWjoOhpBnrgqKiqu+rb73SNsj2YJEOSD3zytqVSyXmRIzSCvvB9Sf8n/6V/8WXInatPl+ZFpbp4YY4QDP80ARSEc6yXgst4nRSPdAzzcla4SxDZb8/SFFqRVu26yD4rPdHTEX6rsPUxHromFUR5MHNA/7hb/1D7o9H5lkLZ/U8MZPEvpHBBLarFUPf03c9buik3YjQBbq+F963Mfim8/zZ9w+0WBautlBYQnXsdjdSXMUkfG6g1ESMYoLhnOPJ4/fBOHKO1CL0pu1mh1hUsehEzvPE/f0dh8OB4/HE/f2eaRwXJDSp5E9Rrq/EUyvcZegr5SL6nFVQJ09h8I7D/UtivMMwYU2i5MicC32XdNraNfiUxbBjGeM9F1ntz5acLiVohRSFtjFPkfI5lpF/7FcD0iwLmp+TGP7kFCmxErzBGI+1gfU6qKw8GOOZp5nNugcjFKj1aqVSpbLeW6el8471ZsV2dy20Fe+x3nE4nLi9vePu7p4UI/SiDRqsJ1iPdw5TsoBwMVGKo2gb3DqPNVYGTJ2RD9NSRhCXv4Zvi1QgRd4LprDGsVnvKDpNH5xZUH9JdC196Hj//a8yjic+KLPwWnPhzd0rDoc90zgJOEijK8prLbWQq+gNW4Ta5bCQig7hKmJsKtnIMK8MYcv31lpUVz0zTScCntA7jqcDp+MdOR8xqkxQi6cWSfobH8LQUoVK87he4lQLsSbNZnUgzdQMJOVp/wtYnWILmCSTdN4spVxtfJ
yL3aVJhVAK+9s7SB3BW652O9Ik1WrwQYG+IoGSZmnzOI/3Fh/AmLLA5846CEEg5hQBmdp89eo1L9/ccponkWVIhc9y0cAtItNF63njHQVp8aWSiXkmOBmQ8s6pBiZYrE7Vm+WGVyy5WBXgraRsuD/MZONZbz2l6mLLmTlGDscT+2Pk6voxB2fJaY8xnkrUDanpcEoF4zAE32FWBq+yR08ePiTOkTjPwlU8HSk1kfLM6bhnf7iXwKwOby3Bwu3dC8bpnlImSVqtEdkj3UDTPOK6QVBYzcIXDtuyl1Q9+BVpVpmWpkF3Ok3MKZEIDP0VV4/fpwCPhx2/fP/l4EI5A+N0VCHx1ZKASuiek5jGoY4xcTgcyPGIs4J2GpXhIOuzMWd0q9ZCFwI+WLWbrBg7Ly19rJUWtVb91lRC5/joxx9zf39PjLNO8V4soqVA0tZTVd6ms6IbaSrDsKLrez755BN22w1Pnj4h+EEoBBcHWvtDcETVDlT5HOfX7I8Hqin0Kyt2eqUQowxJrbdbjAs8ffohb14/Y5rusMaBjTQt4iYb443wyZsihLVIUv/ghpwy0zSTYmS/3zPFiTnNjOORw/FO+GfFyvR0lsG0aTwwTnfksseYiO7lau2XKEkP+EVDtrXyymfuo1bxdcHylk+X0uTBYJwTpVq+UR/yP77/9Z95HP4LXT/Vnbp8F/r3ev5ME+S3zmKNCJeH0Kk/+cQ4noBCFzrRbnSV4AtwxGM0zjzeCk+47xzeB7rO8+L5pzx79kJahcYgx6EeRkXVenULnueoyhAdznX44HCmLgNf3/ve93jv6RPlYwe80bn5BoxzpqvU2vztLI7Ag4fvcX+/Z04zoXpSPpGSJeeJ6mX+4d/6L/xlfuPv/F1ePH/GNN5jzCzAgml7GRKvmzWPHzxYRM9jjEshcDoeef36NbevXzOeRqYow44pRzE4LwYyODxXVzfkmKjzTO0cxRjubz+FesTaCWoC5arHOBNAh7MMGHdOR1ulhZgwtCbB0uUqVYZbnXxNzNKJPI3z0kn4UlyfAcWaZFrRTsow9KzXA7VMHO9HUqw4nAwa+04RVlgNHfM8E+NIIeN9R99LPPZB9qxiMs57+s5gTWS33fLw4WN2V9dY5/n000/45OULxnEkRUHsUs5M00RzrGxgwHpYMXSBThF+4y1Yi/OBTpVSYjNsMW/t3EtqakyzLpecyVnoNUaoKhOmCH5KEhfCI7dsNzs222tRjskRyFxdXavLVaaasqD/03Riv99zd3/PeDpxsiPjaRLrKCvnW82JVr0bi9ArixT2BaPzE6I9T8mYMnO1WZHjidP+BfPxNc5EKlEKNidos3Otz3GZPmq+tNyNi3tjFuxLvrKo21dOnxu370xQnXPYENRSUCZscy1LgiW/3Jy5QDpQE2PkcIhc7zYMnU40z2LTNo4jcZ6Ypkm1CA3N9mropf1jnEiUSNJbMdrWa5PUd3e34n2uQw8KnywormxBBawT+N5Z2RwVJZR2gEwuP7y51qEUef2S1F5m9dIoLMVwe7vn9e2B+8PEi1d3nKaZWA2+H/B9T1Ah7P3hwPd/+ANevDzRefGJtqZTbqDRxSo/XSoY+b5GgG5tpRBE9qcfejZ5w3V9QCFTaiKnyHg8EDrLg5sbrBE+3cs3L5jTSK1ZdBFrFeqIclRynMmqF3g+6FkOl7cjS//Qdr9Ub5WUKofTzO3hjuPR8OoHP6brB3bXA+9/+u13BtwXdRlT3iLFnzUwL1BiRaMEkS7Mc2Q+HlkPPTUVSpJDS9QZIqUkTuOReZbNTarpqoLU4qBjjRW5DwoU0bQ1VToLt29uOYyRlJIi4MuLRRIMeQTtdTd+oXZgKFTGeQQjKMSDBw8Yul6oG9b81OOz+h5LMRyOE/eHibv9yItXdxyniXWs+DDQdStycZRqGefIixcv+fHHr9msrvB+kAPWOIyxuv5b7iTt4oZ5tbcicSwWwc6LfMt6vdZqP5FyZByP5BgZOs/D6yucc8Q8EeeR8XQgFdmg5VAWrlcxlZxmMQlQjpkxjTB4LppbcSUJjqaurYZVLnXOME2R0wRzstS6YvPpV/8IIvFnd30WbSylMs+RcZxJMeG9J+j9LklQ55Qyp9Mo+26cl3zeOeH2G5N14KNNrTdT6UKKM1OEOVZeTq8kzlNZNELbriD7rVXtWwUrpFITqSmgGpEAmuaZnDPr9Vr91e2y917Gb4soOUjlMJ1j5niM7I+RoVqGlaMUSZTnaRa1FeB3fvf32W4fMB4n2QfVmlfoSQ1Nl9dpW3sT2W+pGYNls15ztdkyPn4iCUGciUkGhu/3r5miSClaF6CKNJqplRwjxzhzHPdg0jK0WPV5lRwpxsq8rZNkYZH/W/YAieA2WNMktzJNtkfmJEQ+TmyEKZ9d/V/89VnqVPu35fP6ZzP/CcFRasVbD+W833nvmeeRnCIpRXJOkjQ68L4I9UeRP2PA2ype8abw6MFDQvAcjweOp5FPPvmEwzwpx5ilkC5Z+cW1Df1OUCrObgmhW1QorJM9rNbKJ598woMHN4uaxgUOcD67McvnLQ6coRTLbveQOWZiSnQ6KFZ0WMwawzCs+PbP/yL7w5FXL19yPNwDkdAZqL3mDqgOsiSpMQp4Nc+RaZ7Yawcgxsh4OhGneaG0VIR6AKrpq5P9q9VG1kAumFywNZHmI9N4R4x7nJX9/tLgBBtkh20t58/GwR/2rwZQMK5WUVLJOX1u3H6OlL/DOpkoViR94d20SpdG4hZYRf+QjTPOgaAOPXmeKCUzTTK9l5TLKoltwaRKUvJxNV6mS0x7X0K2d2qjOs+zwNMNzeU8ePUW1GzOTRFDaweqRV2phBBYrVbi56ub5GXIyV+rooYzz1684pNnL7k/JeJcyQU4JfzdyGZbGTqZDi4p8vr2lh/98BV/8pf+ZZwNONchBGdUW1W7kToZrxIIyyM2OqzVfJ2t9DEWvVRDpVxd4YNht1kBhpgmTqc75jyKzlwjJ+tCrqUsbg52sejUiLosZMz5L28leFRBjwscjjMfffya49QxV8fVzUPCes3z0/DukPqCLlHDMW0+Rj6KtDSklrpQn6h6b+LMNI5crVeLYcMcEykLjzUXqVwXu1s1nTc6EeyNPxcYy9pgGQaZp5mUMotrmHm7sFqeFVqRK0+vHVlQxQACw8ObB6zX62XwRYOI9v+GlkQYxjHy4uUbnr18w91+4nSMxFKozPhwYrXZslaOZy6Fu/t7/tE/+i5/9lf/nGgeex2gsRVb22CMvJ4lQb3sSmpcCx1GkN2u68BWJc9Xcrqi5EznLJtVj7VOEKrpyDSfZNK3NhFtvS9VUKiSLVSRaGv9jeXXVwNKsWD57EVQUKTbUSoxVcYR9mNidpmw/XLE7rsuo3qhzQY3xcQ8TszTxGoYmI6TgAE6pd+Q03meyDlinej/iq2p3COvbfQzr046WLVU0pwoFE6nSeXQPtt+rsvraihu23vP1r0iaRVTJCbPbrel7/u3eNQN3OCiuwFyiI5T5HCcub098ub+wP4wkjGsVonVepDWqbr2xBj5u3/v7
/GLf+LPYF2g71ZS/NtKrfJeam3Fa/stkqA6lZ6SKezA0HVst1tJCEpWDnhmv99xGmXw0jvHdrvDWuHnT1NiigfmONJ0PluLE4rw7myiNqF4I5JWl+t3ub/VYkxBOllloZ61+1yrOn/lc+H9x3ktVJSf4vhfXrpvtKFlc/7XRWLKGOYYleaStFvlxa3RIcM+lEXhwlkZXGv61fM8sT+O3N7ds98fyG1TrZfHvHYE9Y7mIm57Z+cnZbDr6yyl8ObNGx7cXOtw1blIOL8zs3SSK00SQN5PP2wYp1txh8uZULK6RCWwotZxc/OAzeaaOCXiHCWZFlmLhT5z5ixrqVVFFi2lJAO348g0CnVlOkmymnIWTWBmksYkcrvk/JDDEVMkPufpyDQdyXnCmrLoKteSqDmC687AVnt4l4iy+exh0M4z+eI25JnzT88wffZ695BUlYm6WpPyhsqSGBqalmj7EGJ3KVlbTYXj8UTNhb7vmMeZlLPyybK+WLMsOpkelUWOukdYWFydqh5S1rYN7JyooYjjZ91N5HWdb0gtdREuLyXjvV/uY0sU23trD6AAtRpevLzlxx9/ysefPicR6LstvhuI2XM8GeY5MCXZ4JyVoPnHv/s7fPMbv8Bm7cUas2ZdWGdx88Vju0lM8fZ7WZ65kcMVK25H1lrCek3oLL0XdGuOYt2YykitUQ93hHfVCoxaSDGqsoFsgG2ApEXYEjNWpmnlH1U7rch04/1h5A9+8BFT6rh59AE31hO7wO/6L4cbT0PL9dikoANeNbOMO7ZuQFUbQa1IS5aNziq6E5PwM1vVZ4zEeJ0TYHEU8I5s5ZBrB79RLVvvRfd3TvozmzQAZ6/ztlM3zQTa1GaV9nd1WpAVGcR4+PAhVoXQq26J7QFexi4VXr2+54c/+gkff/qcKRtW/Q7rA1Oq3B8nHmbDnLQ6N4Zxnvl7f/8/4Zf+xJ/h+motKFFJWH0/Rje5s0/7+ZRpif8lT9U1hFjGXmVQZhh0kNHRBZF+E6HrSRzbSpat2LBsMsLBV2crj66jloy2u4c+V11b5qI81aGs5sqTM9wfCp++HLn1ifolcZL6wy6JhiJ7m5odNCQozpHT4cTD955w7/ZEIwoUOScOBzm4RNouy9BkiQy98ACdlxamKE1o/NS6KE/UKgdJ25Ncc0JbYs0vQ4HVqOa1JlDSGWo8cOms5a7jK1/5ivqxS/yet7pzOdEGQVOs3N0d+OTZK378k2cUHHOsJERNY73bkKqhqInB4Xjkb/3tv81u9x67zUq6AwlJUJkpVXnQiInLedPTmQoDps1nV0cIjWeIAgTw6NGNoj+C4g1DwDopsGIeyac3pDQve42pddFalglm5QxWJK5N0TVszmupva6i52JRaaWKvm6rSX+R2YA//vxUgCMd5FzO3IakLv9fEItT2QOlYKpLA6TWhrSL0H0pWShUwRKCdJOskYRNaPmti1WhFO5uX5OL4X5/5H5/lPOrtaZohVRLLOWutxmndiZfnr9FE+xSi/K7rZjeaPElA3T5nO4aMLgLKmAWTWJjmGImFQhdkoFt5e/j5eXdvrnj/Q++xmZ7y+F4oGSLdVUVNc4JKZjlbAJD7TpqrVxf7aBKoXC4u19Q1mmeOI4jMZ3YH/bMcaSUyOKmSMuTpMA7HPVrsihS5KrKljWLJmqJYm6jj1S297IUp9DyCb1HyA4s9YR6ZRbpxn5e3H4Ogurpe88cI/M84Vy7SQ3VMHwWiSqKfKQY2c8TeU48ebJht9txf38v7XwJAwBCcAyrjs1mhbdy8Htr8OqhLBqPrUVTpVVVDKW6cyK1nPGtZd1aTXUJtJySPugmswSr1UqGYt5CTqu6PrQFI37Av/+d7/H85S0xGbIRjpcLG77yjW/xrW/9POu+482LHwCG4C3WBj598Zy7+z3ebQihqoSV9NJdQ0GWZpY7V3gX+nz1YuG05Nc5sYd06GS+9Yi3tsf5HcQjlRlrxDrOOaeBUSAVYp4wEawLag/IIlzN8ptaUnv+3aUkUjGMU+ZwTNztZ/bzRL9+TIqVEgbmrz19d0h9YdcZEm7PsRRtuxchbFvAlKp2mkk5YpFXr1+zXm3ZbnaUeEdMCVDkU5Mf7w1d5+k6RwjqtFMztYreaQgejEiQlJKY4yixs+gdto9zW7Q59rTXXktdeM2hej3gBB1DecqXk/y0xFDRrFoMMcHv/t53ef7yljFmsumZqmPwW55+5UO+/rWvse46TqeX5GIXy8v745H7+5FVv6bvAjQ0iIJ3Wvi0JKVtI7Uusdtatpf4ibEqMu6ccK6DJ3jh8FINEUt1HdWGZhPBIgpvigJsMtFuRCpSUBQdLpPq/Ky7d94azfL5UiqpGFKBXC2//4Mf88nzA/HxY5587Rs/m9D7WV71bZQGWPbbooZ3OYuH9+FwkCFUV4gxczwdkYRAkgGjqELVn1lLIs2VuSa8qzjXa/ELEoMypmKtwXojOqJvHfbyrJ215xg06HSu+IxTKsbL3i120ZGuu0SqLwfdWjWtu44xPHtxyx98/wf8+JNnnGbohy1dv+FwKozTLcPmId2qUK2nGsv96cTv/t53+Ff/7B2rfsPQryhWgAGjoXLurrnlNVxa2ghtRO5BmxE4b4hGhMxNj3cO72T9W+sEYc2JTCIyq6A/src6+fZcZtGNg4VnuiQamnwsv7+KAgcWHfhsoIzs53M0grZN6UuRoLbWbb1AUeUT+harIJGHw4F5PgGZoe84xkhMmZSyUlZOnE4nks6dVAzH44FhFfDe4VT5oRo5U60kHsR54vknzzA+kIrBWIc3Dsnfz10oUAlLTZgBMAbrRdKq0durgSlFhpTYrNd8+9vfoutUX/0CPzJIsnl55Wy4P5x4fXvPi+evhV51f2Q1DFTj6dcrUrFMc8ZYT8yJ/+tf/7/xl/7yX2Xoem5unnDYv0SSYKECXEok2uXWVmlg6f021uBcT//wnNdUAGvIVSgT+/tb9vtbcs5shyuC7xQgSLx6+YLj4Q0pnoAERmYDcjuL7EzOHdYNUoCh92I5c7gADHXc8TMC/23gOqXPj9t3JqjCBXWYLBVnm96/TGTON034YMH3lKze26UQjcDV6/WKGNWebZ5xqgzQdY6+E/cCcWaQqtQ1PoqTheytJTgLJVGrvGz71iHYkgetLrSa6XpxINlu1qSYFpmDWsUpIutEqUIutB2jHcAFOJxG7g8nplSoxlOtxwbPGCtzsRi/xvoBF7YYJqzPpAT748inz18QpyNX28DDh2u8wMC6CKR+883/sbZESgeUlg1JOIaGgvdWheAdnfM4J+hQRmSFjNvh+0JxnpreUOoJa5K074yh2KryVkUEfRVVYtlQ9KBX+EqkXAy1OHJ1pAjTSZ7BzcPHzC/3vHjxksPhxMPk+Ma/9tfeHXFf0PXWBvk5l3CfRIJsnhPkE8F19JuOq+sdb9680SpfDpyuE/HzzWoQNxwrkRKc0edj1e0lSHJQqzhKRcDK8MqS3+mKbjyfc2UOofMiXdUSL70aP7UURREafmoa71R50wbu9yJnNaVCwVOsI2zWTKlS
3cD66glX6xXTp0dy2ZMxxFxIJfPi9StSnNhtep4+2cpxbo0mjZJcB/s2VaSUijVOudLl4r0arBPuo/eeYJ3oHmqSnbG4WrBuTeiuMSZS5jfUMgpnsiZMKYuzSikGYzO2JFgS0DOC2u6TvKy6iFiXXCWhS3A8ZV69PvLqzYFu+zXeW/2Ff/6A+yO+2hlQi/hpW1T6r2RyjkzTzIsXr3n04BHWOE6cRCqtTdLqT/HOsrva0vnGIctYa3SvdQSvahWIvuY0jaRizxPBFx0ZuDyIGuLPMnRZihxCPnTKXTXSGbgQ+r4srmzL4pDtOBX4nd/7Ds9f3XKaK4lAcD3Fr/jww6/zzW98nSEEptNrcp4UdfcM2x239/dcX11jzAqDAB4gSYuxLPu7afsfRlCg1kI1UHQU7JIqJdxdR7Bu0ZS1xlON10GxinEZzBHYU5kxJqmUmgI56pQlllPN+rg9IafIq6C12RSVZDsP+pUqRWqMlilm5ijufl+W67IorQjn1HuH87LPzfPM8XDAmcLVbsf9m+cYI+YqRdvVc4yIQUElZ5inEWczOaoaiQU39Liul/1BW4Sb7ZpUDK4afDGkaoReqBuUIHkCqnl75o0WxDact5BfUcsQNHfi4YMHqLj2WQq0UQx1HxNDHcvr2wPf/9GP+fjT5xyOE8b1VDzxlIgv9th+TT9UKp5U4P5w5NPnL/j973yPb379m2xWK6xdSWFlZiBBzTRpYP3t8rHUM6VhwJjgJaXRXKaaCs7jzJqb3RXUD7HWsBkCm/WKXBOH8Z50PJLjSK5poWNJPSS0SHQwzy/xWN5O9Bs310rMWsSeVXSwjbq0yTqYY/3cuH1ngtq4diJcK7+oDXCc4VxU8qC14IVX5F1YnKWyQsUiM+UwvfAxm/6h7rUii2KKCje3QQtZ2CF4EZM2ULMgU+1Aag417fWIZlnP9fWGq6udVC2lcEyC0LZgSilRu7AkMk1SagGm9dD7+ONPiOruYYzBh8B6s+LNfeT17Rt+8uxTvvr0A9bbHdPpjlQScxIO2E9+8hNIDwj+GmM2mjxLXImHs1tI8tLCaVwopCholVGtqsVplo/Gd2kt5WoMxq5wQQV5kfYcHLXFJFuxyP3JgJrBnJ0hFGWShyqvJ1c53HPOpGKYZkfMDh/WPHkccN0VMcrrXsc93/qD3wB+4Z1B90VcTcqmyS01JERDckEtQNFomRihSXDEmMg5qzxPQ7udIoBNy0+SrOAt1qjmqbtANpHBPmNE77ahQ0InMguyC5zXFJLA7XYbHj54QN/3HA97ahWLP4w5x1DDCBvCw0/H7kcf/Zg5CWRjrLgUrTcDr16PvLm/49nLl/ThK2yvH3B/ewck5ZsbfvKTj5mudtTH1zx9utWXr/wrayR2rXQympSTW9zb5N4ua+sibu1n4lZ4WpZiDMb0WLchmEQEyvSa5jpjTIF83oRLNlQn009ntqnR33cuugqVWqwWAZaUDfNsOB0Lu6sHTKVnHfjSxO7nXVXXpFfkjiqUqkml1EKQPU04gWcetAygWDof6IJVS01RmHhLON8YqpV9PeckVC+NuXaXK0XNPs4oPqCSVh3r9ZrVMGiBq0hkO8Tboc6Cb8t+qHEr6wKOx5njaSJGaaBiPbbrmFKVgqbfMqw2zPOJmp14jNeKD57Xb94whJ4cr3jwYGDhMRskdo3uvbQzo6GoF3txS7j1BVoL3ls17XBqmSydDykZKsYEjFvjuxtyPFLSQQay1MGnDWSKsLzobNY2kKB3uOo5I3JWLQVoY6pGCxNIWmylUojl820j/6ivtwakLvin8t8yLNqMheKcSDXRuZ7tZsM4ztRamOOsZkDyM43SprrOKyVPeekgCZDmCta29Z1Fo3bRAjI/RW0WPV7tXAGYupyNKFJYvagAlawT5+X8PeddRn52Q+XlZ0v7+vs//IiffPqSu/2JOYHvDL4b6Ddbtrsd1m+Zop7nuYjmauj45JNnPLh5TOcDonHuMSbJ2rAXlLC2d2KoVulPy7nAske2F2uoOC/3yaLGMc7SeenAlmrJCnQVayhW9gCqwajAf+VcXNWiLSzdy2lUgeW/24uooDTCiuaKmvinnD43bj9HqD9T1ObTmrYAz8mpFL51Ee/GyBCTUbtGg8Mbs0yAWWuFe6ST+wZtr+qkWHMiQYeYQAjgzoh+6jD04vRjDMaKD3rNzZ+moaKWEDqurna89/QJu90GZw37+3tJHjRB9S6IzM9Fa4cFgWqBKwGw3x8UPVNh6c4yrAPl7sTt/Ruev3jGk4eP2Q6eXITjGaMgZ69fvWK7cjy4GfTMFl1GuYdWUE9dNOfFool/1VaGEnSaoHVDU857qD1vaDZgzVo3swQxUvN4rviMBE0pGYO484iV69sHPMhGkrWFmDPEXInJgu3pVwOPvWNYJ+YoLcXBwviP/y7wX31n0H0R12fR01LOblrnxd2MI8QKVDQjHW2QLueMNeJqJlzchqTIPZJ2lpcE11Scfbv1DdKCasgUWIptiZNOjy4r2aiIuWO7WfH06RNurq+wxjCdDho6UrDJtPECYS0H/bIttSy8Gl6/uaXkvKA9LjiGwVNI3O1vefbiOU8fPWG7XnN/a0ilCEJnLK9evsDkme3aLwd8GxCQRNm99Xsv33njdrUDShJTluR0SU2UhNiQB2vAugHriraFJ6pJVNK5W1KrbnQGX+piGCO/WPmPoPdYhe2L8E5LMcRsidGSS+Dhw8f4fsIZ96WJ3XdfZzqVc14OTe1JplxIOTH4QYbSEEAAyjIdLR0XVFpPnp2zVWX23k42zzy8DEV4mW0EpD3Xc+dOvm8YBq6vduzUm35/fw/1XEgJ8l9gUWnhAj3U/9ck9cWLV1LoSzUviiarjrv7xP544u5wZLu5phs2pNJRTZI901pevX6FrRVD5OGDr8Bl/Gq3zzQpwmUdqtUjDbBo+4jR79Vui67D5QO3pE3WGLVvvaJykATdFGCirRPRr5QEtXH/9A5qMYYWy8olb2fA8v2SCKUMMUmimuqXJ0FdktOWoC760437rwNVacIbz2Yt3MlpjmI0kYvOpIgIv/eOoe8JXUOXRdXHGqNC/fJ1IpeXyKlSjJNuJ42k1vaodsad9+jlNGhA0UU3sYFvvPUz4LN7fNVKplbLOCU+ffaSN3d75lTJBEHZbaBfX/PgyftcbTaUdKSUGVsK1lZW6w3PX77gg9tbtus1OVX6IAmlLoFlMMwsKbWhmLyI97f3KmMllxtjxTvZsy3iMmg1V0PjVyQNA9QA1oN12mGoOiwlz060hZtdrYIDbdj7p6MCWlQrRS6r0UhM6XPj9p0Jaq0FH5y07JXIbHgbQc3KkQPR3/NeHKT6wdN3HmcMx8Oew+FANwwYYxjLSZANFYttk9bysy1xnslFtPS8rwyrgLOB9bDi8cNH3B0jpUp7o44iaC0bH3Rd4PHjhzx58oQP3n+CNYZ5PHFswewczgkaaxcP3Zbsi7/uUjYbaQHtdhud5gO8xflCCICNTNOe+/s3VFPYH/eqkXlaAniaRuZ5Ev08ncSzuCVBbYeANVUD4Jy
w6EM4H/oNKGuFdJHl1SYOATIOZzqsr2AqNiRqvqeUqAM1shmLJp3BZEvOoiUnP1dDvEgHKqWqSaolZ0vKnjCsuBp61jWwnpMswlR4cz/zN/7ha/777wy5L/a6VCE4n7znf7+Uc/JB9O6cEfpH0fu0Wq20EEuiSVdEk3OqBW+l1W+1Yig1Y7JOlTr5vavVwIObG27vRqIKfDdkqV3OWfq+Y7fd8MH7T/jggw+wFg77fTv61BGoo+9XZ0T4nCGIQPNSMsvHMARQ7qf1DufBu4I1kXHa8+buNalIm/B4OmGN2Ox67xjHI6eTY44noEp71BRVRzjzqUXo2bwVu+Ziu2oFlSoPXSAZZ6RoKcqsAzrpuASDDSM5R4pJVBPVJ12sT2vN5FLxyF7VCkp7BpokKc2Ih3exUmglmJOj32x4surZTokXd9OXLnYvr8s4bvbG1sqgUStsyqzi/kj7spTMer2SSWFUsqiK9FRf5dC0FoL/TCxVqKVKUuAn5lOi1IQLA8vgYa2IRI5fktoQPO+99x7vv/cE5yzj6cTx/l6KFXW8CiEs7+PyfUEDBVgKoN/7/e8Qk9CTDOA6y3a35s3+NS9uX9J99EOudw94cH1DSi8x9aRLvPDixTPm8UjwhZ//+fcF/dffJ8NzjkVBRYueVlxWpBUtEIkMIIkohbA/5QZocdW2bHPehwsOHIT+kcZfIrOn2JZcCqKUc8Zmx7JekaN8QaCMcFBrhZqbi5zIFpXqiHNhmirTnJnr/EcTeP8M1+Vk+Vnpg/amz23zUkgxiylCrFxvH7LZbCj1QJwjFdE7917pFMExDCuct9SaEEJQpfOCYjt9psUYTYDEdQ2lqtil6GhtafuHFFei6tPcpxqCUHVg1jlHzuVcbMm3XcRQe/aG12/umWaxtm2ibSYEYrHYbsX1w/f4ua9/kx99/3eYxhPZFLz3rLdbPv7xS168eMl2vcbbQt/ZC9TfLFJ5TnOIChcglg6Av9WVkz26deCc94iUv12oPRVDLmIvY91KAAq/pdQZ8kFjUp9dLmQTgQjVSXxql7s5YFTN46hQTRZrab2fuWSRjUtilPJ5cfvOBPV+vxdej/UI3HzOyNuhk7McEsaKqHP1QQ7aTY+zkOZZNbpuubm50da6ZM2FwmqzYr0eWK1WGAM+eA6HI6fjqDqgmVICoXOsVyt+8U/8At/5wU8Yp0Q9jpzGKPplnefhw2vef+8p3/j61+l6T03Soso5Cb9FJ/dTytze3mGtYz30PH70gM1mLQLMF3hUO+u315tlctZ5CAFKPDAEIyLPOYrzwv0L4unAeoD1eoX3hlImnENtXfNbCJOpjQNzeVPbOV/PH+3Txmr1r65WCPLUtlKD079ZDAHMmtA9JOYjpThyOVLtJGLnVVoZqVqszVjXiCyAMYo2SVI2R8M0Q66Bl3cn1lcD/Vr4YMZ3rLQ1NtvCp3X1zoD7Iq+lxX+RjAKyiEpRzqcUCYJc7igPHvDgasfpdOBwf8+r05GbmxtOp5MgplU2OB88m/XAMPRURRNvb18BVZGVmdXaYG3HowePWK2u+e4PPiJmMTqY5khzeTPGsFr1PH70iK9++BU22xXBO/XYlpgtpdL3PbXCNI7M0yzognPnZ0fjG0HjrV4/vMb++AXGind75ytxvKPzVTRN88QcT9zdveHu/p7NSqbEQ3CkOJFyT6lClpffIBV9i93lUhvI2kRHPxO/MvjR5mXb91xWh289Oage6AndjXieV7G3xFQZNNFDMJeEK4rG1TOaKsYZEscpFVK0xCwLOmXHOFs211uCWbHawslmPq1f3in+dtUqHvelFLAs1KfVaqDfrmXgjErwgaePHzPOEzHKgF9JSbR0XWVVetqzkHbbhDFWPMM7Ry6Jb33zm3T9c37y7BVYS66ecY4i7s25uOv7npvrK77ywVMePXrIeug5HA6MZU+jcjknScZ6tcWirca2PrnsACgwoJ03KGI84CyuM4SughmZpsLd/RsKhWmeGMcRwwR4fLBM05GDSRzHLcbW5bXahhArOCDtWdHYlfwlc7n/t7TRVKMqEg2TK4DDVP+ZDRx1irI6DyDdmJiOYI6isFL0wK9WEiLrLtZSuUh60N8jWsZFbTdTLpQM02TItaOajpz++BFU4KfW8uUANVhC6EFl3nKuJJOZ51mcHnX4KARJSr13i/Z5rRLrFuH8emtp3N72sAxwtdsQX94Rc6HWTHFSmC31MyDDb+ezIQTH1W7LN7/59UVDNM1xKfLFJngQWTTOhc4SG/rf1ThSgt/8R7/JOEeJZmsJXeDx04d8/Okdz1+9oP94w9XVIz78+rf45OMTlCOrdU+uiXE68uz5J6x6z5NHV2DX2LeKK4cxlx5sVZNU7W3o6yqqDbV8n2vKA+YtJQI5ExutRWVWvMUbg3GGOB7JZk8xeVE0SimSYhQtX4PycFlcvYS2KdQV0ZhVu/YsxUnMXhLUVD43bt+ZoB5PJzAG7zuskVZm0zpDW/7w1pEjm4tzTHHGUclxIsYZ5xwpR5o+IlR1KQk0a79UMuv1Q7Ul9SreXXExYm3C+UAIHe+//x6nKbK6P+JDz/F44PHjJzx5+pib6yvWQ5AWCkUmVVMUq8WU1a6yMk0jz188o7OWq82KzWolOmBWp+ltQ1B16liRBu8qlsQ0Hrje7Silgxr55KOPKOUez6Rto4oxmeAN283AdjPIFpwyOEubyrMY1X7TB2vOU7aX15l3KHc8V1E7WK56bk21doMxDu8G8I+heEwNpHoHNjUolVIyMaYLuL8dWhCTKBjkYogZnr94yd3oKK4HN+CHnksVh28OT/n3vv2ff2fAfVFXqgVrpFKsxi4t/ja72dB/692ivWeUP5eyqFZM00iKkRD8RatHDjjnAsF3OCuWqq7vEWcw0VksBQ6HE6uVI/SBzWbF17/6Ia/3M69fv6HUIzEXgg88fvKYD95/j4cPbths17K+6tltpPFpnXPMc+Q0jnz88ccMfcfXvvohm80GkIRNQcRlM+u6TvmxhuAsxmRynFmvOnINWBIvnz9jHN9ATuTiOJ2OIqTtLOtVYPX/Z+9Pgi1bs/s+7Pc1uzvN7TLzdVX1CgRAAiTCEAhRpEyFqN6U5JAYpgYaORyeOUIzRXjsgcMTh4eeeCLa4bBJayDJcphiwApTlhS2SMoyxAYgAVQBhWpek91tTrObr/Fgfd+397mZLx9IQq9ycFbFq8w899xzdrP2av5rrf9qagQdnjAqn6Mib04hJVlF+x45qZIgkLVrgRipCCqc2JCMzoEi6gpbrSA6QpxwwYF2gj7HKHvn6wUiFyRpjEomT52TAZLJaQYnA4+744F9r/m42aCbmqg0P9N9xP/m5/+VP3Q9/EcR54VIPsZHAT0JQY0BnfgLrRXn2a3WXK077u5ec/fwgFGK1XqNcyNlxasS5oSry0tqK73vWsHDwx2ZGswYyzQFjN3SNi0fffQhzXrL69sHHnYDw+RmJ601bVvxyUcfcvPkmu12TZXoc7yfcM6X3taqkpaq/nik73uM6QjGiHNTana4ObkicHlzwcPhpR
TercaYwNjvsFqSZ4LDh4nbux0P+z21nbC2pm1r7vc9rlIQPXkYLEsOPpfmc9ZfBYlKcc64ljMXuQ3icY1gWaWJsvEpgqGhNheE6imDewVGmFViSK1xIWISXJt5iyW2SIFwjAQHPpjEeQrOq5SsrTHW4G1LUD/9Iamlzzpl+JkZCnRKVI2uqKuG2lbJ3gXatkapDeMoG+2CF2J7B4ToWLWVbLUkX3EjPdaLfrfvfvdTfPghD/sBFxS2rtkPcm3kmZIEJK8IXq87rq8u+fiTD2mbmrvbW4bDMYEWEh7VdS12NFEz5j8FpIsFn1VK/HTfD2IfNYLiWWhqi1aO3e6Wz79s+Pmf+0UClYB+qZK73z/gp5HXr1+w6WqeXHfzMLV6XHVNekZME/3J5yMMNWBPuxeTjc7vlWBSk/nXY7pfXmlhuDEVig6qC4bhlqAiaVUEMfENV8oKTyuqXFeFmp+jkBMUeYyCy61W0kMdI1+rt+8MUMfJYUZHjNJcr5RMZYYofWrkMqOeezfysU2Tw0ePd6MQcjdN+VnmNpV+VVFi52XCfhodEYXJQWqlaZq28OZpY9huN7RdoOtWXFxs2R8OXFxecbm9oG3EIZOcnAQLIaFfoUzoicPXxISiFc6+zDvHHHg1dUVlFbZqqZqaw9jjnaJtpKfQOc+rl8+pm8CmCYx9oN/vUQSMVXRdRdtV5UGay1uUm6pSVQclK/lK8J+UTJ8UQkk9qKKUpX81ybzkQoGyWLsihoEQBlQwRGVSIJxg+9QXohfpb4g6UfFYRg+HwfGTL18whgbbbKjaLbrJZkGutVmP3Hz68p0K903JsR9omg5j04OQkioWZ7ksk+cr65zDT8Id6RJJtE8r43Jwq1NflARB0j8tDiM7MNLwRC65S5JzfXUF1YAxirqpqVOV4OOPP+bm+pL1qsMagycQnOyoHoaBcRzpuq4cs/MTD/f3jHWF+/DDuYSWdWmBQtW1rJrURmFrg1cy9V3VHQYLBF6/fon3B7ra46bIYX9A64A20K0aulUDKm+Qyv3ikRh1CVCZT19AqIVklCojZCfI6+JeAKVsFdKGHaUs2tToqsHEmmkUOiE5HqkCBB+KDcq8hYKSpypAClonD7cPe16+PvCwD3SbC7bXDbqqseuJm199P3R39JHjmAYtodgjUuIbo7RTkdDAPNgkk90T4yBJclUn559tiZbXhLGCVIrL1RJ5X0hsBzUOW1V0XYsyNTFqotozpXkDrQ2b7YZnz57y4QfP2GzWMmSlJOl1TipXQApSDT549ocDr169wrst19dXmMThKA4+BWrZ7jZ1KW8KtVAk+pGm1vggvbV3t7dM/R1ulM1OwwBt0/Kgjmmxi4LkB0RiLvKS/pJEyry5x5lUrp5zqdn+5n+nCKXYk4yo5XOQ17RUF6uKYZCSr6yZjqkP1WOiFqBUxRNkLHNju0l4baXHWOEcTFPk9cM9UVVEbwnVTz9AzatMs0mQAJUTnwuKumpYrTb4SpDQ/nhAxbQcpW2kchQFbdMKjDWyejonMikxDVEArxjFl6OkInp1eQn6yHGY0NbSj0Nh6Ilp3qKuay4vLri8vODm+ortZkP0vtxDpSiVqzwQJ49futsZrMiedlaM9HPxN5lKTBtpAJD2hpFpmnjYPUjPP7JqW66h43DY8bC/o5+OwDobUHLrSyzXV15fEDykY0l94oXSEMQwU7IzlTkqguJkD2mZypfYQdkGZToBtfT8bMg99ZS+QYXoq1Iz1eBi66DYGOGe9j4Q8tDU1+jt1xD1Bybn0Ua4sEKQJvwYhIYohhToJUc8b3eCcRjR+OStYpp6nlG6CGgjwZUPARUDVlv2hwPeR4wWxLaqa7rVCpSVKU6j6OqGBthsVgQUx2HA2obKWLmXuZSenWJ6ODIXX/CCSF1eXECa1M7UExl9zI4ApNfFWkW7WtF0K/Zf7AAte3WVKN3hcMc6WFZWM/qR3cMdKkbqSlCGprFoHWcjNoeQCZ5P91oLOXZWpJizI3Qh21flENOE/1LB0kfHCLmRWplWaLBCjXMVgs0HwEloGfM2kox2aXwkZeuGYfLcHwa+ePkKpdest0fWk6OwGSqIBJy95+Hy/wf8M+9Uum9CjsOIMjWmyqTlMlQTVTZUGSGZjSlA3w/gRyGd9vO2EkFYEV3SMggRYsB56cF0k0vJhZ4p0uoWY5JhNZquXRFSC8xqs+Zw7GmalqdPntImHVRR1kyG6HEur7Ib6bpV0cdiDENMJOBzYnjKfB5p6lq2sJgK29TcH+4IXkjZjZYp1fv712jtaYxmmgJDf5B2FBXoVi2rTVe+dw5O569U6fpk0SqhufkN5Cd+Rq++TmJpTLdyH1VLRcvxKAhMRBXmAO8DJqMAOY2IajaKQXrDjuPE6/sdX7664+HBcfXsGc32mtpYvNm9N7o7hggpQD1Bo1NMRZQBkVmXxTYfjgfGcZTAUGumadHflYAEayt5f5QkOOjcpyaOQuyxBE8xBqqqwVbCs6xsJT2/R+kjffbhB3zr44/ZbtdYa2TiP8gyFglQZRAo2/5pGmWb2jSgiVxsNtIvxawSsj1RTlRWoaZKUGUIBIKfaJsKHyo0gVcvXjAOd3TNgNWKvvfUlcVYaGoJbFDZg8fyTTNHLuKLUxnglM9yLt/OQWpJb2eU9UShMxYby7OotKzaFii4PDEFDc+2SKWK2jxolO6t80L87iMuDfj1g+PzL+5RpqGpDZX96feg5gB1YRXk9YSi5YC1rlOAOlWo6Ll98VwGd9ZWmEqi9DpCQKUYoG3aVN7XxOBEzxPZfPCJXcFYjLZcXV2CqTH7owSAemRe0iHo6eXlBU+fPuHq8pL1eoVJVdy8bRCgqiqm0RN9TMnfiFI1NscTzAOzYuESfZU1RNKinLTNkDChE9UjqYJ76yb8OGG1cLRL9UE4s4fhgPOyqrfomZqv7TIgznZ5KTPTQf6dORtTebtS0dPFbVvSRWFQpsXYFWE8EvW8eCJP8sd0Hcg0btn/ZC73xNog1KP5z0AMUtUMX6O37wxQc++HrDcNqZ9rXnXocxahc09J2jbkA/cPO9ra0NZWsnYrvKdGm5l2Lj2gJN5Pay23t7fiVJtKMnUrva3aVKANtrJElQNeQVWbpsK7BC7HkFZJptCdiDGKrm142Dn64xGfSk8ff/Qxm7ahbeoSLgoFgk+9LTkTAKMUN1eXrDdbfvzjH4KeGA97UJMwHTjFNHjGxqLCQH84AoHtRUfX1amvRuDzkwGdBNkrnaeeRRl9YjAo6EdacVgqTW+RYhqz3cxUG9piqksEIZlwUy/9aGlCT8WY+oBsodIYJ5i8YnSBh0PPi9d3PByOKKXpx1GQFDJqFiFE9Jc97a/9LvyP36lz34hEIpP3GOeZfL6WqmTmxIixmWBf9EWcqJTgBP0JWCttDDPFiLwvT4fn7PZ47FEYrFHYKidWG5QyoC3KWJTVdG3NxcWGqKSnMkZkt3aqgxQkKwUI2ViW1hqgbRo++fgTGqNoO1lDKucBIa2rlKphpG0qbGW4urqmXXXcfu8FqICeBlCK4D3DMGJsJ
Kw6+n5kf/+a4CZUHdhsWi4uVqmHVUFBSbIeC2XJqXNPCU9ZliAlTx1z+entd2xpSFVChKFGG5MotgLYO+J4JCZSFBUT+pCPJ6b+qwguaCavhB7NwfPbe57f3nO72zNOit3+wDBOmKpFfTHS/rX3Q3cnH1DKM4VY+pRBzitXfJaJVYyR47GHKHyNNg9NBZ9sc0gDp5W0WjkHMeC1DPQoU5XhNa01Vd2UYSatNVXb0G1WtJsNV1eX9KOsT7y6umKzWgs4FRwqenQUDl2fll5M0wSo1K4yMbmR4eh4dn0lOp9KtBEttFVpE5tWkbqu6VYNxlRoa9kND0zDkWbdCa9kULx88TkxDtirCl8ZxuCZXE9lFdtNx3a7KhSHMURpKdGgVLIHGRVF3qSiKuuks//IVZETgGyxKU291SDLdDQI/6/WEWVvCVEI6qX+6WWBTFXlmLwAPMELU0VIrDCeSrb4ecXoYXcc+f0f/Zi623Bz2XDRPP/DVMF/JFlWUFEnPwBiCVAlDrB4pO2u70e07tHGFL3NH2StpW2FlSJGYfOZgud4lOBNaysthBa6rsYYxc31JXW3pnk4cr974GE/EkcZUlbGcHl5yR/7+Z/n6upCZlKmkegd0zTIEGyU5RRVVRHCgf3hwOtXt/Rdzc3NjVBllhaGcqhSbQuOZtUQH3rQCmEvdEyHB2qbkg4/cXv7GsuI0XusmTBMbC9WyI404Yxt21rsXMypki7PqVjLfL3VIkjNrBAp6Smr1PNbFzcnl23LS7MfEXL9GsMFtj4w9LtELRWARD8XXPoIeRZkIZBQNRaKNCf/eZ+Wo3jF5CTuG93A5N+tt+9GUAGjPCE6vDdErZhCIGT3EFxB9IiKGBTew3HocT6gTUvTtijvUEh2I4FfBB1xbpJp3ATdH/Z7Hh4eUNst1mr8FOlWEXSiQPABXeVpPVWm3lUEm+5aYEZ3rNFQ11hj2aw2HI5H9rs9MUbWqzUfPLnGpgxgSeweY2R0U9rnboQPs6roD0fGyeOdx1QWN4yJTN8ADd4FXr14TXRHght5en3Jn/4nf4UPnl2zWjVUlaY2duHcRS+cm8DL5GE2VKjTQPaxqIWOLfuh8t7gZBJQEq5gVIeyAqmP1YR3ARUHVIbpyby3EentUbgJ9oee51++5Ae/+yPu7/ZcP7mQndeJEqSqbTlG/2nPi3/1t96pcN+UjH4iTkeUVvRjRz9NZLoukKzeKpVW0UmbxuQ8D7sd21WLNRWkPqSuW6cSUkwJ2Ew2LFcYnr94zsX2ghCUJG5K0wEqtbFkWh8fQkKF5M5IPBpyaiRBFxIY1HXFer0ixMjQD7x++ZK6rrm6uuLbH3+EPWURASUohgzQJPQmColIbWXRxXA4UrUrpn4g4glBE0ONB+5evyb6nn5/T5gc3/nWh3zw7Jrtpk09VAGrMh1XmoTNSkskbw6JMVdcFk4rDX8salOLaoIMgrB4tfwo5n3ZHeiIsU+IZhQrGseEtnjcNPPDZkRxcpEpaEaneHV34Mc/+ZKf/OQLdseJbn3J/nikHyaaLuK+c+TFn//7f2j6948jMrluhIM4vRbSYFS2DSgZUE1+gHHy1FYGSkxiWLC2ErXQ+UpL8Kijw2pLjAo3yfNvtWyVq+uK9WaDrTopAlrpM4sxcrFdc3V1QYgU2i4bFRDILW0uIVD5Nud+f2Ns4Ufdrtd8/MEz6lpaTERFgsyvR7FXMQQut2s2q5b19hJbV+x+cItzA9r0KKMJwTH2wj0cJkW/GxiHPcfdnuBHrq83PH16AdohbFspBFzEUSb14ZKCDWUURuXgML0xenQ0iSotgTILvZ8/7bHkamEDSmH0E4wdJOmIg1QavPSalzYzrfFOeJjLIFFQOAyTM/ROs+s9n7285+5woIuaqroH9360p/xBZJomdrsdh/09wY2pCukYhgPTZMiT59ZWNE1LUzeFizvUFcpUVE3H2B9KBRbvmaYBY2uM1VxdbtlcXLJ9WFM1Dbe3D2ht2F5c8Mkn32K7loHCGKWVKToBafq+ZxzHMhSltbAQ9Ye9rGU2lraqhSEgJJ1VErcJuCXcrFVlMFWDrWucH9DKsWotq1WHqTtev/yS4HtWXaCrI7UNUkVTgdpqurSlUOxmigHUTCP1hrY9qvoCpbqq0kICXaoAuSIQUrvU3H6RDYpgjxajO1CXGP2AEEt5NE4YDkIUDtZ0CNGXCY80QB/B20JT6ZNv9M4wusDhOHC/f7fefs2qUznXECKT87IJgtlopqtA8HOA54NnmjzTKFmOUWL8nPP4AJObGMZjgueF2qGyFcpaDseeqq4luh4nrDGsVyu6rmGaApMLxQ5oTZpC03Kx0Gk6Mu2vjQFlKpoqXXSluLq8IDgv/VlVVbZPaObiTYTE/xrKRJxKwe/t61uiUqyaFdrW7I55YKySdaOpBGUUbLdrfukX/yhPby7o2gqrBQV9Y7I8CsXQ5KZU1ojMiw9m0nfRwZztR1TqcZkxAE7el9IuuVcxISW6oaovaP0BP+3x0RN9jhdmBY/IxN3kAvtjz91ux+v7+9R6YfDOM05j2lIj10Yp+OzY8R/95i/wb/9rX6tV34jECM47hnHA+blUknU1BEHbXdLvfhiZJqErMsamth0JUicva+IkOJWyZTQWgsbFwG63Z7PeJHRL+HiNNSXR0CqtVl2QdhqlSpsL+bnSOtF8WMx6xarruLm+5njoBU+shdi6tkYCXJgrBsg5hRhl+xfSl2i05ng40g8jIKuD3TQJSovQU0QXOE4HcD0xOJ4+ueRP/PE/xscfPGHVNjJVWyl01G/or3dpQ1w+j/gmBdackKWkUucUKluTWY/LtZBojNx0pVRN3WxRbodPPJKkga1MvUSqyoRoEi1aZOg9t7f3PH/xkmMaYAhBgoCMTH926PgP3xPd9SExLERmarhcr0tJgXcBb/IAmKyI7JqaqGUVZFVViR4Nec5L+5UEuVIqDRzHHoWsZ5YYzpYAWCtBRbSxMtBX5fXS8sArUjtHug8qxmIv67pi1XVlMCqGyOXlJU+urvjgaWppSQhOdrohtavksqHVmflEqgv9fo9pavw0gesJwRB9RURxPDj6ODAeH5iGng+f3vDRB0+4ud5S15rKgI6mVNgzYfmsg6oEqXkxBPmSgzjxHBgUB7gMUJevyQ0rWxejAlVR1VtCuCBGJ9zEqX+1+E8V0+pljfPCPuG9UAe6aDgMkYdjz4vbA9//we/T9z0+ahSvUd1P/vAV8R9SpgQCLCuDBeovuB9l6Y93jmkcpC0qoZJAQrKFq1cpSeonJzMqwyDrT4kaWwnXb9001HUr7Si2FsaHSlPpCm22rLcbDocBUNR1Q9etEgew+FtNwMW0DhfxD4U+U0mlzSQqpa5tU8VAUN4ZBMr/itR1xeXFhqrpUFpze39g7A/U1QXKGoIK7B5u0crTVTXRKKYwcdjdopSnbhTtyrJa1an1UEDBrFrGxISMKhYms6CguZIlaGvW22x/59/JPqPY3XKLFihoAFutqdoLQpQ5lph29XrnyUGtrEFOA4UZekhLDnwa7vPREJBF
KZOL9MPIbnf/Tp362gA1kptaXXqA33wos6OPudcvypThMEwYpWkrg59GycZTgDBNk6xpTOVTi7QK1MaUckrTtegF5C8BqSAuWtnSu4mWCxpUHhBJBPipdJ53wQqxuBhnbfK6xnTbVL4x+fzEGcRAMfi73QHnA+vtJVXdsj/cErxL+2+F8FpbTVs13Fxt+eTjD2gamwhyFyRdycjnBziXJssQV5pg1kbLg65zxq7mc36bcSwtCfJ6ngSNyUhqZdCmwdo1tloTgyfEHsqEO6XvKQboh4lXd/dCODxOoGqMrcr1NGV3vHxlf1vz/b95Df/u12nVNyFyTZ0LjM4Vbjhgvt5eqDikt1q4+SbvZf2aURAUY1qT5ybHOI1MTiZM66qWwZTkPI01ifKowmjLqlvRtDXjkBFRMYh5j3NGbYiykjSPYGilUzAiCGqe2tx2K5TSVJUgUVl3dVRp6w8lAAkhLclVcwlyt9sRgLZuWbUrHvZjSYiU9Sg8wTs0ga6p+dmf+TYfPLtmvWqpjE4DJxqdFncU1D5GgpIAMSRkeR6WUCf/ZWq0uR81/5n/HhdubPFn/ou2VNUaVW8ZXE/wk5BGI0m0SkFFiLH0vXkf6MeJ5y9fs3s4pHtbA0KB4lKf5PGu4vt/6+P3Qndz8pTcUtFdeT4TQpwQ1WmSNafT5NB6lVZ5St+mMRXOZ95eCVA9HrTs3Z7Gkf3+ge1mI0NpUIK3HMmJTmb9laNIg8XyX0pO8zig0YrKWjbrFXVVsepWDP2AAq4uL7m82LJetalSlJOKxXmnADoPhxEiLvXVBh+o0KU9KYZEMA70/QS+xw09tTX8zHe/zYcf3LBZd9QmJVeLUr1K1QWhwEkjxennYfGeElQtISsV37DBGX8qdYEy8EWKfTWmWmHDljANkLd8RY/zQSb5tcKh8T4Kg4qLafjS4oJid+h5fX/g5at77u52DFMgMKLCnunw00dQXdpYp5ViuXFyDlblHusMvCR7YLROg3R5ECqhogp8DAm88dTGMk3C4OGcTyu/ZeCpaRuqqkUpi0q0ZMpoKlWxsnVK1iSY1EqjEotOLENBaZ7GyoYl5xJPq/d0XcfFdotVirZtJLHJ6LpOyHvSA62ga1sIhna1IqK4v4+4caAy0oQZ44QfA9GAnzROAXFg/7BDE2gaTddaqkqVVdZqoWswF6LkMdXJNiRUkxR85viChKQuDetSn8vnzvoeE2AVUChdY6oNetyB3hMRsERY4CIkEKRUalL7m3cxMU+EMgcQ0LioOE4j9/sjX778xwhQ520QgehAJUqdha8HSIiF0AeIo5Ce1HGSbVHRW9zYE7WiW8k2qMkdRQHT7t2qqmi7FbncaStL23Wyxu3YJ+LeOmUHalHaVkkxJOCMShSwsrYkbo9RS0G18o0Rw1Qi/wJ3p59HKcmuVivu7h7wwySGd7XCqFuid8SoMQ3UlaJtW7armmdPr1gnLlhpjk79d4nLcTZrGXGCnP1Ia4EE7JnWQidtVMsy9fImZNR0qXIpGQDpZZOp3QqtV1T1ReojCRAyArY4bR85HHqeP3/F7d0DEY2xNdZWVJUtVGAp1oMYsfeRm//3su7805VAhDSwkYEMFroQMtVF4hcMAZwPDKMjVglh2h25upGeO+en1CgviYI1KRhAsV5vBFFWYJMuN3XNMOxRypTWC2PsTFivNAsQGvRiTerCiGvJ1hbbayRbFZ8mQ0NKZQQzD8AEcuuG0YbD/oHJebbbCy62lxwOLxn9KAgBssnEGE1laq63Lb/wCz/HetXIGledkU8SeFZScEmmEmtG7plLYJQkgjpz9i5RVKT38CtUpSRLC/2VH2i06bDVlsnsiaqXrD3KJH/etCbtP5A5UA99z+fPXzAMIwGLwQjBfGrfiTFS3av3RndLgHribKBEPKm85l1gHB3jMDGO0hYgfLNA4kseJ9HZELzQ3hiLriqGceB4OLDb7bi82EKUNbWVFTL9UKhA5Ai0sIFTBoYSL6gktzNOI4tQDF3bolCEqBiOPU0tdFht22I0ECIzuVhyilEGGkPqP85J0PFwICAzEXVV049eyofRghGqodEdiX5AB8fT6wv+6M//EZ7eXNE1VSJ71ylAVRkgRsxDLPQ3J8OmzH5jyZlZQAUSN+pJsGpm3WVZaUzBi26w1ZZQ9YQx8VGHQPR5GlqSU6H3E4q/EEAZxThF7ncHXr665dXLe/zkU0XREfzIq/u7P0QN/EeTHKCqxXP/2PcuGXSstXhj0ASqqhZaJyWIoUoMEN7LQhQNKFszjaOsKI9RyvlGyOdtVWGbCmJawJCMrI55gMoUm5BW+KV2wFiWiFijaeoaNzn6YRTua+fZrNZ88tFHNNbSNk1JzPP5iP1TZUXtuuvwLtIljmwVI36a0GaEIK07BGl3GodBtmH6I4fdHq0im1XLdttSVQprVTaEKdDMPmJmi1FRC2d+yFWS1OZXQoJ4Co4tHjul0mII5vghJJS1hKrKoM0KYzu0afDqkILXWMAtiU1tavP0UjF34KIs+glRtlYGFGOIPPQDL27v+ezzd+vtu2mm3IStdckOfZB1lyHmx292VCHk4RMAhY95nZnDjUJWb71npTRt2zJOR5wfpWyanNonHz4BhDiXENjtDgzTBApWqxVtC+M40LQbVAgYHdPeehkGMtkBaqisbLHKVRb5gVz+EhwWC6KSwqbeQhMTVUKaFI6RD5894+H+gcPhwGG3h6jxfkI2W2gqG7ncrLjaWK62LdfXF1il0LrAA4mWSBCfObsUkyZ7sGtB8zJEv1inJwNMMjwSfCilyeWO7NM0P7+arLEWJXEeMBeYyhGOPZMXxMGNR2L0s3OfAg+7A69u79gfjsJb13Q0dU3TtGXdZu7TCjHyUfcF/84v/cfwHuzjmbyTABJ57nxGFUkDRFEeyxAD0UuQqk1FDJq+n3CTBLfDNPGw33F9tcE6iFFWbjo/MXmNti1N3dLWLSFM1E2DMprXt7eMcWLoJ7pOAZWwAtimoAuFUkarhD6JgRRj/ngYKYkCch/c8qX0F0EBktEKUFu4urrk4WHHMPRYa3j69ClffvmK6B2YSG2haS21NWxXNR9cb+maakY48zHktcIpAD1xwCpP+QfI6IhOPYxKUel5IC2k4bCYhsyW5zcPWpw+nzFGSZKpQG3wocX5ikpFiKP0BseMwIpRD2PgsOt5/vI1Xzx/iVEr0JaoDcpY2rYryOAn6+f8O7/0H/I+6C4RYdGIM3pagiVUAW9kQtmidcXkPLtdj1UweWlFWW1W7HYPOCe0PUorVt2KqqpSqwo0bcswjVTGctV1XF1fsb3Y8Pp2l2wOadufATxGWUmsIqioCNEnh2lOzE9eG6y1ge0ao1KyZRSRsNB/VQAOQf+Xa7MNEHnx4gWT81xfX7O+2PL5l68ZpxGIWF1LcpaC6FXT8qv/5C9zc3NB21ZpvXVMDChqCRKl6yrbiYTmJw/nSGlXayE4P1lQk+x13lb2tnt38gXIkxyCQkeLNZf08YALe7R14IP0YYZASMHuNJEGKCU4dpPn/jDx6vaB5y9vefH
qns2k2MWI00Lif7/76RP1L+c48rV5w34h9lipvBmvAu/RWrbkaS2NSzZYJjcQgiN4qI3FjRP393eEGFiv17JdS5uS5OcvtTYlLoA1mWpN7r9VOjVwxMKlrJTEC03bcKGE9/R4ONL3I21t2a47ri62aV4l+YyYEcsUMyRzFWJku9nw/MtXGFujtObh4YHVaiVtVU7hgwMaglM8POzSLMiIH0euLjb8/M/+DH/05/4I21UnyH+uoEYKj7u0+mTkf/YjQcm0/DK5Or0fMwAnjEfZfyT7nUEzcpIlQI6mxdpLYjXSDz0+SBVAKY/X0iqmymIU8ZvORSJGeMhR+GAIKF7ePvCjz7/kh5/dcb9720M0y7uHpKaIx0kGojVhchLtlywol/vAak1lLD6X05XCI4TpYfI0yiYnIgFY1VTEQSgcgvf0h56Hhx1d18kQiVZUTc04DSileHh4oO97Li8vy/Fl+h6AgGQkuRxlUxBQerhipr0ps/LpaZF+1ry6zsdY0NgcgE1uxBpF21gUgefPP2d9OHDYH1Cmoms7Vl3Fh89uuFobtuuai+2aphaet7IwJ87l/Kw0ChYPmBj2mYp4dk7l9/N5a432MuGptC7IxamI8ooiyiTtoT+kbKwGtcFYRwwDnhGfwTAPx/7I/vlLpuOIc4GgAqu6Yr3e8uTJUy6vrjFaS79sQnaefBz4N//F92MbT9ZdY3XqtxOEQlYHyr3WYseKs8yGbJxGYhDkW6VyaAiyKq5pGsapF9oRN0mbQ92yWm8Bn2hPPLvdA0OaCM2E+9bW2EqWA2gt/ZzRgFFGUAOde1UVStlFcrXEZbKkIFWnoDA5N2Xlc3WU4G/oJ549ueH29S0PDw/cvb7lB+F3OfZ7Ak5mjXVkveq4XBuuNi3Xl1sqLaU3rVIRNYojVCopCczPPxJc67TKMmfvOWLRWoNJG6igoIOZWUGhCkPFyemRMSkS0bPjcPsa3AG8p6FFB4tzQdo1smFV8tnH0XO3O/Dy9YP0VibeP4ulqRqs1hglPdXbDwb+R//u+6G7gGzBc7KXXClDjBoh7p8RZVmVK+X8EBX7Y48hENwkgECz4/Lykofdbdrepxinnqo2NG1Ht1oDET/1bDZb0IZ+mFCHnt1+z/W1LGiIwZX986ropwz0RDLtmsEaA3H2D7OTjIUP8sT4LSUn7CYVNANYLcBEZe9x04S1lidPnvDy1R19L7RxSkdWndBRrdo1Ty5WklylSlxBfEOU6fw0yGFMDjoDykBVSWndOVeSpqg1ysxDgTq3BaTWr5zsvn2Kf7YrBnAqsL+9Q3nH1Dv0ZDCxJgQHUQjQfUKlQ8xsOYqoLMcxsDsMvLrdcXd3wE2RwSm8MqiYaNe+4hh+2pIDuWXQLuu/J7yLySZ7aQe0FqNISZVM3WuV7IytuN0/cOiPQiNWWZz3tF1Ht5JefdD0Y8jMZUXfjEnV1MLPCXhVONhz28y6bdmu1yilcS6gItSJRaD0KXN6v5VSTJNftOOQ2gmCAFkoKmNpbMPgPC4MeBwxzdAQJqIbidORrlH8mT/9q3z6nW+z2axKDFMZW+CIEvDHSPACAKZ6Q/EOkizGZDfShVAqzVTIyeocfJzerXKOOorNdZNnf9yhVcQoTaTDUVMZ8FERg7Te+HFiVNJUmKviwSuiUTLQGKGfPLf393z+8oEvX7zm7v5AVBfv1J93BqiZDL5wLSY0W0UD0RKjRWGTAiYnadIUXnJQQlQMMU3iZ+NlrWUaZwPm/cSxPzIMA23bsN6suLy85LjfC3dpCKioxLGYStxaVJhc4tTi/gT2jmWNnhyZZAfy71BuQkrUKWlEwYzkpZgCakHXFN16xdX1FcbuOfYDSsNq1XB9s+XmZsP1xZrLdcWqrWjrRhCIbNDzE5MTzJh7QnLfqyhRVpFlm0HexlEyHJWcfpj7Z/K2oXw95ZPn/duBgHt1y/43v4fuD9SdZdV6dOw5vrojcOSis4TRMR4G9DCxed3TjBEVFJOGoBXdesMHH3zE1fUT+nFgt98LIuYjnw0V/97LT/ifv1PlvhnJuqtDTBWYnMkvUOX8r0yAnByFCyH1HotjlRKFTAsbo9CTVAj8NDFEjbUD1vQYa2jbSgKfROeTA1TvPU0jiJAA2ipNwkuPtPQIzzyqSqnSHQUzclPGoWLWaTmLnFDJqWmiikQvw35Gw6qTDTu73YEQI/2xRxlD0zZ0reV6u+JqW3GxbrnYrITcX73F+cZYDGKuiGYjmPVTmaTnj0qmp6iK/FucfSgI3Ky76X2pijG+vufh+z8gvHxJbRWrVUSrkeOr16CObDpLGCeGfuBQBbAdU9NydJ57HyWBjeLwYoz4SSbZpQAR+Gyw74/uhiBFuQAElRyNKjbAGOmV8xl1TJsRxnGkUkBq7xjHEbNN1Q5kR3nuW61sQ20NdVXhrKaqa/p+ZBhlo984Cpm4MWJ7ZDBDFZobhbQgRWUpOq1AK5vsbXpXKtkm/GABMGaXOqPvEhjqkpM5F3j65Ib7uweOfc/Q99y9umMcpGVBaHwCTd2yamsuNzVPri6oayuoE2kAKlWFMn2UUpTKjzDyKLSx5BJ/TMOz89Bp3ro121ehgcvpwtxqVoKERC2rQmS627H/4Y+ZfvIZK63pVhAZGF6/YlRHNiuNnybG40gkMFWG4cMLaDuMM0wu8PLFPfvdmNBVw5D2THqX+qj1Y3Dim5eZB5UTEFnax9KiHKTn9tj3DENPYy0+wP6wBwVN20iA42UYSibpDSgwVlM1NUZrvPdYo+i6jrZtMNYy9NJ+VVhFkh6WGqMCwtzQF3O/PxGUrJ0V1F8ngEI2qxmjk73K+vT4NBfDxcmuE+Hu9hYfIhfbS7aXV3z54haXWhe0BqPSYJyRbZ0fPbvgydOnMsSl7axzi4Uo5bkiJOgp8bEXSsKIDCulVrDMJJHjuDz8l0ORkjgqMl1UTH8bbu959du/S73bUatAu7GgJ8LzL/BqZNNp/Dgw9D1OR/SqxT/Z4KwWWxOBaKSPPAT60fP89o7PPn/O/cNB6Kb0Ix/xSN4doDI7elLWKGen5//QJ2W73DNZjJBKDispcPAeYqRtG2l+n+RBl0kvz+QntFGsYkfXdegIh8NBECht6dqVZJop+NLaQNTlO+R6ZweYAtYFGjm38zM72pxhkIPWtPc4UfYoJCDcbDaEEFmt1ry6vWMKkc12y82TC26uL9isOzarmraxMvVsTKIxKVDYqaT+3hlSV+U5mlebzkE9+chTZKK0RqV+mjJwsLx/SibFVUrt4sOe+L0fYvZ3mHVFd9VS6UD87EuMdlxdrXH9wOFhT+0jFwdP5SCi8UoRjXAiXl5d8fTpUw7Hg9yb1N4xPax4/Ru/+k6F+6Zk1t383yI4Tf9lJCSvtQXknsSU1ETQqZ86hIi1YK1h1KJRznliHBnHgX3U1HVN29TUbYNSsNvvSpAlfdF1ai1IiQMpOE1OWSsZMsn7pVWc9fZtAMmcVZf/S8G46EQmY1da0XUtFxcbvBeGDKVk/d563bLddFxdrrnaVGw62eteVVZ0l4yY6XJ9yIld0b
ccnJYrPyd+6bUocUJ5NpWS6x7DzKhwUupH+GZ1WqIdHnYMv/0DmttXVLWlu2oxxuM/+wKlJy6v17h+RO32TB3ExhFvNMPoOTpPiDZ3RpBBYK2EL1QrxbRfc/+e6K6s2szXBrKTmf9jgWTmjFemqJHOCpQRJwHCEuKDls1FMTCOA7Vt8LaGqsZWNcZajsdeKI+skJb3fZ96A6vT6s3iWJQS1DTbXZ1644q1TXRkhf42J1EzWENOvZSZNwVJMBlYdx2rVYu91Rz2e7S2DENP8AFrNJVRrFcNF2vD1bblcrumqauS5JXPL5GxfHEMlGpWeY60ACcZDSvPVElq07IUlbhSs60IudUsnWL6VpXOJdw/cPyt79P85HMaa+luOqgC8bMvUHrk4nrN1I8cHw5EA1MN41VHbFaEqJkmeP36gf1+ZJpkM1o0KTgJqiyM+WlL3hiVJSfRZXAzyuCfD4FhmuiHEY3CT7LdzliLtkJ1NrmeSFoiEz2BSN1UlOFMJb3+Km2I8kGYZaqmhQRLkXuEsx5kndNSipbPMmnBoAz3aZ3ZBFJkYAxza0hOXPIZ5iHUNIwcU5iXBkrHYWB0ju12y2a74cuXt8QgSzSMjlRWuHKtbljVLU9urunaLrWmic0Vq7toMcl/zxUKcmyV/X9MNkCDMmUwtVSQF/I4NMwtRBHpR4+7Pfvf+h7r/YFGBbrrFaoC/9mXTNpxcb1i6nv0fs9gNdV6JLYNfjNvhxOkNzI6z2FwvLq95/XdPWPviK76Wr392il+MRg5C1ILyD4jIao48CiTOIvyiVxI6SsRao1pnHDjxJObG8LkmMYJkH4iWdlny+YRay2qbQvZc1U1rFcbbh92hcbHWk30s1HQijKNeopIasq6xuQw0REdVepVTU3Oi8GZ4IUY26Sp/6vtlvV6hfOR64c9wzTStC3b7ZaLiy3btqFra+rKJONpFgq/UIKY+wsp23BKZLq4rotLSLrXc6CqNMZkWpTZyT+mpbKVQYcIHmo3sb6742o64I+ebtpQWwX3O8I0chU10zhS7SeoG54Pcn1CKndpKw3p3WrFzc0N1+qGz7/4gmEYCD6wub/il//BP/11KvWNSXF2i14dhUFhIFMThWx3ZqQKlFCdpP9CkJ7fpq6o6oZxnIQ820ty5SbHNOxkQO5iQ9d23Fxds//e97DGUtuatmppq4bBRaSJI2fFqeE9624yqCEFeIqsC+H0/i/KqBLDRBk/ToF14UNF0P/1dsNTFKvVmi+ev6SJ0K1XXF5d8ORqy83VlotVRdtY6rqiqeqUYGVDmfgps4NIMpPzZ6NOmebMxzkHVpp8F5QGEyHqeRXx2/Q+r0XW/YB9/oInY496cHRuhbXA/T7prmUaJ8zR0emW/TDwuhX7Mh4dPlaoICualYXaNlhtaJuWyljWu2v+id/803/4SviPILnHv/T0xay3KtkmQf6k7GxKH61PAEBttdjFZKu1VZggvISKyNAfsboqetc0QoSvtCbEwDAMOOfY7/esV4LuiN+LRR/mwE2ACq2E+MKk3r98/Cq9P6o8hJmNXKa3mZ1/0bGYZxqEirBra7q24vmLW0mw+h5tLZXVdG3F9dWaq7Vlu2nZbju6tklOXsj/y/bCkGkK5TpopdKxUoysNgaiWaZWpzen2NbkT0IkOIdK61wfv1XFSNztCT/6MTfHA80UaMIGVS/truhuc3DUqw3s7ggHz9CBi4Zpitw9HDgcB4YxEJUhmIQA5iN8DwLUxwBJFrFFvtA3ZtaUcZQeXJwjKgli2hi4vNwyDHtclNK5C47JT2y6DXXdIi1qka7rGMYR0x8x1nAce5quIwRHRvhL7j5rG9oopuBRShc/HZLuzsBQkvxLJ6j/IqlSs7/WqdJpNTRNg7WGaRoF7bU2abcwpmgTaVtZ7NI1lqttx8XlGqNK6JJAAKR8l5DOSDwpTMnMgU5JQJpcUBqlM5vBzCuLD7M9joroY3mWS5ChpPWgUgY7TqjPP+daKerRUweHrt4SLxw8Ydvi9kf40OObyGSEY9hHxeg9hyFydxh4dXvP7tijR1CT+Vq9/dopfkE2pOQkhsOXMsb8PgqVifdS1hR0T87bGk3wEZD9uT5MVHXNxeUFw9AzjUOa/o1sLzaC/oTAOE28+PKL1F8pU+OTc4BmterKoA5L5JbsNNO0W77ZGnI7gtDQQEqjyXNsJgYMyLCFB+WVlNhSltXamk6Lk73YrPFAVcvqyqauWdmKKvWCGpsnGGNxILPz1SmzI2XwQmL7qHpQMrnHuc7SEBhjysDDclq69Kkq6dNdb9a0lxu2z674F//Nv8j3/5tf5+7FSyqr+Sf+1C/zN/6vfw1Dg9lsuPj0kl/+1/88P/xf/2/xrselozDG0HZNmsA0rDZrnj59yn6/x3vP74aJ/z17/q13qtw3I7PupolnLwsWHhtRaR2Zi+miu6QhHxkCceMofH2bhu3FGgVM43hynUOMbLarNK0v1CTX19dM00TXrmiajsl5FAprNdbYVIGgGIXZMGlKlbzEa6l3OqGiM/KTdFdpIjJg4pyTXdXeY5RQRF1tN2w3GyYf+OiTjzn0PdoIbcrVxQXbdcOqayS5ykN5OXguTjll8GV1ZCgOvOTnaqnnbyZbBcwGGabyM7WU9/6ED1EnB6KiQbWW9smWf/Xf+Av84G/9Oq9fvcJUb+ru9juf8kv/gz/Hf/SX/jLHYcR5TxNM6SP0IeKVxtQtl9cXfOvjbzGOE793iPx774vuBkFQc5LxuHRadHvB8iFldIWKRoYxkfYkjMZqS/Bmngtwwg0cAhwPPVVlWa1XPH36FO8cr169oqs7ap0TqxYfxRbmoSFJVEQ/MiuFKQk2iP3NN1pl5UnUk1lbZuQ9J9r5fF2mFrSK7cWWj+JHrDdbPvviOU1bsVqvuLq+5NkHl3zw9JLLTc26qWiqlFxpjdVpwKTopgzgSIwqsw/LYd9iGRZBQnrrG+9RKXlDzS08sLDBWlFpDUw0VvHxB9f8D/+1f4uf/I1f5+Xrl1C/xe5+a8sv//f/Wf7j/+P/GeM6VGyIaJSyMnwSIh7Q1hKiIuRtVkpWxbyP4r3H5jQgLXAIweGd0Pa5CaxKg51aYawkyMbqsirXucB+t0Mrw2a1pamqwmX+6tUrDsceW1lClHYPrTXGeIytaZpu1suESsYATaIJ1FrAJ4Pc01lSKXzxyuyO5wHWCKi0ITIChIBz8O1PPqI/Hvnx8cjrl68Y+pHdfo+PkapWVCbSNZbtpuJy23FzuWG76oQ1JWNVKeD1SD9s2vCQbOQCKESuhQ8hcVvP6RVIEis0UOnMgpdqik8hkMkVmpSQKVAqsm4r/sh3PuZf/zf+ZT77r36dFy+fw1ts7sW3L/jv/fN/jl/7S38Zx5rJ1BzVAUWFo6KfJu52PZ9/9prXdwfGUfxF0F+vt1+LoGZn4YMnRkPm2ipbNpijfaXBKlOg5hgF0bu8WNP3SOagRUHHcWCzWdP3a0IcUYdYPiNGmUR9/fo1++MRBaxWa+q64fXta5p2hUolIDLCtEgCfIyJz8zMGW85JshFf
22kjFaajqFkeEohivsIkVRapabfiE6rA6212FrK+dZmBFihzMKgFSc999+JvD3rnH8nLt6nTgIslcv32qRsTp0cK0p4OqVjJTD6if00wHe/xWf/r/+Coe+5+fAZ3S/+Mdxf/c/xZsXTP/ELbH7+U/7ub/8OfRRyYpOQsy7xbw5jzzD1bM2Wjz76iB/96EeSUKyf8/rP/AfAX/g6tfpGJF8/yd6FdzD3Qcfcj5j70pJ+aCOBntYyQCWTY1CWCgMXlxccj3umaSBG6Qlcr9dooxinifuHBybncF765EY3wfFI267YXmxTNg0gvZdkOhbkLmub+1KzEZJp7ZimnVWM5IUA6pF+EyIqcEKoH1Xe4iTI7KpruGJTqhRdU7NKyH9JsIxB6Sj/KVUM13xdVQn+Y9n2JIFRfs+b07tx8fsp6dEKlCno/xuUVEpRNzXeGvbTSPz023z2//wvOPY9V1ePdfcX2fzRT/nN3/8xew9HF3BBylXZWHsVmZTCEVBVxfXVNeMw8jnPeb15P3RXhsx0Qe7zkN5cxoNM7ZWTFrEOGm0laAohMo0jQ3/k8rKjrgwaxTQ+kLkolRJn1bQrQgisN2u01hwOB4yRQSph7KiZ+qHoheielAO1PuVm1sacWDSl0hCKStUeLcemyVNxMaGzKSB30itLiBhlUCZyfbllvVoxPpt49sEzjsOArQyrdcf11TWXm5Zt19BURioWdX3CgJKRZ4nnJNHLmpmWESZsTCSzbEBCoIsPWDL35j/nxHbJ/0sQTuxus+HQtbwceuKnn/Cj/8d/xuF45PL6gzd0d/tz3+V3fvw5va5wusIrTVCK1WbNar3h/uCEUF5XKczOtsk84hx8v8T7mAAquco5kMpl/wgoXUmrXwgc+yO6UpjEre68bIU7HvbSclW31FVF23VoY1JSLq2DD/cPXF1dYU3iVUUGIWf7KsizDjKYmfUXsxw0i+Sm6RhPMXTpLV6+Jv5VK0HohY5wRBtZvdp1Fff3e3zwHPseW9dUVctmXfHxB1d0rWHTVVysGzZdQ13LDEOuUejEcUqptKbDi8haXnKbT04eTfFvpX0RIMUu8qwlV6bleK2at/CBmBZdGSYNh2kgfudjfvhr/xmHY8/lGzb3F9j+/Kf8/d//EXvAJV+gjMZ4iw+Gw7Hn1es9n39xyzgqXDCEqIn+6/X2DxygxpAd1ClZf0YJc6O7yX2hCQzXGtbrFq2ExkOnZn2ZyN9wfX0JeIahZ95GIw/85IQ6YxhHbFUzuonhbuTj9frRMc4O0Sfey6quU2lnzg28DywOnRNDU7LlefJTa1IfII843aSNQSd2A6NJNzkFsNk55888OQp1+s3JxmlyWZdHjv3UCC7/lAA1sxOoN4KCvBEDJOgehx6361FTZP/ygTBNoC2q6RiVwaOx6zVmteJ3/85vgzY0qqKJEYyirg3Re1zase2DZ7PZ0HUdh8OB692av/jle9LHx3wdQyprhKRf+fot+3wXvwUxYI2lbS3BgVHS5D+5Ceccl5dbNhdrhumIm6SEbitJErx3DGMUQzVOtG2D9wPOBeqmLT2PM4KkSuITo1CIVYnnLlmmue0gBVkSLKZhPzWXnUrrCDN9We4r0onmJ+aKTkLfjDE0tbTK2OTQM4qZv3ZWKbXQ29STqPUcw5+84/QewJslQJWPVW7GSXtKfr/3nkl74T/c9ygP+9c73Dt09wd/93dwk0zyR6MwdS3tOgltyv8ZU0lfeRf4mUPNX/zhd/+gqvXfqWRkJqb2lIwq5m1QywCfk79JgpWHj7yf8M5hrZW1oiFwOBxkiNQgqBWapm0YnXD91sZgKiutK14WV1Qxls8Q3c2Jz2wPSci+JPQU3V2eT8hh9GOaNIGLKPQ5QclgiBb7ZioZ5vKxoW1bJu8xRlE3lvV6xbqtqSuLtRKg6sSccpqwp28sNnauQhQkjFMdXf5+yQuWtjn93GgtVInLe4gg4U7JMGV4yLq7Z/Lu7bq7XvE7v/Hb+MHjjh4/BeKqwqqKmydPeP3QcxwPYg9yi0GKOGwZXX8/RdBxGQoqk/MAJNunEn1TagMwxuC1BmS4SqbChTzfT56xqthsN1zdXDP2A945sVtR0VQt1jayxjOSUP7Z5sZcWdSJPSQFrY9MF+KYVcqh8iBgXqySWlSSzpbNhKn9wGrDZrPm5uYKrQ37Q0/b1rRdw8W24/pqxdXlmlVt6BrLqmlo6ppKC9ppcttXeZpnOR1OTbylS2ugFHO8mRKrSOFpJfWJK60gc6Ez94ubKFRYbhzwDzle2Ek89hU29/f/3m+hJmHJIGq0rmAU8CFGzTRGHnYHhini/JzofZ3e/oED1PnExfrMD3JEvXFxbDFcWivapsJ7i9GW4CeCdwyDZOSXlxf4MPHixQsxxt5L319ksaNYp4DVMfUDwSeC45wlJLQw943CgvA86dkp7B3LjY4nr8jP881LS6uKocpbgIQjL5e2YmJhTTaDOWDOhPwyU0cKJigKc7rJSrKmeQL2VPIDnY348gdR5fNavizvcSHgIyhkc0w8jjB4UTyFMDKYivrqIpU4RKF1LzQV627NJlpiECcVE6ozjSPjIDuLLy8veXh4oGHFv6z/1LsV6huU0iIRmUsiedIxyVKHcr4ZY6SuDKuuwU0KFYXKQ9Csiaapubq64HB44BgHSbZzgBnEDfsQGPpeENkAk3Jst648F0sMJic1eaiJbDwK03Ish51RgGVSlT+oJC466/3sYIX0X5Vj1QkJ06ldw2jpVzKpZ6n0ti6/4LEFV/Mfxdk8fssCiVpec6BQ9ig4yeDTJxFjxOXJ83EgHifoHX43yPTrG7qbjmZIS0Oi9BRWbZMQ5vmySa+vDLZVVUV7uOZn9Hd4H6RURgB43I86v140IdubuGgninlaXaaG67pCqzUPuwf6XvhIjdVYU6deuYlDf0zTt0mPJ5nkDzEIsmOFtkaFHHjq0ppSSM/zwGE51uQg8//HpX071QNSIpmDSaUUvvCsKqJWNG0tCJiWqe6mbWispbJpKDUjvOUhO7Wn+a+5uvJGkJ/Rp0XC/zixehzEaiWJ3/J3FIKET3imcYBD0t39KPvL32J3AzBMjnGaGA+DrPXsKqpKc3VzTfvFK9RukEpvRtaUVHCa9eofRsW+YZkHT+MjkECQPdErn/i9iYGmrglO+Glzj3qMsoEy+Nwq4Hny5AkPd3ccD0ehuPPQ1C1og/Mp8CoB6oyiSmvMzEoR9FJHEttPpCiMHG8GB+aYR0WZKYl5N32UwVprItu10Li1bceLV7dEIk3XcnGx5eZqzXbdsqotTWWpKyuDqSbZ4gWKP9fW8p+pEhzjiQYv7a/WOgWeqvwX45wMyDOa/M4S+U/f4LzDjSMcRugdbjd8pd4CQkFaEHEF0RBdhEqCVaUM0xTwAWS5rNCUrpt36+07A9SMpuT+ILLDT0FaRk5LcBhCKXnnjTfaZCidNLkfGfyUeCErVpuOfhwkkg7g3Cg9EV7hRkfbdFxeXCTqB4+KsL/fsWo6rKlSpiAB7DAIZ2rXdaKU5Au+yORL34bcJKNUeWDy
1CHIQ0+UoOG0+V3yGpP7WzOCuXCA+RrlGf3MG6BiNtNz/2lUkpWVrqyvRJzE4GdannKP8jF9hSENIeBdQCmHnxzaBeIEVTRAkN4ZU/HxL/0xpt//gnHfs57gn/3n/wV+/y//ZT549oxeT/Cwp0oBqncTw9Cz2+949vRDPv74Yx4eHoj1xC/8qf27VOobk1l31eJhz3owO6pl9misZHNGR9q2YrPuGAYwuiJ4Wbc4JT7GZ8+ecXt7y9A7YUjwISGU0sSeqVD6vpc2AG2ZUpUgT2VL+VEXrlW/oApbmiWAvJIvlyPLCWSUVc4s8YlKICoURbNeFd1Gl/WDmihjY8bIlqtHQ30zEJZ1Oh8ZJXhGCeIlun2KJL1NJ09ei48+M/9OOnnnIwqH9w7jArH32CjjBm/T3dUU+af+pX+O/+D/9FfQxlA3DSutaapa9pyTuRUVRI+LjuvLa9Y3LatP3g/dVVqjombu4UztTNneQrG1S30BL8T1aaVzZTXeO8ahp+tqthcbDv2a4/AgwWllaeomtW9F7u5u0UoLBZlSGG1pmo4QIm1bzzGfRoJhs0C805Y8lYGBON9rAfZTwsV8DqSXH6OtSs9cqwVoSL4EReImlb7FqqpkoKMEp2oOULPkpSsKSVBVCpZPniFVfMLJvXhkj5dVrOWzZZRQIS3vR0AGvVyYUD4Q+4ANwi38pu4eaV3kl/+VP8f/7X/3l3g47Bl2hth4ri4rLq63rDcbqrsjfjzVl7qp+OT62R9Qu34KokiIdQ5g5vuvtUZZS0xb+sZREULD1ZVsN5umiZho1FTq77fa0nZCR7VerxmHgWmcRF+MrD6dvMd7l/pZFQpT2Gxy3/aszypVfdPhSt+c+E9kYE/iCUNMXNckJNiTEiyXwLFk6zGei+2GzWbD06eOZx88ox8EsOi6lpubS7ZdTVtXVNbI0HcGDUolFmIUOqlcJ5P2kbA41jR3U/49PzP532+ryMb0s0yHmP2gMcK/7fD44FEuEPqAjamt8S0217vIn/zz/xx//f/wf2EwMGqFCxrcgK8jddPQdR113eAmjxdCVuq64ZOP3q237wxQ83ASICutlET5qkCLAfBobQuELr2hE3UtqKCtRGkuL7cYYzjEgD8e6fuBGCPWVjR1S9N0DEe5UDF4QUA8tGkarrKVDDlUsnpMayB6pM020wDZVIqq55uZ5KsmDCFB2yHinT95Tcjcc3/GvApUZ3L3nDUjD1/0QqOhSBuecvAQAp65lylTUsw7nTNOkt30svQ8o6yRGT19nIVmZGIZqOYAdXJyT1zwBOX57D/9L4njhO0qjl+85Df//f8E83TNEBVf/O2/z4vv/YCf+zP/NNfXN/zJP/lLfGdt+L3Pv+Tubid7rRPH4Dj2DOORp0+fcnd3x5ff/4K/9Wt/m5/5C//Mu9TqG5Elkl+2byA9lTK9l4Ya1HISU8qeBkOVJqEnJxWAPummrPNTbLYb1qs1h32fyslesnwcWhtiVXGx3qCsYRomopf3jMce03Uoo0ogFpxnmsTAtquO0jZDuuvLTQ4nOrF45VHbTU4mS4UhSRl8IsU7+TOC6GnMawKT+BAIKYma+XwfXWtUiT0ey1c9dTKsyFs/T34umfjkPCGOTMHj8PzkP/0vYZzQbcPh81dFd0c0X/7tf8CL3/k9vvunfoXoYF2vCZstVhk+/XTg+Wef4d2ENhFt4Tgc2e12XF9fs98d+Xu/9r33Q3eBqqoWi1DmQFUp+Y+Yk7AKo6tkHTzPPrhCEZnGnhgdzvccjgdW647Lyy3f/c6n3L5+IWjH5PHKybIJJTrgglS3AJq247DfYZTi4mJTDk4lBCofW5/e33btyUkoVJlVWN7n2aKd/jtX5ySGTZbwUTCRS+pKC55lUvCqtBE/tEDiJaF7m4Oev7UgavknC8fOG1WEU1lS96iESuUnJBKZPPjgOIZArwM/+Kt/XXR3daq72e4+/+3f4zu/8suoXojrh9FjoqHbXDKNgadPn7E/TPzky1fzOSi4ur7iX/iX/vl3HOk3K48tVAm6kMUb0i6okA16AWNVonSMeCe8nk3TcHV5QYzS/qeUw1qF1hVt09B0Nceh59AfJXXXisFNWFNxHAcUmqqSFh5B/T0kvc1rQiGxkizRSpWfN1lCoxAfUXJ0peYEK0hSOJf2U2uVklZHkk+J1KxWq+TWxU+v1h2rrsamkn4G9aw1pbqVpaD+KUxVVtDQ3B/rF7nWUt5gJOBNKjBN7nOdX4tR4T1MwFEHfu+v/nXiOGFWDccvXvGb//7/HfNsw0Dki7/9G7z87e/z83/yV6hHxcEYBqXoe8dxv2cIhqa54PrJU7773YHf+t0fYBIb1PUfQG/fGaDapsbUNdZYGqWYRl+GJ3JwOiOPiUHLS99p8ALVW2WTMQtCG6Vlot9NnnGUHlNbGeraMvYpQCXmdbnpgslF1UrTrTuZljbS8xeC9KgYW6XdvDbdzLfLsuT4ZnaRA85Fkq+kX4vEYrBonjkp6WSanKB1KgQsynSJLUAyrpCCBmEGYnm05Y8UYD4y5XP2GYsR8Lk0pk5RqHx8UhoO+QWij3z/v/k7KOdomgb10PPqB1+gVy2VNYT+yPHLl/zWl78GeJ5ePeXyk2dcPfsWD7s9TVBsNysJzn3ATxN1IxPA7pXnR//fF19x5b9ZWVKGzLc4XScNknDkcqIQGCsC1ib+SZk0omktXdcwDAeCC7gpME0Ooy1101LVtaCrQdgrIqCCtH9sV2vRLx/AwGazSgTfuXwERI+PORk0hXs098vlTHdewyiiFn+JUbL8nJxkA5uDc8owU/qVXAHN8YAX5segI1rH+djyVYvCR6jS6lgpLc3VhryLWq6lWRzgEil9C5K6OBf9lsSrBOZRjGaY4Hd//TcJLnJlLNXtxBe//zvYruLi8hI1OnYvbvnhZ/85032PjobNZsP26poPnn3Mw6vv8PvPP+ehP2IsONfTjz3jNDLtNT/63uuvV6xvQAbnWddyPcsEfyL1nUv9smIxo+XaRFnX2FQEP+EcVMYyHkemSTZSaW1Zb1pW3SZtYsoIp2z+Es5nScgztZ8kZRPOS+VgWdpUSpYDECPGmjmQzMgU4hfCo3s/o6aL9Dy9R+fWlgBLBokUnpb/CQtoDghVqS690cOsYlpWseSSPDmIIgaNxy+Q1aWGzvo5f8HynBa+JC6e26hQQaHHyA//7m/jAly5quhu1dVcXF0yDo79F5/x5e/t0E1Hvx8Ydj1rZambFe3K065b6q5Ca+ELj1o+u647Pvr0596uTN+gzLo5Aym5dUqAJJuwlMRdHjUKT1tX9GGkrg22MhyPe7R+RrdeMUwD3e4BjWyrrOsaW1W4tG701d0t0zAy9j3OezZdxfF4pGka8W/Zp6ZlKChVeE9zO2DuW1bx9FykMiR+QFQgG9D0fyrrHInSam4XO2m30opKU9Y+K4WU87UW5DQtB8hD3SEBXtKHuvBh+dkqvgMB0dQSpJjlbej/ybElB6DS+WQ0VitNiFIVM6PiR3/3d/AhsnWG5vXAlz/8AvU7msurS9TgOb5
4zuvP/yamXXF1MIS7wPM4cbs70ugtTVtxdX3BH/+lC373J58zhBFCoG6br9XbdwaobddSVW0J+mIYxfGXDOSxBDGEClAhlfZ1oYTwiwGb6D339/c8eXqNNZXsMFfHhJzIRaraRgJi+fbkFE+nfCV41anEY0tm/wbKFGek7FE/+xxoxtnBn9LryJ9GS+TvnGRjOQhQSl7TeqZJkdWY81YU6W1afGdGnfKx5mhhceRvor7zz2emhOToUwC/DLxzLG2SodR1i765xo8e07ToyzVYA7sdPsKkYLSaMSimYY9+eoOpW2y34rppWK+36NFRt50Y/AhumjDWsV6vuPrOhh//ym+9TTG+cdE5EzWGumnnLH4Z2TE/pFJKkU0nOvVpGqOpq0aSoZCGRqaJw+EIKKpaVnsOx7TZhkRZVzLlxClXVVhjWK9Wspo3Z+8JJVTKLNgfZv1+3Ov22FGW+7xEqdTca1UGpZVKiC3pvbHod9bteRBnNmQJ5k/fmYaM8gFFykIA+dqEVs0HMr+RUwP6xp9yAOVcl+dtxJtQ1y110l3d1HQ3N9RVBWFPVAp7fUk0in6/J/Y9rq7SrhVDt2rZPL1AfXjD9sNrXt7ecdj3MtgWUq9VHfC/8or3RbQ5RbJnlH95XeW+KBWx1lBXAWPk2lmrqWvDeJxRE601bdvStB3O9QQvJVPnp1zswRqbynF1KuGLzvSHI03dFmeWg0Lvveh3JbRk+afL4+aNfy+Cw3QacwvO0u4+6ksugACnuq+yA065upoDjNyGEr/q+3NAMWOhnLTQlIM8lRgjC/U/AQfytdTI2uG6bqmubwijRyuTdNdC2IO1VDdXYOC42/NwGJi6mn46Mo6OLgqnpakNq/WK7cWG7XbFq9t7FCbZGEPVbt44xm9a5uf5Ud/0Iq1QWlpHVLrPdVWxWa8gTtSV2NzJjXgfaNqGzXrFcLHhlbsFKHZpGoXYf5omxnFkGIVv1Dee/X6P1prVarVIorPOCGWkxHZz+1dpgYqUY17smiaDG/mzchKf36tVMvzx9DlVeRArBaMqxUMmtcfI5qrMl55pMuOJPsp35ckDNat1SbUULNX1D3KPFv5Dp41a5d8AURIf8+wpcfBYo1lf3tBUllda2t3M1SWx0hz7npf7HeMTYWIYx4mDHznmlkI0ddNx011weXnJYXgp7Wx/AL19Z4DatC1V1aCUwO7aJM4wY9KK0WwQ5hvig0t9QHKRTTK0ddNwf3dH3wti4VG8fn3Lt8dvUVWy7zb3NhllsJXA4lVli6HO2XIIASOahDAFJFL/5NRD+pnWp6hM3qowD3olFVCzUZpLS4uMIgUNRgmK6vDkgmc2rN4HrIXgPT4poNRGE38lJEor4Qssg03Jqce8Zi++GZzID8r/ZW9fzqso3eJ85/OiOA6zXtN999s008TkJ7jeEq3GKuhD5PXQE3RFVUnPr7q4wFcWgxiSqqoxjQczP0zTNKKtxVYV9UeGuz/7xTsV7puSqq6xtWzBabq2ZKgpNiT3gao0uRgTZ2qIMiGstZQSm6bC+Uka+CfPoEYOhwMxQl3VEiQRC4IpZZ7c/yr9plVV0TQNdV0LV6XKiZU45KquS4lH7v2bfZxlcCk53LIJJetuNpQsg9mMls4JiwtpRWPMRjn1jsZ5EjVPc5YNbIpUreCEGSNGQdKyDX1jtSlvJllv6Oyjvy9FpV5ZMFSrNc3PJN11AfP0A0zXUG9lQNE/e8q0rhmNhKXTnWz8Mj6glWJ7ueF6veHigye8vL3jyy9fpmsVcW7ErYb3Rnezrpq3LPk4TZpzYhUxRmGtVLesEsSqqSv2ijRkEggxtVS1LUPvmBIfonPxBEFdr9coVNneZ4xh6kd5RvQ8V5zDPmstla1Oju0EIVc58QeyrhaAIZ7YqxLMZPu7QK5OA9Q5WCHmPsH0zyWSGudgaRlz5hBgmbCmdLEcQ/79x87/ZGYhn6Q+fZPEFPIM1W/T3bal2UpbGh88w7+qmXRgcJahf8Hx4cDkHOPkxI9aw3q9lnaUY8/D/kDUply7EE+D+Z+WLK/N2wAWrU3x1Vor2q5mtW5x7ojVEWNk//s0eYw2rNcrgr/i/v5+tlGplU6bmYptSotTQgzs9rsT9DTjP9LLLEmXm6bCZFJ0KioWWiv/n3qXlu5XqdzStAAQEt9u9PM0/KyvMVXGEnuAUlidKw7CU1oI8xfXqujsQkfl0TlpSpnlK4LUk+cxH3Ocn4HSCpCgWBVABUWzXlP93Ke0w4SLkermGVVb091o3OTg6VPCfcfYwO0hsK9GnI08TI5Df2Rwjsp5UJq6bjB1ywcffMCL23uhYFTqa/X23atO0wWWPsaJgAcVBc2oGjQHYtCgZrRK+dS4XtV4JydvK0NV1zzs9+z2+zQQEtntdvT9QFWt6dqVOGEtK0VXqxV13aT5gIg1hrqqWa9Xsq0krQDSStE0TZpai0IzFTxEQRDgtO9CnQSCszIoRcqsMmqUlFpnRjJQ5lEZIK9Ti4roPHZlZNeuj8Roi+OOcW5wjpHUexdLqVUOcn4w5k1Tb2rc44d/nsCDR89WeSH3U1VPLln96T+OiRHdpGbocaL/8CnP717z/c+/4PrZB3z6R77L1eUFL3/8Q3RtqSeXNh/Jlhii8IaqIFtnotKYaeL3p9f8lZff53/5LqX6hmRzsaWqWpSS4SdlNE3TcTxM5I1ooEuyQjJ4RofUsJ42lVWGw3HPlJB/YuR47PE+0jQtTdsQlTh/jaZuWupaykshBlQI1FWFNTXjONKuumQsAkTpc23rWtDJmMuhM6fiSemT3HskxzgbQYpbBUGecrBbgs2kx94Flg2jSskzFGNM/bWLgHaBhEqCNROaKCUl0aULOqW2fluCxck55YBYEoRs/U8Dap9K2ebJBat/6k9gYkDXltdBVibGn72CpuKzYWSYnmD0z7O52DD9jf8PD4c9Nni6UaiWVqsN9XrL9uKGD599QvSeuqqIwfHZ8Z6/8vB+6G7upRdUfWmikzFXBmUUwXmZDYgyuKg7CeirWkr9Gd1xk2cYRvpjTwzQti17e2QanVxjL6skrRG9yPe4rRtW3Yr1aiUE6EBKVcpnr1ar1LqVQYR0iAvoqiCaj36mlMJ5oR0srzEjQsbMAbkEJiT9eIz++4SC+vR8zMlcfiZ84iTWj/16igCWmNUy+E4vlGNYvlbssFKoR2W5kPiBCZH6Zqm7Fa/TOcefu8Rs13zpPMfxKZX/eey6Jfz6f839b+zZT0fC85epOmfZbNZ8EBV103I4HPFRcXt3J4CPP36lPr0PIgw9TkAurdAGqkrRtZbgBoyJCRgAJuj7HrhmtdpirOWzzz7HTQlIMFBZqcgej4eyJEG+B/wktHTjONA2XUFrc1IXggxUd6suAQNZS5YMLxl5T3by5GQWIKpSCQjLjSfwGPnXSqbdpRZECVJ1Wp1b9HQOPYSvOzEEKTKcIl++RE/lUZltNMzA1HxN3gQJcuUiz/nnBCs93QCY6w3tr/4CKkTqynKXBuP1Lz5h3TTsJkc/DFj/x7Cbit3f+W95eHjg1e7Iw/
xPCle1wcErhEjGobZKCXHtdrcdrYE5mZroTpP3n6Q+UPRv3J5ETNORymzOxM9jIgDufBV4Fm0bBcLUGEfhh0j41yOtsH02X0HsbKe7svM7j/v8z9ya9t2ZbmCf1msYq9T3Eru1a82sM9vAplBEplQAapBISULRIaCPooWzSQEA1S/AGIFjSyi+ilEEokEEpEkiJRFEgRkREp94zS3d979p5bbbc45a5WMYtBY8y59j5Wv8qfLdOxe++55+699lpjzTnGN77xfQKLSUwOEdvo+rvuGx73a859S2c9tuQIphZJX7C31EMWYOAYq6dP6CmgkmKizubUa6Aybss7KbgYQ+n6qkxdChHbeLp+/aWpp8btL7Dmlhf6JnH7tdnE6YYDuomoeLmipCGVAScgWSHNQS9Iijr8VOSi5qAJqgKumYEBy0o3wBMHic8moQ8/XhkeyAZwy8ZeL7I1mjBCUYAsEkPWqDPO+fkZ3WqF8ypUPgwHfXV7hO0NRx5Ufd90oqeq10TzgcZ5PKp7dm49F2eXnBtPEwUXFda3J+dXJ/SW11ne5RhWIqLtpSVsDCkLJiXVU/VFSkiE/X5f7NpQyD8L8ziQ1j3WOlISUk606zO9Dxi91l+SPn3x8ZmTLd86RZVzymw3G+7v7vFt9wu89m/uyFILpIyQiiafELNSU0KKGGPUVaTRqWfvnGpIFkRdH9hIUnCKMGtBs+5X6KCcKXzzOoL0ZWLngqLMJW5Pvq1T+KeJabVW1QvsvWe9XuNbT2NWNI0jZdWVNLUgM3axOH3wZc3D+yYgknCu5Xx9poXLNNO1HRftmsdtx5mxECONQRfsys3N+QganziNHLdRKc93eUZLQqubdiYX1yFrdYOv11bycVgQ0QreS0ZkRQiBKSacb1We5liF8s1i+Otjtz6L1/yc/xv/K/73/M++wev+po+iVZil8PHLVzGdiJJIwWBdaTWnkvBhSTEhNoLRJDYbyzTqUFHbdrpJ4VCKiP0MKiInTKmTgsQ4LEWY/Ji9LQkdJWk4tl4LH9o71mu1lEyi/uMparGHq8NROmyir/1w00dYUNSMkEQ3+q5psVmY9wNu7bjsL+ibjkeuoYuZznimFHTM1djliTOnj8NJYvNgzzmhP2QRTSpP5iKsrch2pmka1uvVyd4hSE6kqNqdOUemkGgFzi8voA4u1jdYslG+JJzN18auAW4F/vPffn6qTMyKamWKwySkEMmhKOhklr3aiCXHQCaW9r6uITHp3uds5e4qJaDOnXxZEqRH2asxqqZwgpQCizufLfRDzQ/qDItdHKdc0VKfQiTEjOSgBhDVJa/8V+PWmIc3R066ODE/dL/KMWHmSOca1tZzaTyrBG0UTMpq/mAtzpivXuZEn4tjxNYk+ZiEK882FSmpei01H3r27Nnx/EUgR1KKuKKmJDkTYqItHZgFUvmyTFU/+Oe/99m4LcXtN4nbXwruOooflAUt65R7lMREpHFeNQ8FvFX5kQqm1jZ2nmZm52iaTsX2m4aYpgWRyvKZJ/PBn4qgd/kRawyNdcwoFK/hpjw3lROBmDLvffQxw/4Rz5894vxsxWHUdmLGg7ii1VeS3sozqa1KUeS33jhrDOfnZ1yLYX+74XF3xmXb028HGtvQDDPEULUAFkT2eA1Pf6dt/eXPhbeS0Qoopoi3ltb7ZQjNGsMPvv8D+r7VhVcyWSIph+OmJpEcBaS4ap1In/xiSeoXH1IW2aurG/78X/1r3n/9CfZ3Hv/Kr/vrPOokdJ2AlkVnTiBJSTD1mlcpE0Uu1Ywh10U26XSqiNA27cIlyvbIQ9Pr/uVxC2BFh7K81clWI8I86+taDI21BMxSOMec2A4D733wMevGcXnes151TCkjpiEXHeAlxpZ3NlixmlOWz2SKKLW1lvP1GTkkbl5f8fTxcy4bT3O1wzb3mEcZlzR5thlMOn6qarlnRDhtuz9skUrhSedlCEFEbSfVBlnP8Hvf+94RPc5CllBi2JbzVF3dnKpCwik+8JW71C92SOZvSMN/lr//63vNX/PhnGOaghYKTrl8rq78osVV0zRMKS73IWUBEkHSUXfXN8q1T3Wd+wbcMslAWjpqlTs9zZNKVhVJqzK8v6Ce0zzz8avXPLk853zV00aLc6Iq6KWwMp+JW30/fZ2C9SwJQCx8XG8gTjPD3QabG9xouVw7+usD0wcvWQG7GHWzF4rAfyZbe2zrGz3fWoCZ/LDLUKlq4zhqolWkuZxzOpaRDY8fP8ZaHTQBRU9TDqQ8E6VSjOpXuY7mdA3+gpbpL3H8rrH8R67/tbzWr+X4zNYS5wAp0zctoPdDh5ceAjM556XnOocZ33QYDE3jCbMCIcCXw3T6QuXvdV+V8m+s0YJPYiDFBClimmZJ8uudSFnY7g787L0P+NF33+HibEXb6CCfGEc2Dmd0qNZ/Qcv7OK4FlV99KqnnjaW1lvHunjPX0zdr0kdXvJj/nObpJW6csH1PYz1No/JwcnKddO3MVIG0+sKLwpIcfx9jxHgPJZ86fc6stfR9V/ZFLYTVRjYxzgN9WhUb9WOu90X39lc5vknc/tL92CVTLy3+ZPXmznNFNbUqbnxD27RMKRJSbWMr7BxyS19EfSUlZikTZSUJBDC4hzWC5DIGeNyuwjyzOxwYJk1w+7YhZ20x5azV75wyH798rRB6WTytszjjyEV097Qaql3TZQIRKa4UyqP13vOD732f+WZDfHVLtx057xzzux9jnh5Yrzv8nHBlMMEaW3V6lDOSH6zCC4KWq3tPrYDQDTppxrD8sLGWi4sLxGRCmbKJcWacR0WhvMe55qTy4fO/yvGdTyswPlOlflaN7ciG0V/urq65evEpd/dXXPy1p79ENP1mDpVLysX2Vo+cBEmliVk4PilGYkplbTvKSXmvU9DZ6M9JFkVTENb9ihwCpqgoaNxKed/CIbT2eP0r7F58QatJxeFwYCwQbeNduZeqdZmjELOwH2Z+9v6HvP3sCTEn5pRUS9KqLatasyod4IuWbu/KkIjoIm+M4eL8nOdPn/H2k2c024lVY3Gf3GCCwz/PPE4wJDlpgUKNlSVi5KhGIRVC48jnKz9VbDdrQqTIHynTdz0xBZJEYo6EQklxORPCRN+vOVJg63U8idcHYXlybifB+zklwQcxfzxemqf8n+y/z//hC67ft+FYELrCOZvnREyJ1plFGN17baVnqT9Z5JBQPqfzyoOb+pE4xS8f9viCQ9BO0TSMzNPMNAUO00TMWdEeWh2WdZaQDBFhjIm7zY73P/qUy/MVjx+dc3lxhmsU8VdXnqPqy+lnrYezTuNbWCy3vW94+803+f3f+WuYj6/onWMdBhq5JUXLm9KyjwPMiRTSMRkuicuxW1Fldh4iPLlyKEELVzmRARJVn0g5YqzanIY069BwVPrQHGameeTs7EwHf5dng4fxu8C4x97L8smFZe14iAF+vgC+JvD/MC/5n36jO/mbO5aW8iIaX75SIiRdY4HFcMI5j/M6ZaJrMcu/NaKa6s5oURHngJP5MzSTLzvqXqZzM2Ge2e8ODNPMOOkQUNd
4Ugo6g2+hbTwhRqLAEBIvr+9onOeNp4+4vDxjte4QLE21INUF7RirtcVd1vc6smrMqeERvPn8Df7w9/46H//pv6INgfUcWOeB3mywTc8zaTkkkBCR1pUOM0WprKx5pmYklfNak+CC0Oak1uglj9IWvxZMalGvMdV1PYnCT8+JmBQgmEPQjqORwok9UhwXVN+c4raapx27hMe//9LDGK7N18ftL4eg1gf45EIpMqq6X3oNlW9mS2XfJFum6nVoQmJcEi1nHd2qp2+PeqYVQa2E4GMlWqKjVKI5Jvb7e3b7g7pBeId4i8EveUHKEIBxjtxud6p92XjOz9dFr9Eek+qTaWxTHFCqLIMv9q0GkJR548lTwjvfxV5tyO9+SJ8D7Wagcz0Gw7nxuHgaVGZZk06l95Yp68LBtdZwHPDJi8ZmrYxAE2drdGo6hkgW5f4ehj0X82WRUPKFE1MWwyoK+4VHXSAfbumy/O94LG3cLGw3G7a3txy2G8I4fvnL/5YOEVkQPI1TLVg0QVXpsNoCEVg4w03T0GVHCpkkaUl2KUikc5bVel1E0MOCsNcWSsEwqTzY+pwYa5EM8zSTx1nlP4wWU744V1mqDqkussEI2/1A1zb4xuG7ht43OtBV+NMaxubB565Joq0UAFjMrrxzfPftd5C//ge8+kf/NW309Ey07QHbtDyzLa+LTWZtw+tmXp8/SizJMbbLt+rkqVStvZyWRfQ4PGbA6rBUCGG5vvM8YXMmhJm+Xy33rXyqL7jDn92s6rmcxPFJPVVf56FWs2Gi4ec8/6Zh9Vs56jqRS2ElCLVFXoeS2lbRQB2i1thQOSi1R258w8XFOaE1dN1X03EqZl3TJETvV8yB/TDo4IX3pDCTrKFxKtqvVo9a3Exk7vcDxhn69Yo1tX1buddlXTKne8oxSbWlm2WtVZqBtdgM77z5FvIHf8xP3vv7tDHSxYB3A7bd8Ubb8EosQ8wFdbMPP1D9PfLFHcmacyBHF6OsCWptlWaRhX6QUmKaZkJSeaVpHGn7cekmHIOv4oOna3HdF05P7HTw58vvT12HH8vIv58/+sp7+Vdx1A7UUQA+l85QjdsTcMlavPf0nWVKRoehRbBiVerLaCHb+LYY/WRkHrDuyxF/Q80QTnFMyFEUCJgmYsq4tiXFQE4tzqpaT0pKQxERQhAGEe52e84vzlhn1I7XVjT/eF+NoXSTzIPi2FIMX0wZZHXKuX3+9Bnur/8++d1PsLcb2hhxTNh2oNmNPLYtKVGk4DgJxhIXp0OKnHBpYenSHrtYqi+rXdi0JP/1Z62FeU6Ekj+EIg82zDMxHdv6ys/+grV3KfZr/NqH66w9OdcHNZYmP0/z9LVx+8slqOWDVp1Q/V5BRq1ZArFeO+89XevAKpfSREWWXNFItM7StA2tbTR5XTayU6T0iB5WYWfltyR2mw3DOKsLjzGakOSqNqALesiZORq2+5FVP3J2tub8wmGsHHXZioHAUiDVL1EBZm8tjXNaGeXM5fqM8fycTdezCwmfZtxhoulGTKvckqHKbWAW7qiR04XxeP0qR7WicTklcoz6MOV2Efzn5J/qxONMSIGUE8PhwBwmVUgoWocPF8nTWn1Zicvr1Sv95VlmTkmLh4Jo3Fxfs7u/Yx4GcgyKDH+LjhqXmiiV6hw0fvQKFdm0siBY3UC993QYskkkiTjnFGVFdJG0lqZr8Y1FcnOSSBkeVvkPK35jlDcYZkVbpnHEeK98o1xaOgIpJqWUGDBJGKbAZj+wOltxljItBm9qclo2vArRl0q7Tq+6ulBS5sMFvDFcrlY8v3zMzRxw0WHNhN0P2LblebNmmyODlIgxthRNJyvNUtmffrcOmSkaklMiorqyp4VnzXOzJEJQBCqlxDiN2JRK2zSfoFmyRO7xYh7P4/iNz8bfsQNyWgwqkkxxMRImGfn0W2AX+XWHJk1CdpXPrjEgUn3QLTYJNguYRJJMzHmR0XPOsT5bERvoO51Q/sLj5FIfOXbVJ13jFiyN1/U2xoi3vsgvHSX1DMJ+mGi7hjEmolC4+SU5XUAHWTaxyhu05We9MaVToDFvs3C5WpOfPuXnIeGIGDsjdsS0B56dPeXCNgShtHjrsNlxdTN1Y6lJoICcbsJGQDIxBjBG4zOnJYlZkgBk0fyMokOqwzjQzysqvagmkkdU4uQay4Pd7eGF/wx2usRARXULjc6kmUfsvzRm/soPOSmQ6/MOyzpMSay8c/SdGkuIibhSeJlCp1j8602Ds0IaKJSUz8fsMTksCVjNGkprf54m5mnWdRuQpENvqjetdKRMLf60GNseRg5z5DxrS92bY1F1jN3TqqfErhzVUS261rqy7l6s1jRPn/K+86Qk2BgRmciHA80w8bTpOZikz2xdd2uqKLKoHyxIQ/28Bv278nNSinwwxGJbXde+UtdirFm6Akl07R3GgXGelUctqYzxnCjPLIBEvcqnxdXnFuTPxIUsv0gWbA5fG7e/cIJaxW1BW0bOOcgGSZWnSXHZUCZoLsG26jqMz7iYmG0kI3R9T/XvFYGmcfjGqTDtifTOCdaBKZw70AGVGBLDbkCMxTZaZecoTKgsiIr/6iIxiPL8dsPEfgo8xeC1E4s1QpUAMrCYANgSB42xtMbSGENjDF6gBca7O15/+AF9mEkGwn5LajxN1/DUtryIuagwmYW+LVLvaeHynVxbkYw1tqBJM+M4loCwCyF6SVOtctEOw4E5zrjGs99uiXEukH3iJG3Sdz/dw00FwUolJsKxFbX8yPJwkNUic2nZCrx68ZLd/YY4zyrd+S0g69fj2H4pv2Qp7Xunrf4IYpS3HCUd8/VyPbz3rGxLMjr1b4uFoiLTuqE67wrCs+T3mgbays8s1bU9bkQiQMqkOZJipnGK1OcEc+EZxhRKAm1IGcwErgmcj4EhJM6dLfypugELlQ5z4iOFRaeaGwytMbTW4nOmA15dXfHhuz/BxUhkYpIdrTH0jeO7bz7lk+GGmTKdW4YZT+rGJWWsbahj/BS94BAZDqrL23eqoFDRvxpzKWWGYWKeZ1zn2R92NG23JARVzo569b5gDVwGED6DOB3BhnonNDGttpi+DkOQ2If3+Rn/R+A/+pXj7lc9HiRKn/m+ZFHKSeFDV/Q8ZW3Zd53Hi8FnwboyDFimz4215OLm421H2xbXrs8ctYC1xqimrbHHoU0pKOoc1efbN5DVZzzYyBSjrreSyVmdy3aDOtOcjYFHMbMqqiOUQS2A+rScrruNGHocHYYWdO3N4FPi/u6Wq48/gmkgkRltxuWMdYY3f/B9nk17Di4RjJ63to8BI6WEk+Wzap5oUKGoovwhKnt0OBxIIoQ5LMVVTThrsjVNk3JVHewPew6HHWfrM3KOzDFgUlNENuyxA7pc7OMEtW4MKv5vPhfnpwGvLe9xHDEG/nwS/uf2TT742sj6qzxOuhRSZk9SwjmnsVsUbrquA28wXp/1JBkToyamTjsDzliavoWiUf2lRynEa+vdVvAsC7HQPaxTLV8FATKSAjGEMvOhcat7smc3zNzvBi4uz7mUMngthf
15UpVXWKAOQzs0fm0WnEBjLT5lWoE4Duyub7j+9GMeRUjZEXJmOhiehsA7l5fcxwNDyQMkQ6q5AkUF5qQgX2LHnPCnUdWk/X5P00amaWYKhSddlZes6vxO06TFVYqM88jd/S0GRwgzMQZigq5kHWZJTh/mCOUi8xDWky9aWpYCNsfEj6P52rj9pRBUI9oOrwt8Cg8RopxzcTJQXbjhcMB3OoTkjKVxHmkNxnakrJI5khNzEtauVY7aaWL+4IMW//NSvNTkVpBlQq+6r2RTkmZR7kvKiSEYNoeB9W7Hdyr/KAewFmMaTlvc1hgaA2e2oU0ZO82YcaJfgx0mPv7ZX/Lxj3/C3ScveD7DZCL7lGkaT3dxzhvn59zkEbIsxbPKSH1JhaEXl4oi7/d7phDx+z2GG612KNVemmgbz263Y7PbMM0j1hn2hy3393f4puUcMMZjSmflc+W4nhA5hGIIYB4Mmh4vuVZk0zSx3+9JYdbkxzquXr3gfnPPNI7k5ovq/W/TIVxeXnB+fs79/eaYXJXNt3KFYgxMYyBlh7iOxliErHxq5xAsc8yIFbwrKhGtX9quNXHTVO0UTdLDFvS/clbr5i85E6pMGyebFjBn2BwOrPYdj4ZzjLlQpMFmTYiNX97DlOSiAZzx9Bh8StgUWRmrsTyM3Hz4Ee/9y3/NO7MhEDnEhEMlcR65t+gxDGKxYjGi0j0uazdhaTuUanjp3C9ZPozjyOvXV/i2wxnDsD/QeK8tpznQekcIutHe3t7S9I7N/R1N17E+v2C1XjHPOoX6EE2S5ZPWGA7zqO52jT/NTI8/jk4TazIxMA0H2r6jbTvImcvh9/gP+E9/9RD7NR2nCNSCYFITq0KxKTSqisofDgea7gxjPd5YTNOSc6ZZdTjXqKQPWU0ULF/Y3j49jm25vDwbdW01xfXJYoqedS6ONCfcQxHECCNgh5Htfs9+OOPRhZ6XrxrkpszZ62KMMZbGWXrX4GPEhYgPkTWGJiVWwI/f/Rn/1d/9+zxLGckTEwGXE83BM9/e8PxsxZ1M3KcCXBqICD6Xz1WAuEUbVVh0OGviGULg5atXCJYYZnLKtE6f83EcMShvfJombm6uwFk22zvu729pmpa7uxuSGKxvlufYAkc+dUm2ciZME2Gecc7TrlaYBzF83Ox1sjqy2Ww47LfKhz3c8aNviU0vlEJwcU6UBWfE1BkORe7mWbueBhWuz1YVSmJKdN0KEUOYE5lE41NxpbIPVoIvOxYEFVd42Mc5E9DrGGNRcklHFJFyrlEyU0ochpFhmgkx0Xv7IBGrSH8FApzUnMEhQeOxN4a18fiUcTHz4U9/zo//6Z/gQ0RCJmVLtJkI7G+uePT4dzjD0qQivr1cUy2fNJblCKDWJDnJgzUjhsjNzQ3WOkKIbLdbLtYr/Ww5kadEzlpcvX79milMZEnc3tzQdh3397d6v5qWftXrOvS5i1zWX8lITBzGA41vcE2D858ZIBOWGI5zUC3yYfe1cfuLJ6hmUb/DGkfXdcR5QLJdNnk52bxSSuz3B9zs8H2vjhwh0q5XhXcmSLE7HMdAuux1g7do+z0ZvTEnFZFyLwqsXWD3KrMDFEcTFg4MWdHB7AxRMsMc2B5G9tO8TBbqNTwRAS6Llyvcp/lmQ3Y9trsg+B3Xr675+T/9E67f/xATIyRLNJFIIMwTcRh4fP6ENo/ErOhjrdHrwnja0lq0WKFA8zpIE1JGxoHxMOKcK2hWqeKz8PLlS65vrxjnAWvhcNgzDAcOhx3GGtp2pTyqBdo8XkPEMA4DLz/9hK5vePr0DZquKzJHCYzT5CMnUo5MYWQaDsSo9nPeec7Pz9n4V4ud9lek3t+Ko2lb9SuXI4ay6MMZkMLl3e/3jJPQnz/BNS3zrPpwbecBq8Lpc2A2maaB9foCYzV5qGT0pQVystmcitiDFH6oeSCxlsrE9NIZBKLVBXAYZza7PTG/RePiMfkt6EHFElWqxGERDjf3XNgGv7qEw8z1q1ve/+f/gk/+7Ce4KZKSImvRQ4gDYThwMcx0SXA5U0nyX35vj+jl6c+oRefIYRzZ7/Z0bcuTx4/1k4tuYnd3d3zwwfvc39/hOstmc8/ZxQXjsGe72SA0aicrddAts6RqRSd0v7nn+uolbddy+fgx5xeXsBh8qByYEaXyhDgxjgfGw4F5nrl87CBl2nDFH8rfA/69XyygfgPHcRJXr/niFGZ1c6pARhX0N8XrexwPjLM687imA+uY55lV24FVbl+MiTBHvBO87Qty+E3OKS36z3X799Yuk/VqbhHRrfZYkCPaB0hZxcLnKSgSr6v6gpjWzoLK7IG3nnXbMe8HpFvhzwWmwOZ+z/XNLTcfv4BhhlykC43qQKc4Ibs9K3dB41Tk3NW28kIvOfblKqe6xu0Jyx8RYRhH5rl0AozhzefPF2QwZ2EYJt577z2ub16RycxB+Xvn5xfsdtoNENGpcRCwxRbT2OX6pJTZ3t9ze3OF844333qLfr0uyVQtcG3ZNwRDYpwOHPZ7vHc8ngf+J2x/qVj7dR4JUWMfY4os2BGlrmmcMZokppQYhgMxjTT9Obbp0BnUjLMOrCvgVSTkxBQjq8YrnSIlfPPFqcsCCphqGFK1eUvHyyi4NhdbXi26gnJgKz3NHGNqPw5lAPucVdcXE7eSmIquyQuaZi3eWHrX4TO0WBqxMEWMczDN3H36kg/f/RnP5qwqMqLGRSla0jgh2wFvIk0LYqqOhZQuUa2mlnpnee/l2dGrTIqRfYqkqPzo/X7Po/Nz7UCf/OyLFy949eoFwzToAG4cuXCP2B52GO/p+zWPHj9dBq4U3T/OBcmS4215/733ePT4MY8ePeL8/JymxG+dF5J6zbwhD4HH8/5r4/ZrE9RaJZy2neoAlHeaoA57B1bbNJU7YoxuDDkLKQREArl4viLgXQPWE1MgVp3EFEnp4VSlWc6hQvdlETktXUwh0hdeoZShoiVBLUEkWduLc4wchpHrm1uenr1VbilHn+Bi5+eNpcGx291z9/EL8u2O+fUdr9dn3L56ze3P3yfcb3AhaUsIHbaJ88w0HGA+1+1RSkiX6ucYZnVa/pT1aZZrHUIgZiHmzPZ+w9Onz5abnUuvdbPZcnd7yzQPYCHmyOGwo2lbYs6cnQnjMLBaWaxvqUNqOQlpjhx2W16/+ISm86xWPc5bjPV1d1mmAHPKhFm5VmRFRmKatVXj6jTul+mAfjsOlTrSCVJj6nSlaIJq7FKS5pxIMRJCxk2hKJzqg+msJ+OI00SOEUMkZVMmVE/aGmZJ23jQmtZqqmhaHp8lkUxKWrlkPdmTRUiTvZQt4zSz2e7ZbPf0j7ryfFTgu7a2LN6qS9ThsGX76gru9uTbPeOTW+6ur/n4X/8Fh5dXGrto2ld9tOM8EfZ7Fa0ui1/9YDpoVlyl5PiRlhg+1qakmAh5JqTM5v6ex48fL/zcyrU97A9cX1+x220xDcQY8G3D7rBTJYVmxTQOpHnGulYLCXvUFJYId7fXvPj0Y1brFcbA2dnZSVGgDTipMRxiEVgf6PtW9Spjpo23/
CH/5a834H7J4wF6inJLbVFsSCW5qhI29c6LqL5iZMaJxYnBuEo/cVjryFkIISJppnFCbL+5R7yUFmImLxbSxlp9VlJAxJK8olWnGyCmOAmmyGEY2O52hPic9mRyv2JdFWVxKAqVUuL+9RVsD+S7A8OrO3Z3d+w+fcnth59g54DkYuvIcZBp2u8xqxW2SdjslJ1SUNT6bNlqYW3KXiOacFakT0Ttsqc5M02zov9No9QdfXBBIMyB6+tr7u5vF3k/5x3zPHG3uaPrVnT9Sof/jFLYsIWmZpSiFoaJ7eaem5trnDOcnalTlq1CncfAKDQPmOdAmGcke9Zh4t82d79q2P3qhwHjHMbZZRgKKohUy+bj/pVSUuqEaXEcjSlc22CN0pVSFlKIIDM2VbWVr4ZBFjOKkjdWNMuUAl5KPKakFIJUNN21VU7heWoBPU4zd/cbzlYNTy6+UwrD+nXUNpWCnnauAZsY77YckmGfLNfBMA8j6X7Dq5+/T9zsgVafWYQkkEwkzjPzfgedoWk9abHH5nOOagvEcfIXla4HapE8F5rgNE5M41TokceEN2d187u7u2OcBk3SHfRxxW63RQTmEHj2/C3GaaAVnSVw1pdnRPeleZq5u7vl5uYakYizsOoaLYAlKVizmNtofzFlYZ2+Pm6/NkG11mpCclIN6RSmOj30fa/JJhZb7U1PbOYoFy2LkOeAWKfyEtaTjSNLKLqURbczH4dwHiydC/JUeJhLRsnxYShleMrxSNY/cac6ViuJwzjx8uVrfuc7b4KcJqhl+q7gEyZm7l695vX7H7GxDVerFY337O83dPtBUSYsYtW1JZWhj2kYCOOIafNyinCCjEitfMqDIyzfq1GZkgrlTiGy2x948uQZdTOqCOB+v2O73TKHUS2fO8/hcABrGcNMFthvN9qetl5bW6VwGA4H7m9vuH79CuPgjTee0XUNzp0VZEF5Q8pbEVLMpJiVryuK8ErOeOsLupv49TpO/3qPLKKyNYU77YxW29Y4fXgLv/kY64YYE5EJs3xGR8YSk/IAKeLhqchUwREZ/6LDGJ3EXG512eylcC3rZPay2FSk1yhFZZxntrsdr1/f8MblO1qcmUreZ0EJHAabYHt9zdX7H7Nzr7i/eMGjy0vurm+YXr7GjTOrXBAPa0t7rUxy7nbYC48T7TTVCr4+ezVnNacLp5iSJGtGEFNkmgNzTAzDwPn5JVT+V7FDHoaBu/s7xvGAceq2Ms8T+92WmDKr1Tn73Zbz9Tldb3FNuUco2hKHmevXr3j9+iXr9Zq2bXjzrTcXx5S6iuSciFF5aDFE4hzIzhSnsIRLM9+1355Bk5qAA0VCSjl5LmlvXnOcip4e1wwpKGkmIDGp6HhZixOq+CE5QbkeX7TVHwvoctR7bKRMEZcvU4q5wodNMRe6x4ksTdk4U8ocDgO3dxv2h4GztuNYoB8LW9VfsXgs827P9fufsLWOu9Wn9O2K7e2NFlbTzKquhc6QjCGik8jjcIB5xpiMzQ1115AFIHi47prl8hWOX/kXMWXmwvWcpmn5uyWhFR1E2Wzu2e8PCIokt3SaoN7d0nYH2q7lcNjhbcvattrmL6T1MM/stxs297fc396AES4uzjg7P6dp/FKsLBSPqi8c1OUuC7Qp8wPz29dBNdYua6utSaqU7maxL9a1oWzWop8nhEQ2M1K4+531WoyVfSeECBLVnTF9LjofHp/561xQ3apprhQ57Qim7LDUYrdyBkvRJMr3nkPg7n5D6wy/8713oDGnufeyJhoEh6EzDTEduP7kJbHfEG623HWv2NzcMr++YX99QxMyQkSMVXtcA9Fo3jDu94jtcTQPPlRNnrNUOK1+0JLY1wn6kjepe1ckzEGLmZiWfSWXxTvnzHa7YbfbMc0jmITrPPM8s9tvCVGH/g7DHr/dsD4T+n6Na4rJhihtajzsuXl9xX67QfKMt0b1Y9um0IFkAYJyLjz6JDTJfG3cfm2C6pwjSTyK5JbDWkvbNioh1baKJuWEOpSoxI46M6gYv1hHrCtBlT6hkv3B2wZLwBjBUluk5fbIZ7PVuncfE+coCZvjsV26oD5KLDodmKlZ/6vXVxz2I+u2L5wqgLzkvXGa2N5s+MufvovbzgxR2HJLJ1arBDF44/AWFQDGkpxyaEIIhMOI9RZnpDgT6UJpEEW+SyVua4hXiN75BwlszijyIarHF0LQYQ+fuN/cs9nckXLAe0fvzhingZQTu/2OMM/cPHubfrXGt93i6ZvnxO3VNS8+/ojN3Q0xz7x68UwluJwvdYXavFZfYIdhHicsCWt001mvVuybltZ5Asf78W069LPogIz3DX23puvXpGHESMIUMwnnPMaolFjjW5JTMnmqm1jdJErB5Z1Kjhmr7aKFF1LfFJbNvXYCPnt9tDBLS4J61O4Wamu9auvlnIlBOBwyH3zwIX/4o3eo+o51kt9bTU5zjGzu9/zsxz+F24FNEq55QWstTuCRQGscjbXYUiAmpxywEALT/oA9O8OaTC792lxWQFOLKTR5tfm4WIK2o2v8GOOIMRBimWaWY2cj5szusOfu9paYA85Bt+pp5onDYcfusKfvdzx98pS+73nWtLimQVDqyTyOXH36kleffsyw3TCPe5yDH/7ohzTd6oiy1rWicMEb59TsYD6Qo177Ub7PC/63/OA3FIO/zCEle2qapkzgNySr19A3YK3HWkU0ahw0tiE7x2kvoyZip4YJ2pA9Fu5fdpjTmC+od33pmIMyh7LUXqfGYq5xW/4tQhLDME/c3Qsffvgxzy9/d0HWNHfIOrCWiwXmNPPxex9w/f4nSEg01tE5j8SZy2xpraN1DiNClIizlijKHw/TDOOAOIuR7hgHy3/l+mbAVcQNCgRYEqmSBJgq+ZaPsw0Fic+SOUwDNzfXZNFBE9+oCcHhcGDKqaDXlqdPnrHqL+m69ZFHmdVw4PrlK25fvWJ7e80UJiDy9NlT+r5d2t7G6I6ZsxDnoLKH1mLIjOkZt/If8PavHnK/0qEDUHofnfML31w7kg5nvTrJlS6PXRLZMihtTq1KzbLvaIx4cp4+H7NflatWxFlqCimIyQXxr393guQX+UGMivInidhkGcfMdrvjsD9wubp4oPqjWqEGshZWVuD+9Ws+/PN3cQm6YlpkUqafEytjWTkPkklGbXCjhWjVRttMI6wcNneKrqaELe2qRaMlo65WpcBbRkitJvg6zJpKQl7iNyVy0gHcJJp31QR1GA5MYUJMpqVjnCcEmMeJYTjw+tWnTOPEkyfPMY+gbRpNiDOMw8DtzTUvPvmE/e6e25uXHPZbnLOcn50Tl4FCuySoOav76MzbvPyauP1mLX5TW8rH79WESkSFk3OBxtvisoDRJNY7TWDHVEi+1oBzpR2tGpQGoWk9jevwvmy2JKxR4rKpkP2DaJRlsahHTOlYFQsnSxEPEoZc2jcmZ8ZpoGkf0fWtJsYC+92WMAWm/cDh9p6zR5fMcUuaAzEk8jzx2K9ocDTGYsWQUyRIZBKDd4EmBiRMuNxTGljqQc3xPPQhOqrj1YXQlEpH
MMv1r62N2mLIKRHTzGG/YxhGsskwR2zbst/tcc2Esdomu756Sds2hHmi686YQ6BzK+5vr3n94lPubm5wDsb9nnk4EOYB4xyxVDu6WCjlYB6Hcm/0yVAuqwZ/lrAYLHwbD0EIUZ22mqZB0ozNLANitYbxvqXtEiElpqTc5bqQZnRCMqeM7RyNL4YUTmforZGymWS01Kq32hyTuhO01RqrGnUleVNZatUEfCj3dUy2Uspl+jJhjbpaqSsVyuuaA+PuwPbqFt93xC6Q50hOKvPWOk/nG1qKDWT5PFMRdfc+sE4Jl9UliNKNUFBSltaOdSVxLmeYC+rvXLUzZfn0sci6SFYJuDpFP44jwzCCyYwIOI8dJ9VTNoZ5Dlxfv1Q3mSnwne//iP3tHmcbwhTY3d9xe3XF7d0VZ2drHl1esL2/5fJJKU+lCq+DMQ4RneLfb7aI1UFE5xzvyY/53/G/5B/w6jcWf9/0OE0KJRdns7KZ28aj5X1i2baM4Jxu/rG0z7JJGKee4lh15grFbto7jzVCU2T56rCIDpHkwvJVVOY0d7VWxdPTibZoyEmVLUydbi/dAanDgvpaMSUMasV6f39P169oG1VR8WVSO6fENA1sDhPD/Zab2xtMX4aSQiClwJlpaJuGznkaYzAhkWNmthm8TviHFHEhYHKDlSLrd5Ky12RbqTUldSn2vUvLtgxMqWIHZWPN5BhwJaHKMSoCut8rr658dhcSwzBiUsC5hru7Gz788D0eXz7j8ePHjOOBGDPON0gQbl6/5PXLF9zdXSM58dabz5mHPeOhA+eJOWGp6jXHgmsaBjDCy3ng/y4/5b/J3/mrCM8vParySdu2tG3DPBerb8yy7+rPFUtRq/SLmLMOkzXa3TLOIUbX6lQskr1zeNOW4leKt3x1QisSjsApsrg8N9Yu3xPRASgdNC4mFhUSLyASnK7Zet8VSb3nu289xXnV8XVWFQGQrIoP48z29RU//vO/IIeZOSSmDL3z+JKs9t7TGouZFZCIUAA0mFLAxQg54aR0kpFl34ey7toi3k99ClWjWocNzdJdrXnEqTTa8gxkQUhsNhumaSbmpAVCSEzjDALRRUwIvHz1CUm0vd80DX3fcX+/pfU98ziSxoH769dstrcYC+u+YzzsCPOo3blSCFhbr7Uj5czH+Z7/WP7JV8btL291WqZ8rLH4tkOSw5HpvaHaeyVRvpJvWxjLjbelagLmeSLGGc1ZPWdnK60KRZnvpwJT+r4WcxKAlRwMFB5eacFgS73BSf9xOXlFqpyh63uevfEG636NdwayJljX19fK74nKJ/r+7/0O7geZeT8wbffEuy3T9V3FZ8vCZFSORDJTCvg4Y2PAxgZyafOLwRbhfb1hxSDA2ZIEnFSMJfiqMsFpJVj5kilpSyumVCJViCEzz7HoICqn94MP3md/GFivz7G2BWP5a9/7EcN2w7jfk+cZ2zekMBPmkXkaVIB7gRpskUCamKdRHTico2s6hoMOTcUQCCTkC+zfvi3HKYLpfUP2LY2FxqvwPqKLpQIpDuttIYa7wq3SKnyeR1Ka8a6n7xUpaJyK2VTnmBqx+n+9mwaOE+9yPCddG48bpJz8gJHj6xRYEqRqXva0TUfjzMJ1fv36NfM8I0EX8B/+/u8i+4lxu2e43zFf35GnWCHhslDp54oIc06MYWYOMyZlnIimQkcoUtukVq2ErVfEq2qe5jLNDVavBbLYRB7vg/5aOa8pJYzV9SKEhJsj2IBYTcZfvHjBNM7cP9pwdXPDOATeePacvmnZ3N4w7HeEcYbVSl3mw8w07sEqklhN24z4xRFomiZs4xiHA03Tch7f5r8r/+vfWOz9IkeN09p6rGuCdQ4nSknpvEpEpZIgGqOSZyEashgyGtOmgAE5pmIlnWjaHm+NDpoY5R5Xjh4lDk/lPAQdGjm2v/W7yyRx+aEHG+Dx0yCAk+OQksXQNi1to90lZ5R3evX6NcMwEMYZmSLf/93fwUyR/f2W/d2GuNmTxqQgR0l4VDuTIg0pTKJyOV0MSLQaw2iCacrPYI4InClAwIIQ16EkY7DYRXGlAgWfRZulyCfpXqCJwRwjc4hIilhfBh6jsHu0J6ZMjGp48ejRU56cP2K3uWPc74jzhHeW1jnCNDGOe0zTELPgK0qOrsUpROZZEUUf7vgD8y9/XeH3Sx9VUtJ5R9t2XFxcEOagerZNg5ZTytU3xmCcit/HWaAkptZ5jLOKWMeg8mkY2k55qpqcyfJeqjt6goCfflUqQTnqn9T286jhrqBPNbSA4widrmtqh62gQFPiVumAghPD7e09cQ7EKRL2AxdPHuPOLxi3B6bdgXmYsLhFNksbI0XHvUSOlcwcIz7MSND925fEXoGL8knK+otUIooe+prHZOhUarB27+AYx2UzYpom7chKJptMtrAqg+wmZYxNXF1dMw2J8TCy2255+eIFm82Ot9/6DhIzd3c3DIcdYRpZrVc0zmIkcThssU1TMjHl0OslVpCgifdfG7e/tNWpQauTtulovArgeit0Xi9JtSutLRVjbZlaKyiQ5EKw1+TKGLWPPF/3eOcKgnS6RJ5yysxJC//knJaLX9C9Ewu5I2SpVb6zjiePH7PqlQMR50gME/Nw0DZg02AbS+sa+otzVr6lwdJmw9ks/MP/93+xnNmSTphCekY3W5sytqBQy2KelRuC1STIl3+nVbq2GwtBojwmp8j1kldoAZBy0SqLi0jvFGbyqNdbjMGZCSuvGaeJvlvjXEvfr3nnyRuMhz3jcGCexiI4nxdjAExp3xqj7YgkpBiYppGYVHMxzpHtZss0jsQYVDXlqwiYv+Xj6BbmaZpORZRtpi0IFFJa7ov2jAFT24OmDPjlE95dwlnPqm85P+upRk6f1d9cbhrHttNp/H7O5jHno/nX6QeQXJLThqdPn9B1nXJiU1KLxWFAkkq82d7TrhtWjy9pLw28kTBTxO0n/vKf/Ws0OT1GVy6LYULpMjknbMoqKyU6C48IOaperLG5tJRU6D4nKchaLaLUqcSKffDZTr9q7MYcFTcWlfZhskSRYgKgGsvzOHPYD3Q3N0h2NM6S1+fst/cM+x0x6MCeESGFWYdSfKOTsGVDMBlyCsTy963pig1uYIiB62Xy/9tz1PvjnMN7NTKx1tAX8ZHadckIrnFYcQrAlw06l5hOuTrJZLw3iqA3Hrsgn3Uj++wZHL9xChec/lgd2Cp76UmywHH9LQoAjfc8efKIxjflWdLnaSrDHDGqhWh7tubRG8/wGB698Yw8zpjdyMt338PFfNxLDIr8UAdOMjEnmhgxyS+yaLHEb+V5G8raZo+FYy5WnFrMmAX0OPmgVGMEY47DlLkI+Geja78Egx0GslEpvnkMzONMDJlhnJBsaJqOnDIr4zjstoyHA3GacV2DEU3OYgh6fzBV80BBmYh27IKu0/vc8TPze794cP0GDn3ubbENV6cmB7RtC5iig5zVCdFpASCmaDlTDT6ElMPi8GetpW2USufssXg77Xp9PiqPRy3BH6SvhZet9/H035sHryVZsI2jazvOzy5
oGrUTNqIIbJpnhsOBaZp0mCsLz7/zNg2GaT8wbfbMN/ek3aixJHo+tctQAbeEgiO58MNdUgi/Xo8C9S/USIvTZPfh1S8gR7kPsNDRNHcwNRsHNMkPIejAWKEcmAjDNOFjpErzpJiJs1qG77ZbnGuZp8iq7XGYEr97wjSr+UdK5BCYpwFXpvh1LskX9aFImCf2yX9t3H6jFv8Xf1+HpLq2V4s57/BGN/x6Y2tbRGWfygR/EkzU5E0k68VLQBbWfc/jx4/o2padnC6H9ZoeK54jm6jwO6mLR0ntxBQC9GnwUTYr5aG99fwNrbqHEYmBcTyw39zz/PmbNF2jvEQsIQurzvPs6Zt879mb/Oj8Kf/kv/h7SNUeQzdXddA5ZpEmCz4XC8ykiHDKyjXJkjHOqllB1ge0cpwWUnxp8elDelL5oJadEhNz0k3eik6zjvPElCJaJJSKMWYOhz19v+JsfQmXmTANzOPAdFDJna5rKC4LSFIkVGDZ4CUJkhR5iklbwmGauLu5IRy2ijh3R72/b+OxJKe+pWsNNB5LwEtSaRNAJB15t1ljJuWMjRlrE7nkWzklUkxYYN31PHnyCGfs0XhB5FihV4QUSrujNsVPKmPgiKDWn/9scaXOMY13fO8738E7RwrqJDaOB7Z3tzx58gTftsqjdQ1ZoDlb8+T8Ec8vHvPds0f8X372PnY/LHSMDEdpOFPKItFF0udCzC/PXCiezhQEpF30MXUBW5CpWsWbvKAdxxg+TkmHqC5oVrQgMsEQyZgw62aWIIyRcZjZ7w903YpVf87w7BmddYyHHcNur0hiCPoVo3pPF3vCWtxKdWYrQv3OOWJOpDnzcvqIvyv/CfC/+fUF3C95PChYyv+tdbRNW6ZoDX0nS6dK3fKO3D9bbKdTKgNTonrUSF1LoOsaVl1L690Jn++LN/kaiw+Su5NzNcbps2Xsg39VX6sOo1tj6NqG77zzDtZYwjyTwkSaZ9X0NJa+72malnW/plmvWPUrHq8veLK64EnT8fd2/ynh1TUyziqsTi2JWH4vgCkUFSdZ2/xGn9k6hGtSwjeq2QiF3500DdTCsMoS2YW68GCTR69pCDMhhwVBtaICWiElpb2hihpd0yEZ7u5uaZqOi/NHXJ6fE88fMe4PTIcDcZ6wqLqKpKQuWFkF2yl7pSQhzRnJGuPGWW7NE/5e9z/4FaPu13NUxZ1atHR9Q1N0beuelMrGpLbS1HmbZQDMLPugItogNN6y6j1N4xaqUXlHviwxPerNZk6hKluACrV/PgUMTkCwBRwQ2qbh8uKCN5+/iXcOiYEYIinMDPs9McSyLgqrVc+b3/sOrfPKz54i4XbDJ3/xLvZuA0GLoNO4rcotGU18bdKvnLMmwjVPKl1pnF0SN1OctZSycrJjGKV8KQ2y6Egv+UNF/lWLNqaghV2J8d1+XyxgtRsztTMpCeM4cXfr8a6hbVbsd1ta65iHgWm/V6vUtiFMM3GaifOMLAo/ip/HFMizEMPETb742rj9ygT1KAPyMACWACyJYE6CGJ1ozmkinfmyURf9uxSBBmN0448B5ZoZVyQdEhIzq25F3/XF795hpbQJhdKmk5qdQjZILrw+OVZSS5dKKq/QPMiZ9LwFbywXqzWH3Y5Jsg7LmMzjZ09Zna+xTsnyzijy62xD07SkmPjLd9+ldR6TgvI7siwPWa2uC+aGLXZ5kgJWPMM0cNjv2R/2pJx5/vy5Cts6R4hR+WM5antTVEdyt98BmtikGAhhIuXAeNgzTANTUIHnjEFMIsmsraSSQM/TxHrdc3F2TmMbzPkF67al9w6P4DIMdxvm/Z4cA94ZxCr6Yq22gROC951ydLf3bDb33N7cQMrs726ZxxH/5FLlwr4Fh31ANSgVtzV432KNI4UB64oCQw4FvdYNJgS9flmU2xlSJoUAGGyBvHNSorczjnW/Yt31WOOoupsCqr1JXUJr7BoQbYubfIxbWyZcTUlyTTjlVJWfKQuqM5ZH5+dsbm4wpeJGEpeXl5xfXjyQFyJmjPG0Xc/Fo0e89eZ3ySFSaSOhOKhUsv+pKLzPgk+JmBOVlzBOgyJdKeEaz+XlpXJzywa/IMSmvH4IbHebZVPRTV3jd5qmYq03lWFMyFhMziQyYc5FbWKk73as1x1vPHvOqulxkmlAE+SUiePA4e6e3aN7OtfQen8idaMFXxBtrRrrsU3LZn9gCLrRH64/4Hf4Z7+5gPwFjsqdPyKTZXLfeqw1NN5hjPISrWQdtomi/t2ivLiEFgxhTPjuiDqFeSKnRN+ec3lxyePHjxRxFx76G5Q/V71m6wyG4hpYEmJLkcByqhfNsgbrpnlsoxeKglEu4cX5GVevXkHSgiKGCci88847uOJ45awjIsrVbFoevfmc3/3BX+P/9R//J/iUFiv7LFWb0SwanCLaHm1EB/FMToiFaZ6IYS42wpnV2Zqm66gzFtM0YV2zQB9IJqbIcNhpm9VUKbaZlAIhBsYwMc2zvjc6MGhF1wxNWkqS6tSC+vx8zVkv5H6NzZnz9YrGa8zHacLkrBI/sBjhHG0vDdkYdc4aR5z3pJx5uz/wH/7tT4G/9VcUoV98LBQRLIhFskMKPcqW/0gZIqSoBZSIxduGIKpMoCw2p4IqiBaa6IDu47MLnlxecLbq9V5RQ64w/Ysaj8k6KGqyVZAhcRKzOsTWNL4MCwom655lMYWvzBFoMdC1LZfnZ7TecXt9Sw6BFCbCPDJNI2+//TZPvdcizXn2YUKs4+03nvO952/xo2fP+X/+n/+vXP3rH5Nm7dDmk/ZY7QJkiZAzvtAb4jAgrIoCia5TMUYysFqtlKJTOLjzPNOaY7GZUiJmKRP8ka5pSEnKrIOaFMxhZghDobYo0mqxTARgLrQtw84Y5mGg63v6vmfV97SXja7BxmtLH0jjwOT0Z3NKtL5RmlEBPyzKNRejah/fWY/8h3/7jq+K269MUKtAdD7h3xyn4bUlst3cEVJA4oyEGYkjb79xqR6uUib5jWHMQTcgAYmCxMCqX+G9JwcVOPZO9RUtYJyAE1Koi1xa0DzKKMmDtLmcYxXsXzioerLHB2hJrDP77Q77xqVakdmGtnGsz9aaHJbP7qzTBNroAnXY7vnzf/RPS2sKHSJByNbRlglmY06gdxEVuk+BGAzDNDDMA8M0kXJmPwy0qTpdKTI1hoGctbqpHBGRo2d8KnyYf/iP/xHjOCk6IBq4MVvmMhARUsRaj5lExa/bhosciOPE9u6OMEw4MbTGEeaZUB76vuuwbaMor/WMUyAcBrb7LR98+HM+eP8vef3qFZu7DeerFX3X4qxlnTO2WKr9to+aoJ7K9QCkODPPI+M84p3hcNjjiHizJoSEMQ3OtIgcSJIQ24HJOjCWLI1ztG3LwXpiGdBbda3yppc5d5b/K1vgyIf2pi1mC8opzEkHVHTQSS0lQ7YnPNRSZ5fctqJXOSZM29A0Fu9amsaxWq8xvnCrSoIqGIxTzl+aZv7FP/4n+uyKEIutnxi1Ql0UAah8MrQ1myI5RcY4cZgG5QmGqBP1xtD3PVmUtwRWEa
VsCTFzmMYyiWqXadIQAhnhT//Zn/D+B+8vUiSCJUtEkiEkFUk3VieDs0SExHp14GJ1wXQYmLwWs71vyYzMh5H9dsv55RnduojQG4NYsKbhLpR7SqZpHPf3Az9996fcXF3z/tXP+Oe/4Zj85kfWmDFVV9oToyacKSfGnNiFA08fr4vEHwtXNeREFFuw+awc/4YFsUrR4KRj3a9Z961Kw1lFXY8MTO3Q2Fx5ehZTqBq5xA4FnfHeqyGE1TWZWNbE2gUous+VT28K4htzwCH4xtOtWs7OVtreLpqt1qmBQvKQPYQ08uLlB2zCgWch4VLS89W30KSytExt7ayJYHNWIxgnzHFinieGcWRzv+Xx06e0pdtgi6lB0xjtTpGXoivlozNizrq5Y4VPXn7CX/zFny8tZMksw8KSHGMog4kYfBmsssbSt2uMgTQH1v2KVdvROI/FEuagvGhnWXUNeItJlYJkiJJIZJ5cXtBYwzhOhOHbQU2xOIw4yA5JjjhFshfGcKD3Bu/Weo0WVDAX3qc619epdUKg8y2Vn5+jYIzj7Owc56s9r2KQqcSsaEAq5UgM5Y0wJxKZlbt6qissWTDJlIHf8tJ17UYTXUmJeZqZ57lwYhWRbdqWyyePaNYKTii1wWunxgmBTJBEQnjvw/do5glvSmF1ct0MR2Qz56iIbxYkpmJtrMjjNA2Mw8Rhmjg/P6dfrQq3F0IMuKYhlXVSpbRSeb2EtQ05R/0zwmE88NOf/KTYoVfteYGiXBBzUlnJEnt7JgXjEJWZEqH3Def9ClaB3jUEMeQ5IlFdv1brHtv40ucoRkvZkq2uv2q1/NXHN0JQ6/X7rLRDzjp1mVMmhUgOMybHggpVno/FNR4ZY2ntlcGinLHeq++2iLYvDZhFauIh8lkRySK0sPxcHR6ovBdnFbqP5jQEjq9RjyzCNE/lwxmUqW+1vVgFbcUs70cWwjQTNiMf/Pw9TCj82YqMFVktc/JWtU2RU9QBIwLjNLLf7zgM+t7TNLHdbcoiafFNQwyReRqZ55GcI62zNE2P5MT9/S273T273Ua19bwlJeV2pFIJxTLJGpOSrQ0ZMxmc82z8hjPb8fOf/JTd7YbpcADR1sJue89htyXNE03bIDnjGug7z9zpNOqLFx/z8ccfcHtzS46RedpzfnaG9w1uOqPLD27ab+04lRU7HTKbJvV9r3Ibc5honEo9URYeay3GGbCZTPUWt6XdIrRFl1Jfv0qI1a1doZ3Tq/CgWV/3fKMcLCe6GfsyUa2IlflMkYOem2GhAIzjCGctpigMVOQpn75nQYYka3E17Pb8yT/8L5XrFiOSpDjTOMQ9RE/VkxpMVt/kOAd2w5ZxHDkcDsxzYHV2phq887QMPlrnSVGnQOc5Mo0D3lqatsUgDMOBq+vX3N3dcnd3S4wzTeNOhoLUgSbERMhRh2sQJGaYDfthYBxGbq5eE/cD815RJ50CD0z7HcNmo63AxmOc11+NBVH0a7fb8emnn3Jzc8MnH37I7c0NeXvFv/ftCF2qJmO9h3UtzIVGkVOAOahXt2gnCdE1MJeBHYwKyqec8KIJlnNaQKeU8cbqYGiV9CtrduXGHYsjOCYEtdVdBrbKV+P8MtleD3PyLx9+OCCr1FVrLb6x+EZdBisQgtE2rBjtSPnGMw4Df/rP/4w8zeQYFegorVFrKMXIaXEoix86KTIMI9M8Mc0j4zQyzppw5Jz0sxSbVltkgWKYGaaZcVZ+dHUxjDGw22+5unrNp59+wvXN1YLgSukaUNr7MUUdzgKyWCQI7TjRHwZ2zZZbe8W7f/7nTPthAWZyTtzdXJNTxLce23f4FDXBSOBsVIkp7+j7nmmasTLyIZtfXwD+kseCoJaYjFGvfZhG3KrF2QYVK3EYtEuq9AmLtaYkVboXGaN8U1toLCTovPJQrdF1OpeEqSQnaDvfHPMPU+P2uKZSwIDGKeKZiCdc0PI5lt+VaCr8ZGt0AKyxDd4ZnDc0qx4pIB5WefeSC0DgLTFHPvnkEw6HA49SxhR1CEHX5OUZN0p3yClj6uxKiuQUmMPENI1LxykUy2aMWlinpBxo5xt1OMyJeR6JKSM5LblbRWKH8cCLly/4y/d+Tpa0dJ2zUKYhc6FfJVLSAUchFZMO6PueGGZurq9IqwvSHGis0nxyiOy3O25vrukajz1bseRymdL5MZizMw5nAuy+Mqa+WYIKn9tw64KWklaJMUZyjHhzMrFbFpimdcgwI3K0d6vTvk3TYCXjVXDh2N763LtW1P0ouVB3ft30KxHhs4SEk39fFv3Ks4uF+1lF01XzUopN8+mUnCFH4fbqhuHVLZv7e86dJ0o+DnLUk6l8lkrkLk4Y8zixj5lhLm4qIeJcwzRNbDa3dF2nvMG25bDbsd/vsM6zXvcIBuc91sLV9WumcSCEGZFM17VYi/L/ZkUv6xBP5frEBGYOjHZi8AcOfs/H9x8wHUbmcUKHFQLbuzuuXr3i7PKcdr2m61rOHl0AlhxmwjTiXXU9Ui3WOQTEGLxvaA8DeZ6/KqT+yo5FqqdE1NL6KIN5IrrZxxhp7ElhVNqZKiuVFp5mJZsLurmqNmXhrD3Y1B9u7cumWSgG1AW1uvGYY+wum7ocn4HPik3p62nrXMomXi3QFAHTV7F6EXRK3wjzODHeH/jgvffpk+BSKsmfyqXo1N7DczCiiWGadQBrt98xHA7shwMpJtq+Vw29uVHJIq+J4HazoV+dIVjaxuP9OV3X4bzVzf36JcNwIMQZ3zhWfa+LbIkdRa7iguKm8pllnhnHkXF/4EauONgNMiWVOomBmCP77ZaXH3/MHAN4R7fuOb+8pF+vNRHOif1+x4uXL3j96hXXN1fsNltkOvDsWyNAcRpBpwmqyuvEEHGFm1hpVgaN2SwaF9WOMFVOuzV4oxrHyLG4F74kbqlrsVnW+7o2W2Pw3pU1qSS+C1ez/HCdfseUoUMWlEZEsNYvyalr3CLhtigFFDBDpbYyh82On/zpv4QhEGMubJkya2DQz1stFUsSbFKCpMXVdt4QY2QcBg6HQVH8nDnsRzCa8FnnwVimcVDKVUjkJKxXPb5paNuGaRr46KMPub29YbO5J6dA1zYFWU6awheu4CINiCbTEoRxmhiGgb3z3CfDz3Yz82FkHkdyCLoObzbc3lzRna/x656+bWk7BRhM49W9z6iLFSj168tmRf4qD1MoTCJVlkvNOmKI5M4vck+G2um0iGgLv9p8LxJghkX0X6z+K2+MyvjxsAA6LaOOKj8oel/W3ypJZZ0CWd650hX+mofemGU9slYLmcZbnLe4RlUyBIp9rlnOQD8TzOPIu+9+SBxnNVZYiNKqb62UwKo/RHlIMmTly0/jyDAdmOa56JIOiLFM06Q51Kyyb847mqYlxll50WFWaogxdG1L17aEMHF9c81ut+H11Sv2hx1tU8AtUXnE+nxWkCvn+tyrvJcxhmGcGIeB1y9eMHZbldkMQXOdHBkPBza3t+w29xBG2m6l/HnjaJumFGOGvtBrvur4pab4rUHF560gkgjFnUVSxjlTFj/VhHSO4
n1+WELomEGqbpo30Nr2YSW0BNrp4nlsFx2/W9AvMeQal2WxPEWEPpvsKo/WkTGFD6IDVnLyXpnKy4A4Bd778H1uPvgErC1TobkMYpjF07omI5JFSe4pE6eZ8ZDYm8B23LHZbjDG0feWaRzZ73akGHXy0XlyNqzW53zvBz/k4vJSOVnOcnt7x3svPub6WjUfnTOcrXpy12gbdT8wxComXbQ1l41CiMER58Cw37O5PzAdRlIIWKsyRa5Vm7rbuxtCTvzghz/iO9//Ls55druBVWP567/3u8yToq5XV3eEeWC7H3Eu0Oz2bLe/fU/orzqcs1gHkpNOXqYMou0eWxYLX52mbCKkk7gpdAHnPF3X4yUUHb7698v/jn9cSq3j8ln/JpdpebI6ioi1C7H9wYua02jXYaLq7pTFLG2uKBlDdVqqbV6LxMz1yyuGVzfM86x8tpzwxuCVQMjRHorCwwGbIjnANGSG+8R2LK5l06RonQib+/ti2KEJasrC2dmnPH/zbR49fsLjp4+XhH673fLy1UveffenXF6c4S2cr1e0Xj3dw33UxSVVrVhZ/N912DIyDAOHZk/cDTAn0jCXWYhYWlqBn/75X7B6+SlZMo+fPOGHP/oR73z/e5yteqwRxnHPzc0VH370AdM8MaeZjST+5Le/x3/lUa08Qwg4VYjSgY+SmznvETMhJOrERLV3ttbiradtu0X39wuPWl/DEVkyWrQv3HprtCApSaXUZf2Ew1y/VxFX5Ji4pYzGnFM+YAZVYLBlgzfq2S45kubEsDmwvdny6Z+9y9o6ZhGyKXw2GnyhY2nSUz5DiuRoifNM3B242d3inGW/36vrU+lU3N/fk3Oi7VqatiXEyN3drTo09T2rdcuT7pK2a8k5cXNzy89//tOC+mcuzs9A1JlQpLRBjbblVXZNCqVHY3iaJqZmZDCO+yFyP14hUTWCc0rKz2vgp3/xYz598QLftbzz3e/y5ltv8+jJM5qmw4gwjgdub664ub7j5mrGmEe/8fj7uuOBzJjIIoMouSY+dU9W4EqN+9RBEtuUjV0HiU0BPaRV5M8XYfq6BuvxeQBNzBGIkpNzsdbSeK8UAWsK//QhsvrZ9bsWcCEnpS9ZhzU6pFTNhnL9TKJxi9EiOKdMjpnN9o5/9o//CeagjnrVCcoW6qMzxwRVsp6H0kUSYZq4v79nOw3EpNa2m82O1dk5IsI4DmSUstCV4cJp1A5tTvrc931L27SsVisOw56PP/mIYdwzzwMXl2ekODCOI3MIem1L9q+ugkVrOWUEp7KhGPx2x5nxpJuBG6NuXGGeyfNMkkgYLdu7O376F3/BlBPP33yTp8/e4NGjp/Rn51C4wdbym0lQK4SZc9KprKJ/uJDkTn7MGUPrlJCcpQjQi3I7pUyyG+uhSLxUrmuVRliaS+Yhb6Mmj0dkLGqrslSyKaVlk/ui8xfnCLlOtx0/E1DaniWdSAkZIx+/9zG3H79iuNnQWaO+0ygP6uElNsU6Uh8MJ8J+u+FuFqa15+7+rjyAKvgupng+T2MRwvbMXcuqb1XI2gvGgUhmDgdiGMlpRpJXtISoFqfW8ejROXm7oxkMwS55DVU0uzIWsiTGeSSmESnOX9Z5xunAq1efcnX3mpgTtzdXbDY3nJ2dIwJPnjzj7T/+fX74vbf5yU9/zt/9e/+Ijz/+lGGeEMm47YFPrl//UiH1V3VYozzPWCYYy25ZEBtdRL0ryF8jEOYFuV9KFxG6VY/LlqZtFgWAXHWb6vMBJ21HijrvkYRfq1V1W1HUQYo2aNW81Z1fnw0pUJZOV6ptb1nt1Y2FYzqcC4JjgMPdhp///ENuPvxUW56N16GE0vZZu6qZVwKk8HdNhjzOBDMzt4ntZsM4DloMOc/hsCflyDBGQnB478gijIc9zsJ63XFxsUIwxBDIeSLFkRQnkEaHAkxS4Wvf8EbzhN1+UGFyo0VEAkQc1hqcM6Skrl1hmJl3B9I0K381Ru0mmIaPPv6A3Xsjq7OzYnsqPHp0zu39nnG74fH5mv/Gv/E3+MEPvstP3/05r15e0dwPPPt2UPm+9EhJ76nSNkrXpHc4Z2kaz6rvsOagxgTFYMOYMiiCxzc95xelELO1BcpixlG2xiI5VhAde+ww1Q07i677HkMsXL9UkCZ9wZP0oCRnFc3SUysSeU6VIFz5kYXBXehKYLBT4t2f/Stu3v+kUG8sU05EyXgxrCyKRFUktexBqTxTKQRubq6ILnF7f8s8TWqy4Tz7/Y79YacybaGlbVuG/R5jlHbzozef8+jJY+1CxcjhMBPigWHYYu0KTdYTq3VL1zeEsGKcZvaTDhHWAR+xmtAYlCKQJYEocHG436qyRGnNGgtn7oL33/s5pnFkY/jkkw/5G//G3+SHCJcXl4Qp8OrFJ3z04cfc390z7B3w209QgYXfKSUEY1JJsBRV/1V/xhYOYkPbZcKQj2Pt1FVW0X7TOjBRW9G2FPm1cNUA1dYzRQi+nMfSFTUsFr1CdVACK0fnKymdpFNi1oPucQEEMpkoym0+SkEuMBlVaQEMLhquP3zJ/sUVNx+/4ixBlOoGJTTG0pbk+HSeJhdYgSzEGJgDjJPSU+Z5YphH2tUaaw3TNHIYDnRdp0jrYWQ4DMQY+P0//Bt065Ui0KI5xt3mNZ9++iGQ6VoPElifdXR9S4yJeQ5s93ty4eamGEmihh4SS6GVMwNgzh+x224WzqmkQJoDeMM4dsxh5DDviQaePnvGW2+/ww9++Dv89T/4I6XKFTWVrzt+qQTV1CBJujiJaMvUUD4EdeOVZdO3Vh2XKrqjE2hFMw3wpTlZuXeniaWyn4pTh6hw+hJ8Jehs2f51cUa1Rr8gN10eAhHGadbAssfhEK1uSnUlWtlv7+54/fGnxM2gHvRGyOuOPEQV869StKVbILYkzaKE5H2YuMmBZBrGMC/JwDzPbHdbjBFinIumZEYk6EaeJlJuMUZ5Y9vdHdO8J6WJOUBGNSSNNYqONC3+UB5+8eCiDv54de5oGk+/blWNwEWiU+UCYx3WC76z4DJRZgRht73lo/d+ztnZGev1Gokj1r3F82dPaPwfsttO/H/u/j4hQsrCLIaPd98eP/PPHnXDioUeIUshsjRYlrirrUXr1G3JiMH6Bu89YgTnHTYd0QAoCSafbT8V0wgRwC3o/PJ+lPdR8lKZ1lRM9HjWbnk1rCFbwxhi0fIzDxbTRShFhBwScT/w4ic/Y7i6R2blgWdnIdZFFbUpLl7ZVcw1ZNVA3Q97blNk6M8YplGr6qzc3P1B73VOgSBBebrGslqV5J5AShMYByYxjgcOhw05j8zRFT/sSLWN7ZtGF8oQ8d7iRAer6ii5djgy3VnHmCPhEIlmVi5ja7ENiE3MaSQRGIYt203D9u6au5tXvL6+YzgEGuP40fe/yx//8R/wx3/0x/zpf/0v+K/+5cifvPh1R9yv+SjVuhb5akOszR9FD31BqlNIqmUrNXYsNdKb1pNlxlq020VF7D+zWBb5msrxyyd91ZyLfFUOUFQupHCVc6qxe1q2F0zLOnCOOUZEHFmy
Sotlowmc2JLP6uZoQuSjv/hLdi+vme60M+OMobGq2Swp05usyQbVqEVjOBYZnXHKXG137NeGwzSqykg2WIkM41C4+yr9Bzq0teoaVqsGZzNCKJcmE9PEdntLjAMxFs6gJKLMGOPoeo/1WuBN8xbrSp2AFCBAZc+sMxivjl8zAYgYJ3ivbXwhEcNESqo88fJF5Ps/+D773VOsUVrCzevXzOOBs3VH351x/S2I3ZwztilDwqVYr/bCIpAqNUM0SW0bT9vCdr9XxFuAsv/mbBF7pLZlAtaLCgRw1KzVo6g3lDJH01VZhiQxZVA5JpLR+AhZGMdRdWzjUf/2FKjSFgUY77DeEULG+qxGGKKvaTOLNa4p52VS4nBzz9VHn3L3yUsImbkUdwa9HrbIQjnRLkiVyFQec2aeR65vBrbrx+zGA9M4EmZVl1G0UwfL53ku678+wzmt8N7hG4N3YGyRJ5PAdntHjAPGCCnr0FSUGYzBtw7frDDWsDlMD7DpU/MgI4KkyDCPiERyVoUicsQ48M5qZz0H5unAIYTFjOmtN99k2u0QgXE4MI5fnyv8wgnqKURvi92XLpB5kUDxZdLOFMQnp/wgATTOsV6v8K1XXkkGt7QmYeFxHuPv+KuUimi5fEeEkPprZpFOqM1V/bdWS6cie1DneY5tKVNSB31DK0DWaf95nMriJiSTaBtDnjUuVBbyiBjoPpKP4uZG1AZ10paHs2ZJJKZpLOeven0ilpwim+0djw6XuEYRjN1uz+3NK8bxQMqRGC24pPJelPM2Qtt5usmR8eprjUpQeWNpvKPvOwzQ9I6clVsrRhHw9VlHd7YCr/abcZrIeSbOluhhHDzT4QwuLnj86Jw/+qPf5yc/eZcfv/s+wxTAaPvt23zoxKLu8qaEQm2RHu3gjnSNWlgZlHfXNF5j36icj3UPpLz57EYvy3dkaQfpn7Tirj8lkosOYN3ga98UMLVaL5xpEV2kyt/VNdVW/xApj0xK3N/ccrjfkMYJUiYgdJ0n5xmJWkRSBwOLrpAOn2hZGFNkDBPbQQuqaj1sxejkaEWei5qF90ZleOKkBVbqEZnZHXbc3V/pIpkicxj1mmSd5hVjsK7Fe6vJqVW91ylUG86io4qwOluRpgkc2LYkK+iAVtt5nrzxiLmobpyv1xgiN9ev2NztmINOtDoPT54+5kc//AGH/cDH9x/yn30LNvmvPmQJCeeUE1Y1HGtxvwjJi1G5ndKVsVaLeu8tLltNkurm/YWVfG2IlRLOmpPVtP5O6TE1WaxOTEA5T1POTaVlMAUYmGfEdAWlNeXH6wZI6S5kDrsdm9fXzJs9BHWQigbl2kYFSCQr1Wx5JVOpWbl0ETK78cDoG+Y4I6n4EWbDOI3L58kSSAmMFd10c7VuVqrUNI8c9lt2u42aYoRJqUJkco44C8Y5HKYkmsprjUVTOaWEbxVdbLuWbtWR8kS2GWzZK6zB2MT67Jz+co04y5wTjXPFXVFNVMI8EuIIkmgaT+P8t4KDChwTvJKYG+RkMLR2PMv9qpQRY47uSIUrSmn/WmtwxiEEjWso+/zDOfjjex+R/1Pdz8oRVv61FkcxxpOC6ti9rb8//Tg5qwZ068yDt61I6nGhz0yHge3Hn7J5fUPYTzRGhfidd7rmR5XuM4shzJFKU7vHKUaGNLDZNxzmQVvoQVvuOuRrS6ctE9Os2vLWAAlvBSQhEnSPz5kQJ6ZpT4yTvqVNCvlJ1E9gdE1oWo8ZRpw3+GSL9Hb5cGWvsRbW52sO871aaBMxJuOtwTqhaVT/VtXxMiFMHIY92+0903SgbTrG4cB+t6Vy5b/s+IUS1KPdprZKvff0fUcSS7YWb0TFdFuvF798rur1TGlJOuvourZUzccxECne76cdomWjrt8wfO5hPC7UslRtD1r7J9V/1RasclQsCFQ9i5KcFtAfEXX0cA7fd+SU2AwjJk00lAlu85nCC1jE2KVqpArTPBfagcEZQIoP9sK71QDIWdHSzeaCTCTlzM3tDff3t8xhRMgkiZpA16SlJBRt6+j7FuMsTVaBY4MORriiC2ms0PYNKc1EowlHlkTTWs7OV7SrnjnM7LdqQ6iyFYEYRsb9lnE45/zyCd//3tv88R//AVe3G27vtuS2IX9NwP22jyNPrnKpDa4gzEcBfWrlQy0lrLM0rdc2stNyyqGk+SPjqRZWn6d9VApBTSrVpCKVLFg3ZJUue7jJ11/0qybDZrFXrEWgxk5tOpWNPmd2d/eEecaUgblJEo03ZAfHCq2WOCXxMKqNqiiztpWHeSzyULnoAxtMSsXP/OiIAqjhxX5Lt+pw3hNi4Pr2iru7Kw7DjkzW5NbVuK38NE2gmsbRtQ02q/lEFYFXrU2jU9+to+kcyTpkKmoaNuO84fHjC3LjaLx2FZrGcXd7xXY3EmOZ1JbEkydPePr0Kd995y1++P3v0P7TX0OA/SaPcp8q3cGYuvblJd5qfFujnSpTUFXni64zilbbqlsqSwlf34KTW7kE37GwOoICVeRccl7W3OW1avIhDzl+OQshRKqxir5FLcBqQgvkzPbunmF/UMUCAGcJzmjCI+BSRXdO4ApT5bIKoiTCFAPDrNJ7RgSxDsiq4rJ0TPR5FLEqQzcdmKcDMfRM88R2v+Xu/prhsCPlSEizdlaMoIaVimJZYwpFyNF37WJFnZIsSi1t6+n7hjEEXFuQ8JIAiBGa1nFxeYbrGhJC4xpWfQsipFmHVU/7ilWO7NtwnA6b1tt+5Pfr/l6WQD0qOleSUUxRNHG2zEaAE02ebMl0l61eTtbck2S0Jr/mJG04uk9lBYFKUZ2lqlOUf/lZBJWC/hbHttOUqcatGgKVdr8Iu+2Om9dXpO2IyaLmLhly45Eci+xkHVQ8jmMvglmFpx1CZD8OjHEkh6QmR8YW2UmrlBAjpBSwVsjiEUlkibq3ly7ZHAL73ZZx3BNj0IfHFvH8KqGJ8r4VHFCgMWaPGO2Oq5IFiwPYxeU5835PnEFiLuu3doCdh27d4s56pFG+r28sIUzM00DjHfM0MB4OGHPxlfH0jZykDJ8Pfuccq9WKJ0+e0K8UEm6sYd0auq7BmaJBVjyDqUyLYr83TzOCShg4Mo0Vlf1I60XqZNnQj3FWJgHd8Zul2sCoVaWpRCc5+XdyTBmsaCupJti6KNjFSccuDkrls1vo1x0Xbzzhsj8jhpm//Bd/wv31yFvdmtZ4DcDCPdFCsbQ14lHgXCQT4lxaSlotW5dJSRc27QxV7crEOOx5/fold/e3xBi5326WZ6ZW7jWhrYNixgiNM1ysV6wFIpSWrGGaZkSEu80d7zx7SrvuSBIwTuitI44Th8Oedt3RrBqcUzFjESGRmHOgT5Ht9rbQBDwXj57z3/l3/22ubu748KNPuSex89+OhfKLDmN0arhpHM4ZHe5xnq5tlUBvtXDIIZFDLBP8+nlccfLp+16/lVNpzdhlwSzTHvoHo0WOlJ5WRWNPNflSyrW+WNAl/bv6I0fErHyCsvgW2zhj1PHLWPLiR30srrKIPnvWYvuGlFR3N4SBhkhn1W/84bN
ShhBEUZ+aloSkbilZVJZHheMpEnF1OFLbVJvdBvNCEaphOHCYBq6urnTytDgZ5Zyx1ukGb1TnFDK+sZyd9TRtQ8iZ8xCLi4omL9M4MYwHXANnlz3DXp8rZ5UTPgblYPVnHedn57RNgzGG6+tr9ruJkFLRlMxs7q9YrRtWveP3nj/h3/mNRN2vcCyFb0lGDGV4z+AbFDk0haZjUB3Icg+MsKB1q3WHa1q0VM10xRRCMkjhmNZ+U1WcOsVUddN2peiRZeglFzJfYfcVRv7phktJ4KwKpFOeE+tOMpTaubJlk9RzkJS4v74h5UR12pHGkr0hScYJtK6oVCyIbuHPHjMYrNHE8HDYqcc55bo5Q3GcVoDAVMTOMowD93e3rNcrjLHcbm65ub1mu90whwmMqtfUJArJpDzjxWFtQ9t6zs7WGKt0hliulzMeKd1E6yyrs5Y4rdjHCbIWHtlkhunA885z8fgS37Vcnl/w+MklOUV224nt5p7G6QR8CIJI+xljkt/OYZwiZsYKriCNzjkaowmPrhFRu1ao5jmiqihidI01VgdQ27bFG4MTNV3omu5kISwDSeQCcGlHZwnaumbWWC6xoOup3i/Vac91235QYMMJgopdeNN1IHoJsWx0YApb+M8gKXN7dc14GLFBZR6TEfAW2xhyMuQE62yWPtkyQFvWXjlZg+cYCHNQ3etMkZIq6GuRNjS1K0gm5pn9sGMc9wiJaZrZlKn9zeaOkJXeY1LGWg8lAc1isLgiX6aFVS6DizFFPCrL5UtHpl/pukKcmCQR5gkxWQepTKZpHW+8/QZioG3UJvbJ4wvmeaBxljAP5By+Nm6/WYJaC+BabaIuF+cXF7zxxpsYZ1ScOSeszFgSloBB/crr7i0U3lEI3NzekpLQGKHzhvPes9s03HvL44vLZdJYg8YfK3uAY729JM+q21iIzosWZ5EI+oIEu+JRdWrNnARlzTWMMfi24dEbz3j+6DnvvPk2+/2Of/Bf/xP+5r/5t/A3W9xugCGUQCstrxJkJmv7ZwozQx6JolJaOWuFn03ZYBBMWewUVdJJxt12o8lASYh0mr9ytSree/KrJJwx2FbVAKxrtL0kmXluS2DDzIzrDL3t6VYeL4bgLQlhPxyIZE3Y03FTrEhE03vGac9uf0vTdrTdGf/9/96/y4efvOCnVy/5h4errwup39KhcXy2PuOtt97kzbfexojgLawaQ98avMt4o0xRrWyPyHaMie1ux+4wkEVoEBoS5vKMJ+c9502vsZj1Phijw1e1kWCkRK1QJvV1o0eMOp2Y2ibXQ8wxZpeNVzExDHWPr8/Acb8/La5cY3nrnTeR8ye8cfmEaZ75z//+/5ewHXm7P6PxTp3Lgi7WpjwMJmeaBDEEgssEMsM8lwnsrDFmlMRiyIUiUXSIvcVIYhoP3NxEtrt7baXVoRerVAlrBevKgpwFkQhGNZTbhuI775Qoc8ILfv36msO80cHLvqGlY9U5PFYpOMayGw7MVri7v18K3XmeVWbGWIzNJNmz273iYudZnZ0xNoF/8JsPwl/wEIwVmlY4O28wriMlvebnK6/PrlftRES5mKrMoAloypbVaq0IUDFHQCLZTEyhI2SDsR4nbkFETUGss9GuisY0S2J1eiy5MzVJ1E1SjGPhxJUYdUbxJpGi0FCSDDFH3jOUJNFo4nxxds78OHPR9cQU+NM//zN288CbT59w0TRYb5BgSEZI1ViAMh0cCypEkROMGYmJZJUP6jFIBTo4Is/GOVKKbHcb0seJu80d261KVKnudca5RYBguU/KPy/JmXGsWk/TNkvKjuizv9vt2O23SJp5dnlBv+4Jc4fNmcYoR30OM8Owx3WOJncYycxTZDyMzNOsNsulRd33HdZ0v5nw+yUOYwTXGNreYXaiXGevBZWrl9uUrglqpekaX5BM5UKena1wbYshYbLq2bqmrnyl11NxACtlgv40Np0WPV9AN1t2eNHCuPiscyzLPhPjppowSAGQ/AIofOEhmVicxUzbkGLmdrehPV8zxgmH0LnyTC1s2SrhpOoPRME6S1tsQ8M8q2yaMZhsSVR6wAkYUoC1cRq5uXnFT3+SObu40MGq4cDhcNB7447DwJiMwZTBMYOIxzrHum9o2wtCEuaUdWi1uB+mlBjHkeu7a3wD7aol50BOgab1qtxAYoijIrtti2ucdr28Y7u7ZxxG9qUT8XXHN0pQj+2ZY9taJ8OE11c3OkHaeazJzMMWe+ZwfUOVrpGUgGJ/mkERP0VurDXqzmM90ziyvbec9avPtyxEQ9p+UQu5JITZ6CZ7+hOnG/fCgzVKRl+tzgghEr0pQrMnEH9ZJMUafN/SrXqa8zUmB6Rv+W/9O3+H6z97l7v3PmIcbyCrjWKdFgSwSZAkjGFklBHTN5yvz9lstqQsWJswUqQmyiIdo042p9wSk7aRrbVIjljnta0ELE09Y5aNBTIYdXRpvaPrO7LAGGacE1JScrdxglgdzDFNi9PRQkwWYo7IrNqA1qjemzGATWATSQLDeKAZWs4uJy7XT3j29AliHBsr7P/s/a8Lqd/KYUrStlqtiUkTwBQmUhgJUQfX+qZZvLdzSkhOZVikcJpz4esmHZ7qGh3Gu7vf0JvE40fKvX7Ij+IhHPXg25XSYsDYE/TKPKjotcqvDFOLNSo0vSAJi03qscASowLn60eXdJcNbz1/i+1ux8HD//B//D8ifHrF/cefMry8WhbbnEUH/KxdUKoUI1OYGPzI97/7PT558Skx6lCiN5aUdHH0TqdyjVGr3pQjLjsl4qdQkugirWJtmcit96agzAWVBm3nt11P07bKh06JmCKPHq2VMuNyUa/wuKBWnTZpQrLZbenkqGEL6pvetg3Ge0QMKU3EODCFDU00vC2B/8WX3Kff1lHbpet1z/e+/13meSClgEiitZmz3mMJBCIpjKQUNUkymgzlJGw2G6Twq00SGptYt8J+d8a68biVRRpKCZRLrvi5CNbzqQ2tsv5XhyXJBa3ELP9OFt5TGV4Rbc8ac+wUVfCjdgpqOYgx+Nbz7K038JfP+O7zt5jmiT/94GdcPL/g8cUlzRTJ++E4FSj1eUKHl7IObqWy/kpRUstJHQCdTSARsYbGmkJXawsXXYX6p1FbyyFMS7LjjA6bVuteTUrBJBWYRxIW5U+3zmO9urnVgUxrNIn33pJMxPWW9aMzbFI7T7KSE+YQORxGfBbGYQLZEGcdoDSSMQ6lGkXBmVndkH7bhwPjDU3nOTtf4b0hpZnzVcu6b1mvPMbW4l195lVRwmjOWmhQh8MBxoHG6mzLTMIkYc7PsKZZ1GlMmfM4znoU1PMzz/Gp6ckihbVwtzU2j9QUHgBWn2NO1HyocsTM8T0sgLU8efwEoS8d18jrf/XP+eT6Je88f8KZb/Q+i11ivj5Py3OQKzcf5hgK1zoXUM7qoFbSD2owZTjS6VAUiRBGtts7pjCTUiya5ToI1hTuuinxe3yo85JjeG+xYmlay8rYRVHDiBDmQEyT8rS9xfWOznR4b2id0+FG5xYqj8kKSI6HA/e39+pah2
MOM+PQY9I7XxlS35iDenqD64R+SipNMI0j5J7GqVNP6FqEasGnkh95IUjrXan7k3e+iCTrxG610stL9fNFsPspJirLTeZkoVvw/ZPDGW3h6qKYCGHmk08+5e3nj/GPzulbT51Yk+XmWW2jOkPyhtw42otznr/1Fof3PmXj/MlZlCrsM3zYFHTaXhqnyBAssHoNu5QzRCGJSj5p20jbnlAGEbItlWLlPh3vjN6TCFk3f2c72sYRopKmjbE4p5wb5/VcjeiXF6PclaT3CdGNyjhNUFWUu9q4qmdwiBPjuOf8bKZrO548fsTjYUeM326tnhgT4ziTcsbbrAnYPCAeLtZK+agastXSsd7Hik5KVukvZx1GpGjPWR4/evywuhb5/Gr5mUNqd+H0dkpltdb+Ux0wLIum1Tiaw0zbtJoMlNeSshhrV9bi+pbG9vjzlbZ3Ly/4vT/6Qw6rT/jgMHF4cb083Kdc7gzYrEljyoGYA0+fPOPlq9eEFPQZLRuEyslE5gCYjGt6lXrLAROFnCJQhMatTnme1FdQngGRhCmGAdYI3hlWXcM4F1QaOD9vVZaqcDBtVsUAm9Ap8piIOcI0PthcFl6cNYo+W4g5MM0jbezYp8CHv2RM/eaO0r0qBg++TDSnJIR5InqhcVXtFuZZKRTgl80+Fe9uyZoQeA9GLONhZOc9JsPq8WqJgWXz/uxR+X5lfVtQ7byklWVjrxf92NuxmIUqZgq1akEWK3JZ7o/Uvds6uvMVq3XD5ZvPGOeZ9RtP+P2/+cc8NZ67Dz/h5ucfLAjEstGbutnruUhOTMOEf7QiZ1WnkJSXjoUg5II+5ZwhJ5IzpGxJKWhREEM5z9LlEgMnMl36qFV17rxQBZyj0AQM46RSfKuVLyY1BkUGDO1Kky4nQKLYq0ameVKN40IFylEKjUOUg+1AJ6kjiwj4b/EQjsjf+mzF+dmKnGYaB603eK96Es5Qpur13I0pe2JWJDxm7VJ5Y3BWaGymyYZxGHGtFqV2ud4nqOhpriAnXzWBLRQYd9JWNsurLI12KrprcKWo1s6LLXSYB8op5thnwxpc47h4fIHvL3j++BlzCLQf/SW/8/23eePynHm7Zby6wRS0/DR/kWW/0UIrkxjTyMXZmTpRTlM5/4ZkdP5F18pqOqCKRilFYpxhruhoURkqEoJSbJRZOiamgHMav9Vy2zqHb7tiuqAFv7PCWWoRo9q1trV4UyiBxuCiLch4ZhwnVbVAwQnHgLqMaf4j6RH2a+L2myeoJ1n+MsyRBW89u8OAt4bsNcNOyWuzx5jF7UMe7MBHDbE69a/OPVkrTkMRhS0DUw9C72Fyemzja3ApUb/gUMYsa60xxTrNaqDlnNjtttxeb+n8j1j3LRdnqwV2r0EvBrKFaIVgMslZVhfn+K5TbbTalq2/yPFa1W/klLWtFfIy3AJ10lQDPqpiF1YMXed14cwJ7f/aYi5Rrqetld2RMG4oOnNJBY6NyWWiWpERb0WTL6FqshS02eDEMs/HB89Yg/dK9K88Te883lm9xiYR08xw2DOe7WjahvOznsuL85PC4rd7PLgHVIK8YRpnNpstcwicr1vIgfkwII1B5EIfrlwl0mqOWTjFtbiCxebRGEOKUa1HT7JTqdwROV0Ev+A80dfXuDs5aq3DUYZERdn195KF+/sNrb+g8xZjNGY0OdVENRswXoX4szPQei6ePuHxszdYHxLXZx+dJBQsi2TOAo7FbKIm7OvVGm8cQWIpInUCOQnMUXl4GOhXHdXiN0gqCBtL+7aqEVTdZFMSm5wT2nbSDNKaTOMdIQixTKe6lVdqzCLybGjEYBOESTc4gJACzhUqkFFKkvNW3WmKYLfKC+nCfx0D/7/f/h7/BYeundvttqB2mRRnDtt7PGvsuqEOfU6jShPJ0kuV0gUsRS2q5NF6yzzNbLd7LIY3njxbYqfukfWomEJ9OVXBOHLksPLFwV0mlBVZqkmqIv2gihCVV6gSaxwfn7JvmMbjXYdZNVhvefTWc/6t//bfYTXM/CRkrn72wUleIsu1SjnhivKG5Mw0jDRPznGt0pxinAtSWxJUUWHyaQZjMq6xZHFkicQoZRjFFlk3iz3pktVCsgq/V8tj7XFZzlYd1jpiUOefrnPH2cRitmC8wwm4snnHMZEkIUEL6ZgSVrSwMOiktDE65HLUcPrVouzXcVRBfN/oMGnfduQ0E+dDaQNnrOkUaY8KEMUYEdeUdac4/aGUoEBWlz9vSM6x3+xxZ4ZVbzid/j5NU+thTr+kDnxWMKf+r/6wSmQu62d9FmquYmzRE61d5CP6/yBurSoQrB6dszpvefr8LeaUOH/zDf7m3/43WYnwwY9/zPsvXwPNCQBR89KyT4modmyOTHHku2+/wyZvOOwPWJeLAY8my86BFP33lBJY0c6VqCVytVtXgMoer9fyzGgXu+4tNZ+yxtJ6y3rVEWMiRCmDXRZje41Bn8u8lYPGqGRWUm3ZjDAUR7aaUTnjSFHd2Yw1eHpWXxO33yhBrZOa6uaSsEkXAu8sjx5dMgd1eNnHEUkjTy76ohOtD7VvG4glc67XRaS02jVBNU50UyrkHqEMYHCc8q/4qQHkNMDKXy4SEbobFnHkk8TY5MXmzFnLbrdjc3vNO2++yfwsLidXJ+tqsgmA80QMM4J4z5gzwzQWGLu0KCqnRNAqpQj36kavG/71zU2RzGF5IBKijj5VT83WgbKs1bNRQvM8q26rsUZdn8piry3pqkOrp6uiz0LTWqaQ6kUHe2ze6TxZ4YYRF7tV5zyt93SN12tlXGktF6HjmImzZRz23N++5uLSc37xmFWnarTfhuNhMXU36E3cAAEAAElEQVT8vYi2Pucpss8J74R5DsWVRmMmG7W8TVnIZfEwRpETRX0sbdvhG4P1gAXfNsQcl/c4rpelWDqt7s0C/JxU0XJcNO2R8lGRU2dtSVQhxZlPPvmED8OOv/U3/4hV15bXWyADFmF0hGQhGEG85ezxJSEndocDIcw0lGGF+szUpbnoGqfSJjUYXr96TSrT9oqyavxaq9cJKVzD8iWSybFO7VpiDJgc9O8bW57jUsCmVIopwdsG2xmcyTStwU7K1caWGHeKLpQUSO9XTogVbKPuRiTBOeVsO2vo+5bO93ivA36N9zgDcZ6Zx5F2nvjjX38Y/krHaQt8s9lgDGqz6GC/23HeO/quausWW1MUWdI9sxQGoimid0ZVDTylANZBvZAix5X2YX8KyrNUf63gRAnVlI87zHHDLYMrRt2e9MMoWpNz5vb2ljht8N95TuvPqunVET0qVV2N3Uky0cHls6d0Z2su+3NW6/WJvE95irTSUcTY6OeaJar/+G7P00eXNCGSQizPhiL7iK7N8zjRNp6+7AEimRjyktCIUw3To6kMJWmXkhBkZUKZoqVtDW1jMM4gEoCMV0su5fiXQaLFYCAbcog6TCOxmA1E5QSL4Gyj7dy2oes8bdvgGwfBVi+P3+qxuL+lTAiB8TDQNIZhv0HiRNdYHp91WjfFpBz3EBFCSf7qeqCv54yj8ZaudTTOcH19o
5deKPeh/uznY/b0WKhP5YXzKep//CmdSQGQstbWLbNQqMZxpLGerlES8umaLiiInS24rsVKC73Hm5Y3f/A9vvvXfsR5hu3r17zPcZagAmqCztFoLOlzOU0ziczjx0+JMXF/f687hdE8Q+nfQkgGZuU/N8bT5KZYoR9BGmME69ySiC8dkZPLIFlBBs1BPM629J1nr6P6GJtorNB0re5PZQ7CYrHisbkMGBcazTgfyuZptXNW5ZnE4nG6nv86ElSEctHS4ryAUWs95yxd3zEOew77AYkDOT9TxxOg7TrefOMZ6WZHGkMR0tdMtV316mnuHIISZo11HIaB7XbLQtxf6ho9mSMk/ZAzejzdgpqUfn6Vo1BIPOO8Tm63zhPHgdZ3OBpEbJnqLC3V8tSIgFhDlKTtTWP5+MVLXl/fMB8G1nJMJAoAsLR/rLG4bHBJPzatLUimtpNMtsVpUqsv71QPdhHPFm27X15esN1uSSXLT5MOI1hbJlmzLg6td8QYFxHfvu/J20QyqaAaWRfGUsbXylwk4psVfd/RtZ22V6iJ0Wk1qhWe2rnN5BwI84EYGnIavmKZ+Ks9HiSnpcUhotZv6/WKEDPDuCdOB6xMNK4nxKxVcIlJShKEoQw/FTkXY2i7Bue06MFajPPKa0UK+n+00ltKqyVej9+Dh0tlAcZx3iyJtTU6UOSco/Ueb9WycXN7zX47Ep5mco+2bxGKDEYF0BBrCVkIwNnlBbth4PXNDYfdAf+Asf2ZawgLiowYPvrkU9UlLRt6iBHfFJmpIj/iXVPiWwp6p1ze9dk5c4xqGSh1cMYh2KLHmxDUfjUXx5iEsFr17Pc7tTFlseuAqhpAkXjxFX3JavtnW7qizGARusbhncdZrwWXFPpiSkgMbEPkn//KUffrPWpimJf4hTkk8qj6jbVNLsaQDEQySoTICDr8YU2jLUAs3lmari988oz1Ht+0xKxDN7laN6sIpbYEOW7woG5WKR/R/iwVAYdajB2HnurwiSbN1ung1V/+5c9xBC4v/i0uz85KcVWMWvTdsaYU3gadgnaW/vyMVzc3zNEwHEa8ceV9j0hUzVmTCLEUmZKFKQTuNpvC3yxOOYbiMgRYgzcW65vFM77KrFGK+JxgGmdwAesbaschiZBCncmun8FwXhRZ6tSAllWpnKp2EMTWBKUkHDYjJhSpllzoAI6+62hcS+tbuqbBNx7nHd5ZcnQMv9bI++WPEALTPDGOI2GaWfVdmUKfMaKfMUkm5ETIxYGSoy6qtuW1ve7Lete1HueOlJAEhKQT4/VfL0EotdyXk3NKJx0tc7r0fubIRUuCEsaVRiDEMPPBBx/w9puPWbWetlk92BfhmKHMYaZpGoIB13qevvmcm80dq/NLnPc0rtXEbOn2coxblAKREdXtdbDbH4gp4RqPMfbk02nsxRyJs9B4LV4UKc0KCMBSSdkYVG9Wkxvqc5OyEOswu+ge753aV5uiymBM2fcLPcBYRy5dv4yu5VjITvMMk4E682JKdBtdC/q2UYBAPMxfHU/fDEGF5WFc+D1lo85lIi/GpK5FWRCrmytF+kC5qpOKwiuwiOp31WxbKxa1M7UaTFJ0z2S5f8sCdloRHYnFx/O1Bs7Pzzlb9/pzWWWY6r3qmo7Gt8RpZrVe0/Ur9bEWraMstgx0nczIi0rtYAzPnj3jg/c+YHO/wYdYFiRbluLysJUqTLIK8HsgoDIRgpCSbjxJlueqoL4WrE7N12RDgHmOxfZRIYIsgNVKRRGvkn9WwbJyzvOsbaIkBYGyiuZyihJHnf7vGk/bOLxTtNCdPssPNqESDzkjaQaZyWnUNlb4+sm8v9rjYaWcS5LpG8/mfmIeBvqmVtZJBz+849GjRzSrxMu7HdVnV0QHIpqmLe39BKYUM9ax2+8JcxkIKnzKr6bYGB6MoxjKuSnpvdJolFJgMEYn5n3b4NqWYb/Fu1YlfMrCXlGlKnhOQX1DCoQUaZqOly9f8/rFSw77/YLkLvSOuuXnoi6RBZ+hczpo5L1bniddnOoGrxOuZimwZNEzzhmdfkXbOyBl+lqTKBFTFA0yyarIZeLIGav8akVvH16vbAxKu9dFwCD0bUPX9AX9l8K9dGVAi6U1uiDBOTHnyM0vF2C/2WNB/oUYIiFEpulAOOx5/vzpQrWorjnOerJ1S9SbslZbp9xP33hVRzFGkQ2niPNp96m2BL9oLzfmWHTVRGDZpEuS4F3VxtWfsla0+PYeZx3TqNaNaS5rbN0zayxWpAsQY1WmyRj69Zqb2zvCnDkcDnhjjmvSye+Fk/kVQbtXSfcoZ5XhlHJSGkG19cXiGl+moU0BZEQT/Sx0bY+xlmmedJiv7juIzhLEhwlq47VgVYk0/btU2sxSNIkNeqK27hrG6JpuMo33YCw5Q+ssfdfTOq+OgV7tbZ11RxT5i6bafltHKWBzzgzDyDROGIk0Ze9THuRxz1Mhh4KCapZeWtKNOkl5h7FRb+piparggya15RqXmF14gfV0CsBW+1jqTld0UcoapcBSoaLU3ONkCCqEwCeffIqVwPOnjzlb9yWR1vetuYkrC2otrJyz9Odrbu/vuTSeEBLelPyChyYvp7huVThADLf3d0zTSMyJ1urz4KzlCN6VwUN3qhOrKLzKzemzF8K8JKjGUIqjoyYsRYnClbwrxpmYIqtVyzyr/rqU9UHpXeZIz0K0tKyyc6JSbjaXtaVwz1vf0PuWxjfY1BK2Xx1K35CDeqxHFnHccsQUlynbLELr/CK4j+jPhzAXQrSujRpLOowSY8Q7o5IhS6VeruDJ7TsKrNdl6ISO/wWr6Gq14uL8rHCD9MJbKNPXHQbLqxev6PoeXyuTrC1vBXCOEH9N9moLY9X1bO/vidOMW6brHjRx9bNYnYDLJ8k9NThFq7qUBZO0PRKNK4K7QswFUSgL/TjNpcAp3EjMkoTUagp06EqMXl9jTNGeVOF1JC+mAKZCg4WrtV716j9frGeXZ7N+qGXHM0t3AElK0JegwsBhIk5f76/72ziOBhNHR6IwR+Y50DWNttHKtKSzKrbtrCApkZMtxUpJ/sxSbC4oIxhCKNpxJ+9bi4+H0Vw3+uN56feUmH5+tqZtm7LQ6D2zVoWZG9/Q+AaToe9XeN9ijVuKOr2fctSsLM+g6uEKfd/z6Sefsr29g3GiXz7TafSaxbLYSJF5KWfvfaOmDWUqVHnUVTvYlE1Zha1tWbxq265Kw8AJd3u5QSe/nDzr4zQRYtDnyBZh8qpzXJIlMULOym/1hZrSNsoRtqgMjS3E/2Mifnx/yRmbE09+0aD6KzikJOoAMWrbbzgU4W7q9VcEe7VeY7NjSkffc11stevkvV4TTmOXMsBTHMWQI0VAPpuh6i6+nNfx15IclyS4aVxJxIqvuNHpd0X8PLnpkDDrhlZ4l+oSZI4nVuIjiRBSwhhD23a8fvWawxAYtzuq+cviMFT+r3QkU0CVXLpE9flXPkEKQbnZUpD0kqyaotEqQpleLudS5heq85laZJvl+ZfKcVmE1/W8lk6OnNCM6pCKQW1eCxCBGExWnVC1
jNRhgdZZurbBW6corzUlQdUkJX9BIfFbOeTheeSsPMR5GGi8sF61RU8Ubfl6h8sO4x2iIjGLe5k1DueUbmadUtDqPcqiw9gP07kjK9icbFwLPQWzhNUy1nSszpcEVUGmY/pRB97AMAwTh/1IinlJgo2p788ylIWUHkbpTDRdy9WLT7jEMY9TSQBleb86xGWKxfopxdFg2R8ORQ9YE3C1dq+DhfrsWecexG6NNS1QNUGdh4lsKlBn/v/M/VmvJFmSrYl9e1JVMzuDDzHkVFlz3aEf+o0EAQL8JXzg3yRANMC3Jgii0X1v4/Yd6lZlZmRMPp1z7Jip6h6EDyJbzTyyqqIqKqsyNODhHh7u56ipyt5bZMlaSyA4RUH7vmwJqvcgLlhcN+0OWtIphpvK5rnc35eeAeo8ooHgQX1iRdSVJSWmYWT0kRQDuMD3wVk/aNQpFhrSlGuy2oQk5z3jbtRqsl2CJ2f18cIS19b0wVdLUEvwOs7NKfqC9zah5vI9ta2vVc3WOHKdeN/TgMtP0zSy2+2ZxkGtkryQQmQY1Gy95Mq3X3/Dfr9XioG1g2rTiUsdJb64mOjM5lLVwL3mqma3Vq5L1T/ZrBEpztNiJFvynpuQm2jlbSh0U3oppTSyK1uSEdUvQtEHdMOtdVF+nRjm5gS/hXdfcJfESatNz/k494doDl8aQDohSivGIUVevLgzNSCK1G2f/JLEbkd6Tzj69KOaqXmhrDPr/D2Y/R/g0kNEF5hOjXKmdKx2+E34EDUJqjoNZl0WTsdZ+WrErbIHLaxqKSZWwPhsvbFnLdJe2fZiAjbe0UfFVj+fLAmNMfLy/iW73XQV17qZlbzgnAfxHB+eVLSUkhn16xKLHT1ol/fVRDdLwbHf7fni17/BPT0x5YLavVmSIX3TdjgbfLEJYgzliVEFDRTjm7WqbV9fCc5TfVAaSrsoXhHHsmRqtSLKEDEdn+ku6L+tZxUyKh/94fGBZV2vnD3EyjC2Q6Kh4sAhJsZhJMWoY/fcZfPsJW0/FAA6c02kMbTKn/+LReAPvXSv6AlqbY2cK+taNm6ndAqLc7x48YIPp4X1tKhgQjzqzVxxXt1SuuVOp260JhyPR/ORviAymn9d4hiworsDCddNVBVaqEH9QRNU46pqRiBIFYYYSWkgukBoheDNKk2rG0vWbEk0dS+prZJrIXhPSgNf/Oa33J5X4sMTyWLW8XGG1kVj6nxSiSJEhyLqUb2JMyvOoe4ym9UOhu4ZIiSdnOAQ8fazJToKO18K1C74Q8+lrifodlzV2tnN9wZtU59p6S1bo6KVwhgHUlDrqxgS0TvlHrugU5l6ke2vkqnv1Ht/iMvLJUkTgVoaj49H1vnIYZdwL+7IRTsvIUWGcaR4R/NBiwFDNfXxXXyVvXebCMw5R66Fx6NCbz1m5WpV63Vp9F/0qpeiyvUE0Sm1buPOywXXdB5cDEQbJxtDArzGQufKir1vCwQxZFQEcin4Wggx8uVX33CThfX5Wa0ixWiKVi3q5754veKscHaOZV1N3JmUe9/6nmwf1yhmutd6qiWWmDhdgbmwAVW21UJHT3HWrpAtZ/fepm2KmI+10gm6nWen5BiADC7SZypqCmef0SviHXxgTCqci6hXc+sCj3/g+icnqKAbVRWd0f3w8MDxdKbSSMPAYb/fTLm7IElaJUQHpZrQSggUzaDLSkueNOyIXjeAGBIxDhsy5T2bd14Pvt7q8z7ifSJ548eCGjZbW7EJuNoYrDXgnONwuGFMI1+++i3/5i/+nLaulFZZS2YYNHHWqYrdwKVv5o1aKnUtLBSklI0WIOga6htaw1FxFOdZRFhKJa+FNi/KG+2JZq36kmlm5VAIoY8x61xYC5qsG5+YRUl0+vqaVHoSE6ryvHLNzOtC3c6bC5F8e64CKUVe378wtKlnuGJVZWf/WtRt1WkXZwmlrORyhlVHXC6n+YeE1L/odU0W3wj866xjZg39ERGkFkUbS95+0JQzWavQvUpLqSzrgk+qDO8xEvoIyZ51Gmcy+K5eBicV7xriPfiIY7VpU/pXgk9aWE0jaVDlZXBuE7CllFiWzH/4X/4D97e3W3HVqiAxaNXdNxnUbQCn7cXWALM9G+eCy3ofo03/6vwvwVHs7ktTQ/A5Qltn4m6kgjpY1IpvAlJotVF8ZRgdKVxGG4IQxLMsCyFGXNCBENK44nsJVXQccvUObwjVmlcT4SjBv22JkfIDtYDTz7yfRvbjxDSOaglD06lxW2Jq1imWg2+ezCod5tSE//CvEIv/2Ouau2y/Q6m6R+VW8cFTS6AUd1GFjzB/mFnWmVqqoXo2LEKSFZT6PPuM+QbsYh996q5QMNkO9V406IEe8L5eshC7pnHi09cvub+/ZRiGKwQKUoycnmdKVh/PfJ7xu70hkiB4LfDs3TTLKrz35JJZ8ko0f+K3b9+yDxOpd9R16gUSnE7086LFuSFttTadwNMcY0yEFLWAWuzVe1XIN/FA0mS46DnU6VreOeZ5VpQqeKQYeNKFrqK7fy+GvO8OH2FLyGq3qZOqCGovjsw2qiOoADf7PcMwXg53HIGPx3Nv+4sl4z+GBFWLwKCFUdWO47JkdUIQ66zUqhPOdiM3eCYibz48byiz5kmXscch+A0RbbjteOrfD/rWuZ1udJG0plEXoAx6kmq4v1eq1zQNvLy5hU0A2LbivCGkpC3pF+4FN/sbgo9bgurFa2FlaHhrDhd1IFFpBd9UkPfFF79mP5+ZnhcddGLF56Vc0fvrIJ0TR3KewTL2FCMhBNZ1oZLV850+/No+ufNUVOArVQWrIsobn+eZOIys67oVVq3K5cn0JBkFtsTr5/Ap8Wwc2CpKP8FVTUrtRYg4nKjk0FfruLlASqNOEouR6JOKX71243yfEf/7TlBrq0QBaUoaf3p8JpdKSokxBhV3oFm1bx0298ToubmZKFUoVXASCUnHah32I/spmkl6McGFHbbec3Nzw253IMbE+Xw2NVuA5nHVkVyieOXoqZeY43Q68eLFC7DKVIqR40uGWvCGpL59+44Pb7+htZXbm4n/8X/495DMhwlFG0tdcRI2xLdKo6xqfpvsxTQsQd0QXcezwCPCc4O5CrU56tJI0W2bv2AIgzRSEfW6Q0glbnNxvehhL01J1OIhxrBx08SSEh8cJXhCU9S2FvVEa6UhXtTjFCz5duYmcFE6UzvWr+hTb8PKFerUn2e3GmutKULuIa9nltOPha5/fV0S1GVdVWD0+AQ+MI475UfK5TDXlLMTu+XKS04TKhFPXlb2YyKmAR8jVTzj7oCPIxdWnj7jYNVosaLDXjnRB7LTARbiRTllVjyICK1oiyYkFb6llJimHdO45/bmwL/7N39FzTNPx0cce4bhsIGgnXJSa8WPOu6z1sLT4yNlzTCfcaWxdwlzNrcnpVt8DQGXItk5ZlRglWuD5xOuv38UbW/iqF5nTjeZ8cEs3YK2IhU5bZrMBmsFxUAIVZ06jK/W60/v/Ta5Z7O0E0F8s1iXDZmtqH/kzW7POAymvpWPuypXQjCx76e1WNsGDXzWhP/
Hj+KQ/7uvWivrkslrBYnc398AYgbcimY8PTwwz0fyWo3j7oBGwJPXhdkLITQCyh8Tc693BPV6JGxuRY4uXNFulnOiwqquyPUe6sVLwft+ICWzYNO9JaXIkAY+/+xzTscz796+48O88LOf/4xpt7PkyikXvyOQWyEt+BQRB2teePrwQBAY8eqoIULyYVtP7irJcyGQ28qSM0utnEuhzTOxDTjnWCt4ZyLIhiZUpTEME06gRqPKOLVSlCVr0jREpaTbPtk7iq1ogSUx4Exh3hCWdbXneQEuBH2eDqVo9OTAe8/9zQ373aDJKZ2Wwva5rr+nfuiGWJv2D34ZB95bwT+vmbVkgzk8SKAW9fEN3oNrHJ+eOJ1OGq8Ws7rHVkQK3eUD0cK6tYZHhyB8XL7BFZrCBUypSiWIAV96F0cXeoqR25sDn7x6we20J6VED6YYIk4cc8mczjPLaSakxO3tHcEHRTGvOq19txFEiykPuVVYM8u8kJeVhCeJtw6yXBBHYPPL9tCt0uqa8YNT7c6IHdyetRZ81b8dxJn3uNOOrPPkXMF1rqtDzfsz0353oZso1A9W7IMh1ziivzj3qPtKNTTWbYi0q2L7tUBTwaaXQC0Z7wPDoHld9MEQaotn41wH64Z/X9z+IAQV7CGWSs2ZlAaGYWCIHucqymNwtE3NKQwpgdfRirVCjAO3d7c6F92rhdFyfma4O+DQSRu3tzekcUccJkTgfD5znmcNUi8bpO69JqrX50stRnbdxoK6rR1Qa2aez7x794Z5tyMgm5sAG3dJ/06vcBxsnNncdGKLT4FWHbU5crhsIOIDEiPT/R1ffvtrHteVVfSeYvF6IJgCqRueI5pgY63YkhtpUD5X8l5tTBCKKMG+t/2avWHvFImorVJbNPsYbb9K1TbTRpDG4zwMMbEbRzU+BrYpWAJIo5sJfxfL2apLQ9dyzjbbOtPWHzMHVUULuVTWdSWmgWkIpORsxrIinr01B7DfDdQWzG5GWxVaWEWmMTKO2h7qs7lBNvJ+8IGXL18RQqDkzMPjoy70qptaIBB9BKwdKoo6VJu8pmWuGBfEKcLb1HYmjYmn4yMP79/iXePlyztuDn9Ksy5EcM1a/3Vrt5ZSmNeFkq24AnCaHJhFPirs0vksT7XyLDDjWGqjVWFeirXD7MysmlwrIqyeqd4XfFAv3RRVbCed6GQbZJRmn9MgkSZ4ufD8Ng6V85Siit3azaUN7PNO22sxKNKiLf3r5LRH70cnF71g2dZQazxK4//3rxCHP/TKOVNbxYXAaK1yruLUO6+DCprhRoacIAnniqr8V6hjJESngjavVKI4JDrcuclCbbpSR05V7KMHZHCB6CLNF2sLWgJRTVzVsL3Gio9aFVW9cVQTZN4fDszzmUfXuDlM7OOoinWc9Yo7kqafsdSqiUwuFFcIxmvzTYeaAHpWCnrO+EBpgewcqzjWJlAaRfKG0kpv0/vu9gGgAEkTr2iPd2rsb6heA5swqAmx887Eom1DN4NpD0opLGaurtSrzgnn8oxkw/wAGJK6CATX1yMbLcPAtN+5HI4fg1H/dtobHWddV5wLDHFPShNiYrJWG3ldWZdZxT9l1SmHzSEbNcmTV4f3TcEs0ZjUc9IEmd7jajOPai0mvPE0g+s8eo+TTo/QbpIi2g3vAjEMJnqNG2DTC6sUB17t97x585Y388I07rm5vSHEbmB7kSlt7CiUOxqC/plcCsfjUd9llY3mGIxOsL3TqwKkIuTWWGslV+2c1LUQm67pnBshVqUaOActU5uQot7EJrbrRSa2/zq/OR9g39Nt4I0myz7q9/BetKu8rle2cj0Fl493VFFXIhFHSoldSuyniSHpc/e9e0XfP6wb84+I2396groh5rLZ96QYGIagLfq6gtOZr63pQRZTZIcqfEtt5FIZhsj97R4f7CG1Sp4b3qyQvHfc3O5xfqDUxrJkzvPCav5e2+PaYGZ70PbftRZUuX/dctUfrTVKzqx55fb2wM20Z0ie3RS3CqYbp9sgNRyOPj2rGPf2MI44J9QMS586haOFiEwTh1cvePvb/8Zzq2R0bKSrkeLV6cl5yz/QHKQgiGs01BHB9k8kKjG60Iyro5/EB79tnMqzka3dW6ry1VrPCyzvdHayO2BMid0wmPJQtqDFAlhFCxfepJ0a9kcuKEf3x221Kgr7I72cMzGaCYaGKTKMkRD0GfZOWV8ywXv206AFyKqcvhgS425k3I1MY1QhVcv2tzRuY4rWDhw4HGwWutmAXclX6ZzIjopo+Op0K2TYviadZycNqYWKp9TCh4cPnM8nYoB5UYVsF330n8USwGajQnPWFnHwjuoxeyLjDOFM6OQJMXIOgbN3zAJrFaQo0i+GuveqvX8P14utueCDovwyaJKqJH9Tt0pjJG4CRtBDJrpAt/ZprZFLIYRk3HBF61zPX8ytwnvHNI4Xn1iR362muMT05XcvRH8RYRHhiz/8Gf/3XqVkBD0AUoxmqt0XtWz7k8cZrcPWfVWKVHCdFtUIYdim94lAGjTWtIbTNnVMicNhr8h7qZzn2dAY7QaoHqALPoEm5KzrQMzBQccCq0+tQ4ghME0jUveUWnjz7oHdlBBestsN+k768sDiCpDWXWIypRaWUgjSNu/iyw8AR3OB7DyrdyzesYpQKlCE0OqGVnb1cmtNwRRx4CxBNe5jCsqt624bTaBa7IWgXQKxwquv4xZMr2B2f97p6MfO/0PQ0at060MNPC224sZi7x6TPUGFLS+6oKj0hPcPD/9fCz7FzoQQIuOUtKASm0hkhXjO6lHbaqE1bwmqJXsEcoYQhNgF9U4THOcCvnMue0fKfhmMqtIpPf0fbzQVxG+te7Bd2F2GLCCXpnkIntu7G+b5zPPjpJSqGDgvC8413G4kmdAOZ/ut9K6ovt9aM6fTSXdy09+A+eT252XrznB0ilO/9aU1luqU458rxawOmziKIcHNqduE/q9+lqjmpvUkEIAGazZnDMw9oieobVt03kGpDR8M0JhnPdul29qx7TmdloM9SRFhSAPTOKqoLyg9xduADt8VNfbr6r4/bn8YgmqHgPdOEb7kbQ5sJtezmrxGR63a4ru9PVCI4GBdM34VYhC8K1qNh0CKyQLRIajJ/G4aef/wzPNpIZdqth8f30dHhnRCkxGTnNrqXEPXWBIsdoiF6Lm/v+dP//TP8KKTlsYhoPM/OoJjVZtRFaSqCbEAD09H7m5ekvaeNheWueBTUvQpDfjDDXz2glPyrDko2b9UBly/S6v89CrmBlDBvPTFFHumwKaZX6EYQd4T7eU6aZ12ZYugUUxM4VwAUUPqjgk7UQPtMY1Mw7i1hDfVbs/WuKT3+hxtAVhAXgzZZZs6tJFef6RXWTNSCilF5XkGwVFxTn3kclF6RwiRYYC035NLI/qMd4Fp2vH69StdoLXoCLr5zO0+UVtmGAfu724Jw0CIEyKN59OZ4/PzFR/qijLRAeuORDl0tKK0LYF19m77ZlJr5fH4SOCGV69ectgP7Hej2Z+a5RDhqtJ1xr0trLWySmMckk5Ia441Wb
v+SB/2deeuyfi5gMUlD2rawzeU8uafkkEWMd3nqauqbrj8opmuNOZTOx1E2Frzxi0KY4dDRzQU+tdVq7aqKCZM5NjCkaeBRmnhLQqM6kmH2CXG///H1LFjpY7GWxc7DcUyjPFJAhZ2WnfdFZvDVUJg+hyP5B0yjdm3elwXvhoC6Oad71Z3tcuNvnOudyzHo0wot189ZZLvyg8vfHvGE64XK/6H6nnKvOucympGT+8xHcDf+g4oIH8pde+nWfHkG95VVlWoWMpFa18s49fXJN13WEPmCxWDNyda0p4rbVRqni3CrJdEXV1LR5s16aF4PWRsUYtLiZKk9gUBJlZwwxTwWZwwe5fcRTCv62UzpfC1l+4S2Fnmo3zO3IWqHxiDGSqYoMzljGEPVYrGXMDs7d2tjbSqBNBrc332w+bzmnz+7dz6jSdDMtHiJHS8U5ZYG42YVKl2gxa73JkWdKwmZzxltvex4/fkJ1PGCdZbVac3n5gO12qzeXKfWtr6qTqmJKB7rJc7ydpZoMl84VD0FrSvu+wxhL07TaVekcMegIQhGlRgGdk+wnTj01LpV1bLZrjDE5MtZrYwRCDJnQ303B2sce92LTXaJDKdd53Y2CCyI7IWH5Me894xByExY5faRprJQEa+da3VL+MjvEcwpL1UTuOKdMFjPH/Z/i3OQ5j/GpdJdipLMjnpLFmZq62hGGDxn6RBg1lViOuzjhj/hV/p/mf8v/5BMd8ecvzlnq2tO2nrOzDd3hmhgLgq1rp/PKYbPd4Jyh6zqGYTGXO9uYGIPWmoow9AMhaGMVCbzx1JVO9SlOQAncytqWrEBKCe/8dK3KJv1sY+pdi3xHZjXCudzQkmYduIWIKm4/lR4sqddK4Ges08/MCQA9jogYNwU008a+/PI75zE5JJ9Qj+fzWe6RL3jN0kzmoHUOfmXaF52vCX03OSQiMMbE4TDw+KMn7A89FV9hL//spzvGz0HmDGLZg0rZ34CIjuhdrRr6oabrO53+npHyuqqp2grjHYFEnwMpk58P40hdVfTDgK8q9SFy7aZZ+gD58s5oKFOG0OYSg8VLZ3AIphS7kfLw0k7lLBszMGDsbeR/mYV1VUFFC+6YpjHvJQOp/LAla6USEUzhPTVaLONAgbbpOMqRzY604mHPY3mZr005o0lkcYbPfaM88+est/pcGfHts96WjjZziyNR9SFyyfgxevvZaKasxVrJHey6WNaq5+x8xdX1FcMwEsI4LdyTJ084HjuqytE0bS5UNviqYr1esd1udTTytFC6xXmvhLPTItiFoymJEHR0apARsHkCg57W8+HrgpwYYlzUXpQNsGxwRiZlmqIWOxvKkgaWgn4Zi1ij0SHamXnYH2mb5gWrOF8wVXRD6e0vzundwO7uNn7r6fLijPS+sDHsVnxR1nJxU0HeBCIGh3Vat+mc4/6DBzTHA7ZyrNYbzs8vldbFqWF5ceHiqyOSAoaAd+rk1E2lo+9ipAvqOCLC8dhxfb2n7wfaZkXTtlTHDkRpzwQYU9SJJ02bI2bVDZdTkofDge12mzvpNWpOudbKe0/btixK8ic9W4oxJm/aBU1dbPoi2SlWehDIaffJ2M0bfPldDKnEgnTpZ+p0tVyju1C+khW5m3UoRyJCnlT2rMH7pNpQNM85R0QnYC3INuZz+Bjd1SeKy6nohKtbtqsdlorDTeR4GLjZH6l9BaiDTzLs5Jqv818A/8InPOrPV7z3NKuKqgLDiDGJ9bpCUiTFAEk4HI6s1/doVzVN23A4Hnj00dXk2CvfruptGoYJIVqvVjhr1WnNYebYjfj1su5fRUTyXO2R3W6HX9armVl17tpeeH6AnTKiBPl9LB3E285pebg4HFPgn+uns4uqbBKFr3GhsyX7tvx8+QRa+lms2gwwvEBk8btkIqQ0zjqcq6mrNW090B+OQEWSRB9GbvYd10979lcdx35g4/+cDf8H4H//GY705y+KIuYRos7p9Yk62hRTceyOtI2nbirWmw0xRQ7dETPGzMRjcdYTEkq7heBSzZObGyTJNG2ybldZZ0UR19rfPgYUvZzqtq0+1nUd27PdXKdarvCd7OOz1/0u+KMyMVokpnuqPD4johksMMWPyJOlnMtN4eqYPs9u3/3O276BlqktXzHfR/OO8rIg606sdKvu/4WSHfaJm5eF3tZrQr2j2x9yIFk47PN7nUcHHX74sXr7mWpQtSNPf1fOkIzSk1ijJL37/RFE+b6KonZdz/5wYLNZ41yli2ytOrUujwQlEKM6hN7qaNPd2Xbi6tLud4uxFSnOdCQYJTv33t4yqMvCZUWS5pSplijEvPHmSthMsSC5HUNH7mWESTLJfun0k6WFyaZWMqqaN4QwDvPTsx9y9ypTIgp9RKeDlDrUu+jS84ylLJ94iVWcu7MXG4gFKzMyUvTIiibmkmhXeow69aNqa6q2Yb3ZsNlt8bkTU9E1m+t1fvlyq7N2+XiMusEOPW3TUNdab9d3R65vblhy8h6PR47HThvGyjVNIwWzquuaumqoqooYc4o1N3kUlFS5KOd6R2uFtq7zRCMWl9bcuri366dBwd0SfebXT40mTPQ186Qzpq5qjMnMDvPGLlONdA5JnFNnrRyHwPFwgNUa2z7L+VgOXURpsoquTt33+btMOdQXbPNLw1n073nW8ZPrrmQnFTCZ14/EZrPlq1/9KiJQ1Q2PnjylGwb2x47NaoOvW568GpNOnyvWKsendwljBF9BXXvGMZEiOjpwHNE6vJq2rbm8vMf104P2j4lmoXAmT4jSNHJdNVhjqKs6k/V7ttsNGrTInWOw9L2WuTRNM3UrFyk1+ksEaboPZ0CS2ZNluta3NuWUK/7uOKfL7ym13Ew6UX6KbsgL3qsE5zJlteTWZyxeqM+ZUpv4HP2VspM/64hPaOhzpASltwYaSKml1nvIYnCuoqk3tNUW8TXdGEjHa26urugOIxIFCQIccOaPn/tdn6dMiKnJIWJuwrM52yHZvEzX3qrd2ew2rG8aOjMiuVFNkhDDiOC0f2IcsJ0ljIF+6BnHkVU70h072qaZvguY6iQnyp/sEBZe1qZpFDllNk1Lv2Bpa2Bpo+5CROW8F13y8iwgoIfgMjuEmZzV8h5B91lv5/Vbrmk5ruw0LeypHs9d8GBqsL51ci/Zl+/Ujaf4fMLI8qpJb6XU86ruJok6XMB66npDXW2wacDIiJCzVg4ijiFEiAc2H6O3nw1BzSdujNYohjBkUn510IZ+JIw5hZ+Ryq7r6buRVasnIAKrtqauFaEZhjyjVkx2bCttOqn8hHSCNlbVlWHM/GnWWHxV5Y1JG1h0LYpDOjcilfQ1d4zZjGLl/zeysKcZKWXZ2VwUY45kSlq0GJoY0xxZlUtb/OnyPczvLUo2f/qsAJ/kghSjeFfkOd/17GtYWOd5fZbv0ak0OiihqmrqpqGp68kJkTz/uxiYX7aUa3FXUkbiC2G/9xUly9Mde0yypKCR/NCPhBBp25a2XeGcz46QGtb1qtX0f54bbZk75MsqpFwUr8ZDx+otB0qUIEodynnTvHvsc7D
C9JqFhs5BSv6R6U1Zn0wxWou6qkUKClAC7KjDMbyviEGP/aWF9ItzXb7ulpF9gWFfHvvPU3eXX1VSUAbDdnvGxcUlfT/iq4ZuGBhCYL3ZYKsV7w3tc4/x85bnBQPe67Qsa9W5rCqPryrGME7cjClP23NW06dN285jR/M10PrqhPFQOZ/LT5TmrAyhWK3ayfk0E0RSRmvmWr+6vnV8z6b1X3zNp6fvouRFeY3JM8gX5OLGME/BsZONsoU3eKF7Wp6gn3l3wy+yrNnmmWO/cy0mJ/UTSnYmnoci5xdgDHMPx0SFKLdKFgqiVtcNya/oQ4dIhTU1hpw6j4Ixjgt58CkO8BcjVUYEfVVR1zXWaBBjRSBFJEWcz+VEkmttvadtK3a7HcYeGIeIs9r9nogaWCXt/vfOE8I8tGcMo27nSZCYkJgwTib7q0uo1y/JPESgbdvcvMVCRWXBlvIxUm4JZgDJYJTrMz4/MDKzaj/3+SKltGpZFrgs2VqWAd6Ojcz0W+19OVa5/ftlp7W4D17kd5SgcxlYlcUooABkFoO6IQ0RUtDAi/xjLELCU3EhLx8v/ZmbpBaHPDUcIJYYA9phL5nLy03OQIxqHCWBy8X83leaNgqKNnrndHxobroCNYzWu6n4OEnMkTx4r4XUtjiSKWr3sq40c4gOtzay23DVrY2w2MNi+HSbSxl1lcnxnHVM36tkw6ClByEjw/NLiiOakuRi+BdE+dyO4D7+Epi58/4OZcSEBJNVd0nZl511I5riLentXN2ARaf2FCeqpJRLo5ubiNq0+H1IgshtiopflrxoY7LWUlW58clXNHWLsULXKUedzWPwrHGIGMYxUNcNbbuibVu67oiI1lKf7bbUdaPIpVHnzuXGJGurXMOZ1EG1dkL7bA5SUlKET5ux7C19uuXoGZgmN2W9nI3prN9TzdHkpM50OXcdXMnmYkZusm6iyL/Ob08TEhSzNbyrr+YOyna3/mo6hxdIuuWgpls0U0kEMZ9SdyGTTpONZUJyAb9e+4r1Zo31FWOMkJ2zONYM19cvPM7PU+4ik5A3f6PE+XVjMKLsA+M4EFzATQWXGpx752nqJjeNCAVxDKRcq6oZqqryWKv3s7eOti5ZhcyMUhwnNEjVZi2v+jFtaHqMZbN+XjAji39fJAV5w2gzqzUWs0BCDS6PSlwG5OaWg1qAgtLgWGxWWU67QF0/iTwPzfoksrSZz8q8d8yHU1CThDERQ0RyE673FcnVeCe07ZZ79+DJkxtW/UhKhp294Av8+ic+tl+UFFS9qWvW6zUpkh3UlB3UABJwJk0Ol68qVm3N+cUF1jmOhx4RT13XOv0MQexMEWatne4PZRGa75UU4zNbe/kj5Tr9YgP0qWf1tchcU/+Sa7542/SyO3bvZc7o3Q+TlAiZ+qkwU9yuv9bXzY/NB2Gmf+f/K70JL3M4n3feLwJ3yjndOt8JzSp6qxzhRW9DMHnIxPJM1aLUrPgC33jp8fxMDqqIjhk11uF9g7WeoT9wfnGJQZ1Tl6mg6kprVPth5DgM7HZn7HY7YgwaCaGNJRrd5NoVowXVXddh1ytWlWccA8fjkaurG5zz7HZnk7IaUYL0hME4NWMm0wVJnJud4DaRcjF2y+77Z881EYIoYltXtyPw6TW69GqYklKeYCbuO8neQwghc23G6TOWn2MLkfonNYjGaBpDT+TOuc0Oqjqw89tSdg9ubUZCnoRhM1uPaGpQ5u9y+bpqCYAa4a7rOEiAi1cDhdpsNs80pwGsVisuLi6JaeRsd8HZ+TbXXVoePDhi8VgMtXes2oa+G0nR0DQrdrvzKejY7XasVqvM5qA6lka9sZ11tO2a2tf5uictSbH63rJxxSSZlzIwczjOUex8/RNTsc/CCEFe/8zPJ7J0CHI2gRJMWWWiYH7NbIjSZLhT0hpD7wNlkk8cIzGNk9O7NF6OFxvhT2KcE8wbdD5ySYLetqq9lKc/ge4aoGLBK2wKNZhwOHR0nQ5hWLUtZ22LqzQLUN9Efuf9/wj477/0eD8Puby8fIaLsGkaXCu0raFdeSSGaaOtqgqHzfR+eo2dc6zXG9brNSJ2QgKDCCnBZrWmzpRo1mo9dOV8rs2dkUURXcOSVndVpU4H5M/S+nkzoZ1Z/2Qup7olC8RyiWAtEaIyuU8/b96kvXeMA/nzF05EJlPX0dgCSQiSlAbLOpx1023jjJ2mYX1S+TQb/Cd7z+29w+ReBz2pCAQsEZMi5ODWAJvNlvPzB9RfbTgeezabLV0/8uXwJr9u/tuf6vh+EbLZbAgh0LYt9+/fR5Iym0gYkBiQGICIJdI0GhTVdU1d1zx48IDVes1+3zF2UfdA12rZnbM4W1HmIUkGeAra6L2uzziOtO3C7pg5uycitxiE7sqcZX3ONbuFtPKs3hqTGQPMRIv3afRLT0p0rHvOtmkDbT190VSqiLlFTVgydgVNnQIfgWEcp2Dt0zioH/faW3orxUEtejtiUkByMPsiB19Sokkbft29XG/Np73xTnKSk5zkJCc5yUlOcpJfpDwLMZ3kJCc5yUlOcpKTnOQkv0Q5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldKTg7qSU5ykpOc5CQnOclJXik5OagnOclJTnKSk5zkJCd5peTkoJ7kJCc5yUlOcpKTnOSVkpODepKTnOQkJznJSU5ykldK/Mue/Gf+u39Fzu+dg0kM/YHD/prXHtzn8YePePrkipvDkSQCxuKqClt5nHfgLHVV4S1YBBMTSCCFgRBGwjgyjANiElVVU1U13le07YrGNdP3CwIkAAyCAbAGIWIwGGMwSxfbgDHgncViMMYCBoMeh8Xgvcd7T103XFxc8hf/4jd58OABbdvgvMVZlz/IYazD5CUyxiAigIARNpsV3gAiIIJN4HC31u/m5sD/5z/+/2Ct58tf/jL/yD/y38EYS5LE9c0NP/rxj/lrf/0/paoqvvrVL7M52+C80/Myevx9HzgcOza7Mx689joX91/j4vIB9+4/BFcThoFHH33A/uaa119/SJJAjGC8wzcNvloBhmQEeEFEIp9AU8
zHv+6dP3rKv/uv/D3+5f/or5hP8Im/UPntb/6mvP32m5zvNrTeYYeB4/HI1c0Nh+5INw56OtZhrcF7R1PXGKCyFkcCiaSshc5Zqtqz2rRcXt7nT7/9XT788DHDEPC+pmlWtOuWxnsqZzFWkJgo+osxYISsjBhj8rXI18VarLFYmxfasNDfWYedsTjrqaqK1WrFm29+gTfffJO33nqLum4BizWCMQbnK/1qDNYYrLVgoXIOb/Uxg8GU2yxJPlS9T65ubvjP/7P/L9/59nfxVcX/6F/6H/PgwQOqqiLEwM3+mr/21/9Tvvf973P/wX1+5Vd+BVw+N2tJKTEMI1fXR3a7HbvzC7ZnF+zOL/jiW1+h2ewwIhz3e37wgx/wla++petgHc568B4xBjEunz0ISe+5fJy6Mh8j/w3T3X/2v/WX5HxX462Q0sgwCiIwJotgEGvAQOVUZwwWawzeGipn1P45ixgwlefe/Yc4V7HfH/j9P/h7hAB1u8J4T0yJ4/GIxJHKO+qqom1ramMxVpfOIBhDvrZA/ttg5nU1YKwunff2lu6SP0M/zGYbXOO9Z9XUXJ6d8du/9Vvcv38f5+p8b0jWBY+1Vr/XQOU93hokJcZxpD92XD25IhwHusOBm+tr+r7no8eP2e8PdN1AP4z0IfIP/UO/w7e+9Vt8/etfw1WOiPDOT3/Ku+++w4ePPqTdtPm71P4CjGPg5tCxWu/YbM/Y7Lbszu5x794DVusNztcMw8gf/8nf5eHDh2w2G+q6BrEEhLpt8U2DcXVehaR7pp7OLHl5nPkYzOg5upyA4By7dfNL1d2/8NW3pN7sMNYgKeINxBgJIWAAX/TDLfbzIkZomoaL80u++o2v88Ybb/Dtb3+Xd9/7kOubA1XdYKtsD0RIKSJDoDvs6Yce5xz3Lu/BOFBVFc6pXcdkXbW6bxtZfK8BY+2kX8bM116ts0wXqdg0ay0uv85aaGvP1772Db78pS9z7949mqrOr3f5GCwu65JNiZura979yU/5m3/j9wC93jbb4CBCTEKUREqJmBIJcL6iXbW89fZb/Iv/4r/A5eUlxhhu9jd88NGH/Pi9HyGkWW+zfnTdSD8kvK+5//A1ms2Ge/fusdnsWK83rNdbUoTvfe97iElc3DunXW0QIKSE9Z66XWOrFmMgSfpkvgLoOS0v7+K4ivzXf/QD/of/q/8j3/1//6sv1NuXOqjvfvABycB2t6JtW7brhtpZzrYbrIBzjpvDnpBU4awkEIMTQ2XUIbTGYL3DpEjEqSF1jnZVs1q1jDHR9wM3NzcMQ2Szgso7nLU453RTkjQfVBIwRt0GA0g2gMVZxJASWAsiqpxl/7WgFz4mxhC4ur7m29/5DmMIPHzwgN1up85mMYgJ/cdkZc1fZO4YCZOtRkppuhjGGNbrlvPzc37y03f44Q9/yFtvfYmvfe1rGDHUdcXZ+Y6vfOnL/O2/8/u88YXXWG1WOO+QfAyg5+GdIcbIGAaGoWcYe2IYca5GUkAkYkw26NT4ymC9x1YVxjkMDjHlBn3+tX72cV3LfILz6crSsC5eAzzyj/i3zL/Fv8xfeZlafS6y3x/54MMP6Y837Nqae+sGSQN1lcA4Kl/TDQnjdCO21uCM4KyjMhbvLN5VOO+IKSEknHfUVc1ms6Ft1zh3AySSGBIw9AMSItE76srlpVHdmA3y8y+AiCAkJFnEJOzdjUrKL0EkEpKlHwfefe89YkpYa3n77S+r3ueXSwj5vCbP4tbnpXJfJcGJw4ggIkQRhmHg5ukVBkNd13Rdz+///u/zu7/7u6xWLc5bfH3Gr/7qr2KdejJjHPHWT5u7GnEzG31rshNjiGFAjEybjV3c46a81znEWsgOKhjERNXBxfn8/aa7AM5ahJFhDMQoJHXxIAcxxtlJo0xx3QVi0h8TwDiHxEiMEV/VuFoDGww477HOY52QJDEOQoyRru+JMWI3KxyO7Aszr9dyse8ufH6lAHnD1F2AW/pXrnkIcJREioG/90d/yG/95rfY7XZUVUUEJEGSgJXsGNjsnERh6HuePn7MH/3dP+TJR09IMUwBFkkYxpExjKSoK+es4Qc/+AGXlxecn5/z+uuvYQ3cu3dP7WroGOI4B5FGzzfjD5Adm1m31d467zBjwHtd26ZpdI1xVNbg6xpbVeA8iAWT8r3+Agf1JQFX3up0f1i8LhmjG8UrIiKQotDFREh6v1qDOuYCIQrGpOwU6lI7ly3k5DBaYkyQkjp4AlZ0DcQYjHWIVwcTAUlJ9bfr8WOgqjxVXeG9UycnmSnIWeqt6mLCubvrp9doaTwkBxYFqEpJGIbEj3/854QQ2N/s+cIbb7Ber4FIjPm1RvdvEyNdd+R4PDIMA42vMC7b/pTuuuyLY0x0Xcf777/P3/pbf4u//Jf/Mt4X4AwclviCe3E6j+UzeQ2GoSeVe8zmwNFZsA5v1Ieomgasz3ZUA+XnHaX6WsUelR/zzGuWevvAPeUfS/8O8K8+97zhYxzU7jiw3x8QAkPt2LaOdr2irT1m06qSEemGAFbRTGPAGcEb1Dk1ikgZ60jWIpUujq8M5xcXHLuex0+ecnV1jaQBZwdi8lTeUxmDyxvu9E+xkdYsg3eyHiwWQ43ItFwmg52oQY4x0g89Hz16RNu2SHYuz87O1CAlKNCSGlhFKYrGSkp6zmbeUDHMKBSGynvW6xWSEh99+BF/8sd/zBe/+EWapqGpay52O956602+/8PvkVIixEBFdffSZzStGP9EipGYRhxCilF/UkSRDZOROP0hIxkFOi1LNEc0S/U1k/Le2mzM4vXTvv+MiUVMzTUPX6JRn5+kKPTDQF8ZVt4g4klpwBBxNoEzRG+xziAGrFNds9nWW2twBY2yutFap0FTXTdUtaL+ziecq/FVTQwBkUBMkZgcVWVxJdhhAv6yQ5UNh5lv46KfTH/PAdbSsUoiGEmEEDgcjzx6/AjvHav1mnv37mnkXvBZsZMjKHmDKJ+dUiQMI0PXMxwH4hgI40A/DIQQefLkCVdPnyBJI/of/OAHfPObv0FVOdabNc7WvPHGGxyOBw7dYQHPLxDOjDYUA6ZrkZAUMSRSElJcBKBLKQbNFPc+Z0Vm33Mhf//orvNOA6MYiDFw7EeM84AgxkIySND1cIAzRsEAC70kRZdEgy9fNcQkmLwBVZUnSpquhTEGX3lEKqIIKQSGYWCoK3xKecNS22dksfBSNvCFFV5sUrflrq0WtWMJQs5UfPjRB/zkpz/mjTfe4Gx3Rl3Xig+ImWy+SYYQAkmE7njk6slT3v3pO/SHTnUl2z6DIaVEkogkmbaNm5trfvKTn3B5ecGDh/ex1lBXFXVd45yDNDLZ92VUM/+Rz6/8ndSLFg0onSs/HrCIMfn+zxujMfnxFzuodtJdWXzf9BIk23hz6/1mAjReCRFIYojo7qm2Zna/rBGsnfdrgyDGTMGVbsV6DaW4bZJykGDydm5mvSx7Y4qICMe+ZwiBOkbaRpFXWzCmxcLdupSy/HuZFlicVnZmTQEPREgIh8OBjz78ECOK8L/++uvUdY21kJJ+a
YwRQmDoB7quI8UIvpptcvmVIyLJDwjZsY+R/c0N3/3Od/nWt77F5eWl2sXnofHPvQVNtsPlTwU6YtT7xBX99U4BgRwEmGx/TQYFxTz78XLnvjcwZefuvmapt9EYnj7XXszyUgc1JqHrO0Ls8TYRVp5tU1M5g2kchhpSi3c9IWmUr2gU2UEFZw21szhTZSdOURTfOC4uLmiOPX0/ImIIMTGOaoD086CtXDEZt9c7/6OvKhhmfp0sXsDszRfDUJxUYuR4PPLee+8SQgBgtVpR1xmtkZQdbjCSl3txY7Hc8BNIzGndbLytKalZw/5mz5/88Z/wrd/+bV577TWausae7fjCF97gq1/5ClhNh8DS0c5FDlJSwEZNvURiDGCEkDexmKKuhZQ43Dxj3JYidx+g7APLrSQfgzAhCvPrloql76lY84X0G89+8C9DjJ09FKuptRh6YgoUBK7yTtNRlozugSFRLrQISAroBpARQAzeaQDlq4oqCL5qqOuGo4heixAYQ2CVPHVT4dyc6Cy6aO5ekXyoc5ReAix9n5jl1SkbfCJG3XhDHGnahrqpWbcrvK8mUMWgx54kYZJmEQwwdD031zc8/uAjnjx+yth19F3H8XgkxsT1zTVjiMSgKNQH77/PD3/4A9brltW6xRnH5cUlDx7c5/FTy7E/TidTVO8WgsocvZP1WJGPkM/57prI/GNu6+VtXf37S3ed9zjnSNEQU+LQdbiqBhKCJQJRNFj1xqiDaoy+JwzEOOo1tob1dqe2yDlcEg1eTF5/dD2cc1BVmJQYY2QcFblN0ZCco8rHU1KVRorfuLxe04NZ7mw8C4+sBEtmQtDheDzwZ3/+w4zSJC7OLzISabPDopJSwsTI8XDg+vqa/dUN3mrGzQAmlWBsRirLxh9D5N1336FdNfwDv/FrrFYrRevQbIJ9ZrOcdx65dUp5F0kJiQHJtrg4/NZaRObNOPvzs2Q7fmtLz9mtZ+z0XVd2kSqVu+v/XM/k8xUDaquMIRkHzpCSTIECRtFsJ2YO1LOdDgFCyM5s8RYlUYCisr9N1jTbldlJ1UCs7zoYR8YQkaRpdO80ILNGbf3kMBSdTDKVqEjK0cIdmyOS9c8qmqtXwJJi4mZ/A3lvWK/XnJ+f453TUi/RkpQUI8M40Pd91s0FaLCIgxY+84SuAgzDwE9/+lPeffddtmdnusYp5QyzetgmK+r0GbI8A8PyG4xJiIT8+YKzFm8dMu03+gHzMtzW2/nh244od54v772rt72p+HPzclDgpQ4qYnRxBcaYuLnZ0w8djRMkjZA6mlqwpmIIQix5dAvOCt5qdO+soakqKq9QcRR1Aqy1NO2Ktl1T+ZoQLdZXJBFiCX2b+wABAABJREFUPzCOA6muqZpqQrempShIpb2zFJId0eJQ3sGki1LoRbWIJG72e4ZhYL+/oV2t+MIbX8C5svBaMoAVjc5Fo+LytSlFYgiEIdAfB9Iw0ncdN/s9V1dX/PSdd+iOByRFrq6e8t3vfoeLizPa1Rlt1XL//j2++c1v8pP3fqJOppQIbY5KSomDsYoOSErEcUQMOf0suXaRWxHi9Dc8kwYV7kSQL7JtS8fIoGlgmIzE/GZhK0/5h+JfA/6lF3zY5y0zgiyS6MeemCK5KAlB08jG2hzsSHYSoyLawJjTNM47TPJgK6zz+Eqd1FAJvqrBWdrVijiOhLFn7I9ch5GVCE3tqb2b6pwA1c9ScrG442V5TTB39p0SARcnNiICISQOhwPf+953sdbx5he+yPn5OU3TEGOcjKEzDitq6Dzw9MkTfvKjH/Mnf/ePOe73WlZj3YTiRBQxkMlDSPydP/gDttt1RqAamspT1zWVr7T0ZHqt3NqUnZtrsvTYEzKMhGFkHLocaKrul72hRO8so/Hn7cN/n+mu+AqsJYkwhkAfIoSelMtJQhKGqMhdcU5tvsGdmTMAPuu5cw7vqyme1iWVHIxpjZz1HgmBkFGZGAOSLP0wYoyhXbW0TYOjZBkWSrsAAyRllMWycFhlGXXr6xaIfjLCmBRFDSGw3+/52le+yr1797K+WESjSNXHEOiOHcf9gTCO+NpOn5mi6p26M9nWJ93IsYYPP/yQEAPf+c53+eY3v4kvUVy6rRfzpvt85dKUcCKEnhAHUgpqp0WmMrQXySdS31vGeRmazmBM+b/ZDf8lizFqS3FTpkSdAbW/kgEYUPtns1cmYjGSGEJiDImYLFVVT6hd+RyRNDlgWv8sU2BmjJYO9iZhrCPESAwjYa8Zxs16RVV5zBQ8aElVCZoLOqqnYab1ZrK3JUNjptdZY7IznQhhZL+/5qc/jXjv+dpXv8p6vZ6yWTY7kyk7g6qrKZdtqJ1PS4OZsxXWGC2TyGvW9yO///t/m4evvU7dVIQQZjubHWtjDEmY0djF56pDO/s/RkQDrOQ161Yyx8X5fwkyb5nXabp3llnmxb88R283cuRb8v0Xfj58nINaJBfiIxDHkYghhZFh7IhRlcU5i1GokWQMtqBGRkACKTliClMtmnUeaxx1bRX5qRtqU7NarQkpMYaROPZcH4/UMdA2FU3jMeRqtIVRfObmTAJWb2WzfIkpgMwyyraa7pfE4yeP+IO/8/tYZ7i8vEfTNDlVqspkxeKdIyWIUQu/nzx6zLs/fYd3f/wOV0+viOM4KUEMikaMWowCAv/Vf/V7vPnmG/jKstlsWDUNr7/2GvvjDYfjQWtFFwanIGV67U32NFXxTByJQ5dTy/ONPN1Uk2I+BzP6NMH2AjmQcgjPkUsS/xSHT/HBvzhRw5NRz5ymH8eRfdepergKa4TYJU1D5Y2+qXyueYqkGAhxJIRA5T1Nu+LiwmCdNvJ473E+glWjYyxUlcOZGmIghJHD8UiMNanRNGJVOUzKToJo8MayPiX/v+TXLPJgijpRXqYXQVNO2jg4jAM/+P736I9H3njjC7zxxhs0TTOlU0VkQsFiCFxfXfPoo0f0x6MW62c0qaiO5NrbgnBIEn704x/z9/7wD1mtV/zmb/6mbtRJN2UrqBMxBYFoHRnqoFqN+vQ4nCWGgRRHRAIxjgvEOK/DdCBLJ+hTbsT/DdRdvZMtIQljiLqBGkdMjiAQUiRhiaJZLoM6jZCd0yQ4KySxiGizp26savNsbhZRu6CorM0lTKU0SFKkamqOfc/+cOTQD2zXGzarmspbpkSDMbeBA4re2AmRms9q/rvYyJJlEqMZpOvrK2KMDF3Pb/zGb0w1qdbkgClGyOVZx+NxsuUxRpZNWzI5bvmvbI/BcDx2/N7v/R6//uu/jq+r6ZiL3TVzon063oKOmuJoSEH0QFLQ4C5v8BIT+GWqbZZPY3bnlcv3+sJnvqXHH5Mm/dzEANYpqmhScf+yoxMJCbWrYhdodUJr6hMhxJxFNFjj2G62DOc9x65nGEZCEs3/m4LCGBDN7lijzZTGhOlwUhICkevjkW4YaeqK9aphu14tNkEhRsE5M9nSCbxdpM8l7yWaHbUko3uzsU79wqTZXwlHfvj9H+Kt5Y3XX+d8dw7GsqorfW8SQoh5T5+v223UVE3q5AXkY0mo
L/Cnf/onvPn2mzx87WEu7bstxV6npJlil8GuqexPQKIQZGQcepx1OOs0uCx7UbG7dy7v8y75i+W2i3pXb79sVvxv0ldf+gkvdVCttRoR2aIMGtULQkyRYRiJqRTsewwOrDaYYECMXoSEojxjSHPWtao4v5Rcu6ObvRg/1URZbwneEI5HxmFEUiTEirapqGzuyc+bPKZc7EXEKaLOXo6WKKjqwkktRrLUZogknl495Tvf/Q5vv/0lHtx/wHa9QYwilKrwgAgpBaIYHj9+zI9//GMevfshY99PXZiSlTGlqJFSPvGbmxv+8A//kKqu+PrXv461lrqp8bnrsHQp5q+ZPuu2Mmj9iIw9YRy0QaBEMiW1JHq9Js3ITurdQuxlXcr0LcsNJzvWJcV869134oMbc81/7f4m//jLlOpzEmstJhd864+mPaIkxiiUrEhAOxa1RkqovcsBkKYgyY6/GIsXoRRPKUpksqFVNMAYdVyTFUabkdukaewQtJykbdtc82axaEOStRndK6mqbKUU7VpkAhbR6OSYiuqmFUty2nD4/vsfkhJ473nt4UOqqgby+RQEIAT6oafvtCnGW6fXmexg5Ig+iSJRpJz5AN59912+/e1v841vfIOzy8t8uElTY9oNQWmCzYc7I1KSm7OMkERRJ4NgrKjuLtPEEXC5hnopMn/m34+6S3bGsIpsuioRxaPZIZDscIpEYkoYEZIp+4oh5ayXMYaQYAwFXDG5Xl17B4xRzS3kI9aUmsmZ9WHqZo+B/eFADCOrtmHV1hm91e9UuV2Ldqtkw6A6LcXRmzksRECsyQFNpO87Hj9+wve//32+9tWvst1s8FWFNXpvTrELMmUIWKA5qQAQZt6sjTGT/g79wI9/9BN+8IM/4ytf+7IenmHWl1mbFvvEfCqiuV7MVMurzu9Ui2cKQ4fFiJ3vg7sXWe78fcufv3UU8//NMNTCqxGmDqpfomigPDuPs8Uy6qwak9HScgOX9dUmSSlBi7WICKvViu1uh68qbm5uoBtzo5XcOv3iymn20eUgOeVAGYiRlNRnCVHrLldNjXMl21D83uILaMNscfwn7RKQbNhsvp+iRIzVMpQS3khKvPvOOxPTxNnujDo306SoewHM+/t8y5jJ5kr+rd9i9GyyLe7Hke9897sM48Drr78G+WwpOKYxuXHXoDjnfB9r6ZdHxCBRm2udswukNIHx+h6xSAT8UjWfr7dLm1uuuSy90efo7Qc4/h13xv/sJTr1UgfVZMjeWIMxDpNsri9Rb36MgWFMGOentJEkA1Hr96wRrUPFaBd/inlzMlR1Q5Jc4J879lM+U2sNBocxFQwDQ4wanSC5aF9m6Nxmp8wsa37yzZEklwAsd6OsdIsUk5jSjGLo+573338P7zwSBfNQ61LntEVm5EmBmGB/s+fJ4yccDwfdBVwuLBaZIpiCJokIIQS+/4Mf8PC1h7z22mvszs4UfTZFvXLCphi7WxD9Qk0MGX2K+kNG8YqyTJb/BXr1meR2xHfXJAZ2POEv/czf8vMQrTNa/GSaInUudbViSozCZLT0+iR1ULPhsqgeGyukpJ+jzkE2RyZfMVkgWdZM6eyS1RIRjt2gzQMCdaV0VN4ZSunw1Lxj5ut/twC9fNZMe4bWP5tESmow9/u9osF1xappODs/x+E1DVzSpLm7WxtxtEh+uicSkzEs56Z7sm7IN9c3vPPOu7z37ntsLy5uHRey0L87xzs7qbL40bToRHkFs5Na9Ff0A0uDxJ2P/3hdyP8We/mq624RYxzOV5ghafBfjtqUCqcZJ1S1y3XTGTG0MRFjKXdaAAGUtGBe3FQ2Nw3SFPjMtj87klpuMCoan/WhqTxNpfvDck2n2rrpPIxe5/mR6dWqbqJd1lYp+MYxcEhH3n/vfc52OyQlNusN3tdUzlHqS9NUb7r46GmDLOpT6vHMpN8paVnXd7/zHe49uNQMVFmO+YP0vQv7WzS03HtazqDrZ7m9B93ajBNKc5RP/U5s9PF6UNbqE77+lytya82WdbLF/uoe77AWQgrTq5bvU5RasNbR1A1plTD0jDlDiahu37n4M5gG5AiFCJistzHNe3FdeypvcMIEXs01rZoJnvZUk+1StkPF4TbG5Hss3zcSGWPk6ZMnNE1D2zas25ZY10hUVFezSs9p1Sz6Uvbu5ZkJxPxYiIH33nuP1apls1mpnbbzpy2dyZmyUh911mWGGLWHpUTCWaOBg5DLzsx0LKUM7dPq7WRzb0cTkzxmx7/PP/yzOqhapwdgjQfrMEaNTYyRfddP6VJwBBIhl+JYozVRLl84RUx0s9qsc9CXo2nnlF6ppLCNMVTWId4TQiDEQBgDoUqMKVDXNd5nHjHR7xBbHLxccG6L4j0/ulQkIaNgVi9IjImbmxt++pOfMnQDBnjjjTcy6qV2NgqEoLQRx+OR4+EwbfLaYa03znQzwGxQMbzz03f44Q//jNdff51fOz/PBtxMTs4ieLt1Leb/1xqoGEN+nUwpqvJdytGSZjRL3/lMoH1Xb55ZKXNb6W89fyeSqvgCD9L/9Llr/XmLcVb1IaPSKVsZ4zT4QSq9llGIYolGI19CDpBEMEnj5VLXp/1vedNO+e6Vgt7PHLh6fXJtK6rf1jnGGAldx5ASq7pm3TZY69VBs2UTLE1xLJD0pZG/7ZyWhpVkgRinTf76+ooUA23TUDUNbdsqx+8C2SxReoyRZD1TCUGh61lgIMXpMBiGYeTxoyf86be/w5e+9lWWDSkF3prbwHKDxEJUPQWsTGml59f8yVRXRQkEzK1nb1/zZ5RgfqT4B9N7XmXdNepQGuuw1mPMmPO7+jPf43OPgGYMlFZKMroXxWS0xmCtx1nDZrOm6zpCTFmfE1Zy4JXfZ412X1vjZqRTspMqgRiV33a7XmNti8dMXdJi0pRtshnulFLGAotdLlLqwxNq8Gz2lUUCEg1XTyM/+fGPkRDhgbBZb3BNPVEKxammcQ7k7u6HQvnM2w6QAf74T/6Ir3z9yzRtrXqY9atYvGWZwATWTGtitf4/I3+V98qPWb5V7v7w0k3+ebo7PXbHB5v+18zvfCWy/ItARMjgDKielpdkVhNbacBsQq5zjPHWNQqi/Lx93+OsZ73e4lxF1x0Zh4EwjoSoAZN+X17iOzU809UulyEljlFr5TfSkMRROW00VFBCQThBMne4TCUspe5aEUmbM14gWJzLiD1Aihy7SN8fGcaBJEoRSdJywiSCWFc8leUCztULE1xV7j0t5wHBBOHq6ooPPviA1brlwcN7E4PS8/TAmGwfUgYCss9g8pPKLezUsogGi1MzuJQmKcn39+0veJHelvvwFlixiBvAcMM9fl/+mWcPeCEvr0E15AaSHFsbTddLMSxJFXCIkSQDMRnGHCGISEafFLsyVol6lRTdKtJotEs1Ji0WtrmzT1OsGq1OaaasuAI8ubpSovK2Zb1aUXtLMjIhL6VXaxa5dVJLCo/JBJUi+cy1d3O4ZghK7m6t5bXXXtNaKGu1LjVv7CEEpT5JKafYMtIjGWllRg5KEb+xlnfeeYc//fa
3+bVvfvP2kS6dBLtM8+ivlI2z5HoUi1B7TxKvgwKk0ProZ+g9Fqdr+NxVMUz0fy9Qg0/0zFa+ze/wPwB++sJ3fG6i7ZpKBeYFW434VrDRYvPGnixEmzkmE1Nq1KaC4pfUCrik/H0h04zEmClsEGymm1FKp7m4XEB7BLzHuxqwhASxGxiHkb4fON/taLzDicG6+SJo2bHk9FPWLbMs7tdbN2WjYpOFaAgyYq3TiPvmwB/9yZ8A8Prrr7PbnYEHnwOpZZlLMfFSdlFhujeeFzHf3Nzw+3/r9/mt3/pNhr7Pm3sJMAGbsvNZavuMRoyUwvtyPyeUWuB53zJHarIARhb78nSsn1R3zQueeaV0F7KdNZMTp8G7nQOMVJ6T/z95fxIjy5bm+WG/M5iZu8d0hzdkVVZ3VU/V1fMkggQhdhOSQG6ohkCwJS0oaUd1bwQJ0FqAAC20kAiQ0EYQuJAESgstxIY2pJoUQaBFsrurW2AN3ZWVWZVZOb98w703wsPdzeyc82nxfeeYedy49718VfnyVfI8xIu4Hh5uZsc++8b/9/9Q2q4WdSi0yBpKUlqSAjFEnj17pt3vhyPjNGsWqga5KJdq1RniihprH5E0k0WzSJnc2ENSmthseoYuKruLXzqqtYEEal9iu39ucbLNFQfnSKkQQs3gZiRnPvjBDxlPI/eHA7/4R/4IXbhR/NycmaeF/eGxJEQ17u3fojAVAYJ4Pn75gt/62m/z7rvP6frQdLVzVgWzp81VVhST8c1mq4GlHcQ5bUg7c04pOAmLgRdYoGi2PkV2Hy6/upay+iM5eyB+ektVVn3eK8nUuY+OaLe8lpVRKFSDUQrVaoLwySef8PFHn+Cc4+nTp1xdXRGCY+4ih/t7xtNkQUL1NZxiMovgysOzOz/COCpscB4i217p15RaUFkG6hXV2qQHxRWjTrDmUJ1hN4thRpURhpIZri+5vL7i+uaazW6L4JjGkdN0Yp4mxCm0rAtVslQYxHylOjxFzfg6Ky3MkwATH3zwI+Z55vLmgm038PiqNkpwkllCBczuO7qogw2Wv8ia+BAHJSEScMUtQPfPILcP1PPZ2dQ1uB/wJ9z/Cfgbb/yctzuoloXSzKSVjyy6LqiTGaMAPcreYxyjmKBIbZyA2EFOmeD11k5Txrmg+MyStWEF0JK1OqTafLV0/gL0XafNAdNMysI0J3abgYvtwFLVVSdiKekoETDweumJqsSklW2wKGOeJl6+fMk/+2f/jGkceef5cy4vryBEorPScFEcWM4ZF8BbJq12sz5cYpHIxx99wtd/+xv81m/8U37hF3/+0e2XM8Fcv44C8UPmNB6Y55kQbYKFOcdNK5hzIPD6+ayl5yxxtRZi99qvcSuFU48JfJ8r/h33z/MvPXo1X+zS6WRBG5qCR1zAx0EdJwRxZpBMtgHwhZKSKguxWSKGVk9OSBlKcQTfcXNzw/FwoOSs2ag8a9e0yWpw2pmqZRTlRvTOk8S66rNytc7zCzZ9x+Vuw9ArUbJGw7W8WK/IaKKcqrJCajfUGeZKYUiGe3Uqy06Eb3/724ynE++9+x7Pn71D2G6MKF0ZKOziX3NELcYxMarm1xpOSmZ/f8s//Me/yi/8ws8tjmn74+qR1PK+Bk7BB/q+R8SGT8wz0zTbdbgzR0ZPwJoihEVBrk9w2R577RHZfcT3/TLL7utLzAA2BaxmXGTF+mClZwd1Yk7NSFVZSkkhGjdPnjBsNpyOJw7HUQOMlJGsGcGFMnzVZFqTBKa/HI45Cfv7kdOU2Gwil7uB6LAqwxKoeWogssBgapYYp/YgO0eI2q2vcD1HLonoe06ne+4PtxRJpDyRxkJO2nCLC9bmhQVVK1xyTUQEj9T+B7SjfE4zXY584xtf53S656u/8POEYZXWeGBZK748gOG1NSHiAReVySL4WjcwR1SyBRKPeEufUXYfrlV/+YP3/PQdVKuDLw66q1UX3+bAyUpGvfcMfcdpnqjnL6IBOSVzOJ745OVLTqeRjz55ycVuw+XljovdjovrKy52Vwy3r9gfDuQiNnWsLNPARKxrX1pTVgPECEwpkyUzpxkvwvXVDuei+RuL3dNhJ67pJ+/PJ0bqe4JBbBwhdGw2A8MwEKM2dle+clDKt8YyYWlPb9CrUgw7SzXjii/H6y7iMLumTu2UZo6nE5tdXx/R10RHEH2W6ECyVknEtxTnMPR6TVVfmO9QIT/u/MPOL7zetLUM1B9l/TbVG7VqDnDJnr/Cr75VpN7eJGXOoZY0Sjs5Fzy+i8Sux82CKxXwLmbrXXOixEiMtZxkJRhtxFSBCkoPQSma1SprQ+WWaVSWdQnesH3G2Sdjzdgqri9Grw2UXpqj5lnbucrR6FrjgCpxGv+as2PlDFNJ3L665Qff/wHFDPrV5Q1x09u9kVbiXEoZVhwSWFENU7WeGv7Cfn/Hb/zmb/D0nZvm0K9LSuvlqlK31/V6ilEDGe1VEcM6LdkUFR5nRsXKbVVgllNand8b1mOC2V7U75MLfN8/efNnfIFLcTVhUVaVicIyhFLLmrJsk16FGSmTwWJZxiKuZamcd0QfuLy8wofIeBo5jRPGVqX3sWRc7QyumTDjZpVW+hJmKYjMgCOVwraPBMOmliabzoYFiJbFWDkrTvO8jb4JbEAD+pmSuH31ygy4cLHd0UVPSYWSSuuyr5+3SLD+/SInqgMqBpGiYP1vf/vbbLcDF5dby0Bps9ji0Lh2/rUEFULQ5hFUN0TDRrIWXVPgrilKt/IqH/E4P8VArykN11f4ZZTdR5ftTc2o1C/AxjU6ur6zAMhZIstkrSjWeJ5HvvPd73FxcUnf9XTdwFXoOAbPYX//2iEr768WtLUUv252E7TsKHPCgMsMXaDPwcZGKzWQOqF5MXZ+MfbOsqeg5ywWiEHWZyh4hu2Gi8tLNtuNJUGyjoNEyM4GV1QIz9pBZWk6sZ77FnCJc0zzxIuXL9lsN1xeX/L8vaeP770zJGs13G5ltC1zutlstJO8aGm4cRC/Jqo/puw+9vbP/7af7Kqy0YJMWV52iuusZXKx57qLkSmsWEugQTjqls8pk8qR4+nE7X7PxW7L9fU1N5c3XFxeEbqOOSVciExTWTirSw3gXc3TnEFWoFK5FY77PV0fCTHSUYer1Dy66TDTY3oJC3wrmnNaR/N2ITB0A12IROfPeERjFxm2A7GLxNARQ7SvDpyj63vmNDOnpCV955hLpjaPb3f6DHQx0PcdFxdbNsPQnEvbQfu/rGR3CRBr+1p1wVuV2unea6MZy3vOhOtRR+D19UAg3esvcc8FvyZ/8c2fwac4qJq9DFZmXG6284EQIiF2CDO17iQWPevd9VSMhffB0tRFG48wehQf6TubzNN1kGuUK5p9qtxnaPoei2K9CyQ0OpuTNrg4PHNfGPrIprcu1xrNem8PgLSHQFdZWCtqo5VzRgPk8K6QS+Z0OumkCHtihm5giEbLUDCw/nLDltsvq3/DIhrquJ7GE7/37d/jz3z0p5nniYbzeyR6ftgEpjJX6EIk+3XpqGY5pFHJvFGQ5PF/uvX75d
G3nv/S3h5c5pr948f6gldloKhcts7kR83twvrQ7ocs91B5Ho27jrndlzMQfy6EENlsNsSoVD4lKT6vFJ3s1UoziI2aXYyxOm968JQKRzc1B6/vDKrhq3OnDrUDNX4abbBkUBfYiq8pNVcDwsLkCofDPYfDjixami1JGkRHLFg7S1C2z68yWwMrGpMHWfjoo494/vwp4p6yudi0ZKddGjULsJZf37B6juA8XQgapIq0J6gGjfUevSaT7lzpvU12H/x4/sqXUHaB1x7ZiodUZwtzlrTMqEMmdAqM5ICUrFkXAQwjmXNinE786EcfstuduLy8YLvd0Hc6DnIYNoB+jnBC/CoLynmjRb2XNQjPgmUoJ3L2pBDYbHpczzJ9yk5nrdoWR29xeitmvnbghxjYbDZst1u6TvGnpe2G2pyMykowu+N8sQDUtcBTeymUR9r7Dh89IUDs1DFIuQ6LXNqqdJvNhqy+zg287kfX2SCala7AvY4yXN/ezyK7r7sDq7+sDskbsq1fhmV+D3gaeRcW7As61tx7RzbbrG0kC7WiiGH/raHzeBKOx5HTaSbPwvX11eKLOM/Jz61ac9ZIWgOi5kQri2edqDTNieNpYrvZGPZalYxnoZ4KrIYCOO1lcM64o606FLuOoe/pY0f0/qx5zjsdG31xuePm5prNZsumG9j0A8OwwYXAxcWlQnCOR8WdUjieTgyDjtjeXWzJJdOFQN9HhkGP12y+q37PCvfbvrvVoyaLvFW/p9olEXDaubt80mvS9/uWW0dP5OfeKj9vdVCjTRDBspFLqS6A7/BeJ33w4ARL689xmjIPEclpoXWyDrxg0cN2u+HmyQ2n08g4zYvtkNLI68FS7aHO5famfCE7uD+eGKeZqY/IxcBuM7QSaVlPhDKD7701LRWaQOu9UqMeggcXcKJNWfu7pGTMJfPs6RP6GKA4JIt18dEcjuW22E2RxcgXUeehlMJpKrx89ZJvfOMb3Nxc6Zixuo0PcvUVqF0FvZYwdH5ubtNWNDI1r7tGQtUneqQk9JlVW/N/TRYe+ctLueevlV//rJ/4E106MWc5S+f9qiJQMxtiD6TdG6QxKsROh0Pk5JRiKass1u7Swzhyf3/Aecd2uyPGnuPhwOlwJBkQvvKBLlCL88zLOmCZjRuvlMJ26MhRp+P44IjR4bMpOV8sQ7NE+VjAoiT56txKVfYIzg2ELhCHjn7oEWPgKFLwPpAdVtatTkjNZqkaKyh0p5aVi11PFtjf7/nBD39A6BxfuXifxyRqwbgqOCjU8xKdOR9DBemv5Re0c72sMjJt886+faalH27Pp8M9+Nsvk+xWY1ENY4MqiXH2Sm3utPezhFp93+EkM2enDqxTOqqSE2ma2e8PvLo7Mrzs2W43XO52XF9fcnl9za5kxnHk7m7PaU4KS/HzIw6QKqPGqWjnN+fMNI04KTzhiq7bURtt63/eaYOno8ZrtSHMW7XM+FidI+E10xsjsU18WgILFzw+BlwI1lAWGv7a4SAnyDohSrwjlEzXdwybDbvLHTF4dtstNzeXXF1dLZCAtRUGCyY1q+tczehn1o2RjWOy3g3JViV46KT+mLLbzmWR3Yd/+1hT7U9jPYSi1eWc2m7xvlEuVgqlEEKThXrfkYLkQsml9ajkvDCn5DIyTRN3r/bcPL3h+bNnXF5dEjvzV1RTnm1Mdc5a5t5V+iUBPP1mw3gatanQK8xEN1wdWZVbd/731jQXvfIIhxiVa3WzVd8JlWnlug6UIGy2G548e4r3gSc3T3lyfcPlxSW77Y4QO7YXO77zne/yyYuXhK5nf7znw48+5NmTpzx//syoO4tVllxjmBHzK7CrxyCOuv8rOwisQNGre2S6phRtwrBBMhVT3CjE/gDl9l1J/A958daP+VQe1FparA6qNycVjS3QU6hZu2LnUzm47KQ0BURJBVdUyVbKmlIK3gWur664ur7mcH/kdDoyjRPTaTLeSjWmiJZBnYAvletriXvnXLQBJc3kAjFi2Rk15qFhSYp9LU4ppkBD8JTsoKiD4b3SDpXo8dHTbXqG7UCWTJozSRLOe2YSUUSxh01brMr81QkStAxEwRXhbn/Pr/36r/Pn/8Kf48mTmxVY2aJ/ERRFLlDSaoqLRXjGo/qwLUzt1RLrrCD8bf3YOu0NCqiuWzfx9/33ftxP/cmts7jJ4krv9YHGiL/PyqVFI+ugeLgueHIMOilGCjnN5DQhJfHRRz/i+z/4AMTxzvNnPHnyhO12YDsMzJNOw5nSrDyVPuCsqeLsPpzdAC03HUftTN3Pxje5G4ixozq3DtcUuWYnfPVP9XNLQXw18qpc+03PxeVlIz3XTtKiMiyJgs5Er5PKNKCyDv+Gd9Tz9SHQxZ7Q2Wx3G/kqTrOw5+isuvMFIavDJOqW19IvOEIM7Ha7lWewCn9WyvX3tf6wya4toVZoRI2H0OSg9Ug5/XfKmU1wTRO07FUt8xcNQ2YpzPcH7u7u+REfEYPn6ZOnvPvOO1xdX/Huuzte3e8VgnWczoN3WfStrwew6lMIqp/3hwP9MLDdbtn0nQYlaFSoTbP21eBj6lQslINRkxc+cLnZse0Goo9mlPV6t7stz995xvF44OryhuvLa3bbHZthIEvhYnfJy9tb9vcHShFOeUZK4ebmmqvrK4oUYnT03ih2POr4O4MIrOaOrgdYvKZnwXB/i87XpgffjH4Ljj63JL9Zdh/60j/NVZsui6A9EvaaOLXxMXpGc1C10iR0sWPoek7zbLbNwnYL7KUIKauLXzlIEWmjfj/6+AUvXtwyDAM3T27Ybi9bHLoe14nBC3yoDmtYDafouLqC6MQy9R2uaJMX1TldwVKqX1RL411UOFmMkT5qw5VmhvXzlXtU8E4zvcNmw7vP32Gz2bHbbBn6nhg7XIz40BH6jm63oR82HMtMlkKSTJFsDvXCLPCoSK2v23yI0koJj9+7ZcoVuBJtzoSzYRufV8LeLrcjz/km/723fsKn0kw99snOO3z0xC5i5oZCIYs0oy+GefAhaPam33IywwjYaLjMOCU+efEJ3/vu97l5csNms2W72ykxc4jke8VGFVmaptp5YI0sXo1qdUNSLtzdH9huemaXiN4zDFEJZ139epiF0g3VyQsKU1iYBALDMHB5ecnl5SUxBFJKFt1lsiRSySSnRl4nETmdBYyKea7y4dQgh35DiJrlGzZbIydWQVHCfoOVt7KyRk7e6Vdj9zNHZHFml3KqAXggvOk2VyDCOr6X1yT/jWL2QD5+USL/G3n2pnd/oeuMlkucZprj4szVTGjNPHl7ekN9tkGZJ6Inj1rq8Vbe817YDFvSnLnd33N/PPHRxy+4vLrg+uqK3XbHk2fPCX3H7d0d/bCxWeaRTFrCBTvHmi0Xy46lIhyOJ8Z5JpXMdvMUY8yyL73HAcN0uWryrSGrNnTYeNHdZsum61sWqq7NZsP1zTV3d/f0sWfodGRp3/VstjtKzpymkSlldUA97LZbhmFQvFYIDH3k4mLLsF0w2bU5QgPoFQbq4T2qWQ2WctyaUUBT10UrOF4eYBCkf
cryeW+QXYHXNNmXWHahZjTqIIgl0KzOe1DFR6WZklKQ4uk2g44sNT0cK8WTlpIAyKnYCFuFAZWihv7u7sBut+Xp0xtunj3D4a1b3eFcUL7odVYmLBl3h9Nz6gcuRAewpJQ1C0sd4byUz0NwZxAc7yOdc01uY9cRfaTrDKfnvAXmgT562DpCiFxdXnFz85SL7Y7ddsd2u8PFwDwlhg8/Yru/Ixf45OUL5nmiGyJdHwCFrjXanTcY+jNYX1OE638s90ekEvZj5d2677rfLjycUPXZ9O7rp7ZKlX1ZvFNb0pyi+rMlt7zqJh80kHIGjcN7uq7jYJhibSj1Te6Llfuz89aAvLCOJGyCVCnIOJI/ecV2O1E5nKuvEuo9rn1Bzq24rvUZ6zc7ghc6C4a8d1C0gdS5YpSZpnc9xrOtWGMfHF0XGPreoDJlVa3ThJ73gotq+wWIIWqW09tQDpv4JCUj3hNipz7QmkAfMVpBqUxQmKthNmXtkD9kZym2d5rsOvPtSgYXWsU5GJ6cYn8X7N9L1oDlp88vt55v0vG/BP71N8rTWx3UdRfx22K/NT6vljMqhqManthF7abOyyVUMPTheORHH33E/fHE5eUlfd/RddFKp9pVdxoncL5x87l2A+zsVpsnzjGnQkzZIjMhxEti1E61dTmhlZlcpZAwA1+NvEVGm2Fg6Hrt4Fxdd9d1XFxeIHKg7wc23UC0Bp0QO8Zpsm5BaxaQwtPnT+n6Hm/NMBc7Nfo1Sm9tKxYlIrUjspwZ/NqA0hSVoNkqcTr9ajnR1+/gY7b+sd+dvbyo18eCsZdc8B+Xv/il6ITOJSMlnl2bNwqxkrLKYdZSicei7QBLhkT/sIuRqXWmstyjItqoNydSUV7c0zRyf3/kYrvj8uqKvotcXFwSY0+ulWpH6yzVLNTambYSp40CTilxGifmXOj7XgMTu90eaZF80744M+QKZ9CIXjMUMSw8jVVJ98PAk6dPEYGL7SWXF5dsh41h/nYcjyfu7u85jCfmnHl5e8s777zDsBm0k9RDFzxdF0yh62lI9SVtz7Rs1y7xjeVIrSipM1rdnrbhZw4Ci/B9RtldsgqP//rLJLv1ma5YN2C5/pXH5EMNakGy0TqZg+dDIM2z4VMXw6U0dbLQ+GpyiWwBd8oz85wYU2EzbBjHcVEfq/tYy7bUEr3pU99r5aHvAl0XVecaRt9RTPfS3q+63KAszoKqloVSJo7gawNKaPLvvE0cvLxku92xHTb0Q0/oAqHrEBz9dsOQdXqQjxHyvEo8yJJhfuuS85/kofwutowaaIro/QgLu0Cj/KuZ2cecyzeezKef5ZdpVZeo+gQ12xe8MzadOvdeO+9D7DRYMXtXn/2aYX+stUeP49XJdepMuZwZZx1BXlP1Tpwi3dqjpAZWTKdXHL5zHoxhBecaWX1rkKry0vwFy/ZXppgYCFFHWdckU+Xgbvht1zxkQvQr+jVL8ZWCcxrg4J3BV7QPpyWfVo5iHTD0mjEWqDzomijE9nFJyFRBblURc75zzvgsiFUZtcHsEf27Xp9Tbjc84U/wr7z1PZ/qoFb8nFtdVMv82P91sMOqOQczSiZYpRRC0A64YiToLUUNzPPMq9tb9ocTd/t7drstFxc7nlw/0VniQNdPpFyWzuwVEdw5Oe8a7wnjrHxnu4st203NBnAWPXtXS02qMBeFuJQxN/1gZMwrPIdzbHc7nj1/Tt9v2PQbLncX2vQVOy4urvjo448sE6bdnTllvvpHfoG+7/CWoetDaLgsXBW8mlEyB5UF8E/b/QcCYAGC2IjTSmHRpjkg0Kb08Prf89hrDxoZ3rAEeMUN/6n8Df7Xb33nF7OKGE+p8YmKZXXEpihJVqOirp1b9qp2PxtpRRc7qq/V5LqIjYzUUaWSlBx9nCbu7w7c9nuuDieeP72hH3q7l9qY1ZyudTnbbnzNhvvg6PuBNOnrRYxRAzOsSIN6LE6M/m2wqWxaio/0XUcXOw2anNd5FM7jg2jncfBcXlxweXnNzdUNF9sdm2FDtxl49fKW3X7P/fHI4XTi9v6e3eUFm81gZVFZzTeSts/VgC8NaktZtL23Zq/t2a2KsGZedXyyKXVxjfpkoSl5rPT/GWT3ETH+ssnu0pB0nhU5Sy770vCW1XhV2FTFYhYrWS9wEONRlqWJCFeb3gApiuMfb5lS4vLychUk0+S2OXeuBhGrwMrKqH30RONX1NKka1cTfJXdxdEN9ncherqoshtjxY3XDupg+wNePBGl3HHGd1ynXRXntAk3qLOqlLGrG7+SUamJFGxfz7zPCqtoM9VWSZH2BzjH2Wz1SlfYYEWre1jH1da9X/yNzya7cv6W187ly7BaA1t7nlXX1YxlhQyqL6jZwlATP9YXgtDkuMIu2uW6xWHVP1FBWlga1j7I4hcuHgu0zLadS3GOIHUMcA28zD+g/lyrrgvMqtJg1qTA+VRIWqC5plXDKQ67Oo911xrsUZYR7D6ElVMJD256k6+1NqzPcqnPLTU5s8S3bhF4rRobNVwt9S+5vwVX7SpL0fpRek0Pf3a59dww8N/kbeutDmqWRMkFH6RNGMEF6vzW6J12BGeN4qtz6hxYUy5QyMUwStT3CF0IhODMmXLMKTFnbRZ5dXtH8I7t8AHv/9xXePbkCbtdp3g+F1rGSYG7dTMWBeEso+S9YpmyCKfjxNOrC5sKpOewbprSv9FQqwpcCOqgRh/MyEfDrSjPpQS4vrlm2AxQhIvdFc+ePGO33TH0PZvdJf/Vr/+adj4j3B+P3N3vubi6oO8itYs12J7pnG27g26VvTanuP7/TYpISiaLw/f2gRZ9thFm1Rl6oMseZrTOuWJXv3Bn315bT3iHf8X/99/w2y921Ya8mnUGCCFqsF2sqaw9KFCzkOI0O6q4P2cGcKF7Cr7i5XRvK6F+MtqvjGNOJ47HkY8//oh33nmH7fYCV50Jp819VCO/Kjs5TNkFx7DZKJdep13MVaad0fU0GpsWodOCwC5a41EMNs5PM6ldjNb4AsF3dDGzkY2W9ruB7bChi9rhLA7mkgldpGfDaJmoUq3HA7lwVGO/vKa6XxZ5MoP1GmWqpaZynhEcnmjj5xR3Vg+1VtH1tc8ku5/Bbn+ZZDfYEJNKl9MwZIUlwCyKdxenTVBJTOYR4/4NBosq5uBptjWXYlmnymghiOGQq9EPTonM4WAT0ZxR7HrD4lU7K2jDRpVfZ46HNdmV2sihx6n6R40+bZaG8yrPLmiGLUZP10dyTkvPQ3PaDQ/oFWLgvScn05dOq0qKccxKYRajlpSrDpUK5zHj7Gh6WGnW1tIm5DTZxCq91iIJi2J1x2oiwYYSqNOZSZLpRBttMadGqoNUo4UHevjT9O56ndd5vhxrXTZeM57USqwUWTWSOZvkl21Ko02WstGxAe2Or2N0z67Ufqwy6TGZs/tXrHkap8NPZGX3zpxmc9jEso0BPU9NgK0DMD1lrd6u7HHNohqUqvoNa7jS+T01ZpZAm5x5NpIbHVedsk7O1KbdYIGRbVt1AJvTp+fiV/svLMOStFKve6NVk7X/
oJ+gz4peQynKDRuRBVNmdIxNYP0DufuccvsRH/B3+T/zt/g3X3+jrU/NoKpXL2cPk0YoStuhU5TWEY63ySAe81kpWfCxo+t7SprJ86yKZ5U6FlGi/jKrAU7APB3ZH77J960UeX1zQwy9UkPVaKTyfprjqDfMykExsNluKblTBzt0eLLiOP3imColUS03BYYQm+B1Q8cQB/quow+RoeuMUqQQA/Rdz+XlJZtem1m2/Ya+75TX0UHoIjFqE8mYM+kuPRBa0W5PU5TnCqdSm9BKESp+53g+aQ5VYZoTXd/jfDDhKudR/E9wvcP/j3+dvwl88hM/1qetM9hJWeGNS1FAcD4P65yD2EXmKTcDV3Kg22xwIZLnCRDN6uAs472isXFgs3XVKXAgyfPhhx/Td3v6fmgjgyt9VDVo6yx+bbLrh8ECOX1v33VIUW7ApZN0cVCrPPd9pAvmlPY9IkUdFr/AVkBFIxC1VBojzTl3QkapsqaU8F3HECOnWcH63ihVarmy+kuPJIAopZh+WCb+tM5SrwZfXy/MaWIYNqR5JgjEbjBFmHB0PwkROVtfJtkFNZLFLQ6Tjo009W66WLOPlforQ1Ysp+8C/ZAIdfIN6pS1ca8+II020NNcNhsrq81yQqnY9lSM+cSZo1hPwj5PtDJU7D8nGYmBPlpzYMXyYXLXzt06oa1SFSzjGbtOsd82YUgseK+6GuOpVIc2AKkZxvrsjtPIcTwx5Uw39Nq0ZFTEukr7h8iiH5uRX0EpMCc/eLc0kjyQ93meLfsF85zpozkfD0huXvvDn8F1pnutZJ6Nuzb2AyFMYBjU02ni2Y1B+cT0REvQ1MSKvqprvX8LmWNB9PO9J82ZTMaVhHMKFanDATRgXuyqiE53lFIIZ1AAXW28aZO/2iSlMD7n3CK70eM7D9nsulsCGAAXRNkqzNH1XpMKwXyPAqRcOJwO7O8PdJvN4vC6aver0+8e6FxNIuKcTXur+HKzZdZcvkxeebiXy2qVmFX29CexAn+cLf/7t77n7ZOk6npwflpasklRgKrAdYODpaZzxXioNx9C1FnhFFxYNkcdYUhSCC60gwYKxcFxnEgfv+Buf2C72dpnurOJO8HGRNZ4woeI0pcoRUkXjKuu1Ohdzgy8Yl5UWEJ0BO/pusim78EtlFt1KpABWew8qpB2EHx7WHKacTESenVQfVx1dlr4VpwQbERkdUBd5YlcG/Tq3DjXSheATjKaZ0qBeBWZ9/eU7QVnWa56Xx6Ur167t48qT/fI+86Cplq1YGbHj+TP8Wcf+ZQvw6qZKc1IYRk+fQCDc3QhIFFISTGsOReiBSRprvJWjatGtjrhJyjTQyvjW6AqYsoCUtEudnEoPY6r/JLUE9H77bTRULIqHNeFNoimwmZUhsTgJlbCNSOvGVhlnPBeMUUVh+gs6NLIXa218zWa1zKpXpMgOSt7RjbQftcZUftiuJ2V3JVQv0qKGX0D5EuxUZseMzqZpaxfRVPIc9JmiYPy/w3b+oGPyeTPtuy2bOSDaxJXlpncTml0KoZTt9GRpsTQB21iM8xm+1yzN87VvEDVL255XawJ3TiwFbNayBk6b/34zjR+zUBZFhV0NDA5Eb1rTiVrHQvNyK8bwUII2hAVIz4G5SlN/rXb6s3AY533IThyBhedya9x/6aZ/f6OOWcub270WKE2ZUnTr+vhA07EppE6y0yfZ8LWFS39Xr98y46uz3M5+aZsVub+gVC+LgU89rbHZFeWK/qpruaQrpJO1X7VqlTXbYjdSJoM85wSfT8QYoRUaiRizdZlcSZt0pFbbYCYDFY5Ct7jYyQfT8qN7mEiE2My3U97TjSzWAxSIEhJuBjbPV0j4Zx1+y/yagmw4JtNqR3vPqh/Iw58MG5iUTpI/dslSKp6nBasB3DCPCf29/eI8wxGwq/nor6USZ9VBCr8oD5d9rzhqE44iFVNlmOL1Gw27X7V5Ee9ppqxfU1iKy6lbujZ+uxy6/kGW/mfAX//jTL12RzUh6dgJ+ect3nbywNc8RIh6EObi5LzgyOYk9dKISxOojpjlZ5qyRUGcaRcKKLUUYK3aKNpCyuF1b1wbZNKWTXAoBimWiJ1teFknYGyqMZZZ14I3sbvmbAZts85r12oVCO/pPpr6r2sMC6xZmitXLbcxmYmaGCFeln1123P15bGrb+dZQnnObHMPzeEiFQs1Ge9v59P2R255Ov8Ff7lz/XXP9lVcXgLNsqMqyyOuw8el13LJul9t+jYL8YNtHt+uZOuzWKuq+rpiovKosFE7Sq1tCmNCtmenYo/SkVwoiOA9eHWWT7N2BtOuSrOOm2tNpkoebWnlNV52uk1I19lN9QJbjT5FYQ5J8ZptuAqIvVjGo/eOsukv6yGn7ZXKosVblEN+flzj+HUg0X6xZ5nac+JyvTrFD8P18+S7D68Eq3y2+Qk+x68J0TLLIo26w19bPjPGgDJsvP6WSsapbMjuiXJ4r23DIw5Gl5xnZrpdzWmao6CmtDSSNhrMFUN5lIapWVD6whRV5v7Vo1/PnjLoquj0hzC2vhV25gr7EAxaFQow2k8cZwmfNdZ40pl8aiNprX9ZrXfNrFinUlt+sLV9Is727O1UwYsf99YCwza8mn3+2cmu1qf8RUtmWXznAvEEMlOewTmlHBOIXTFMoB1j0XWmuX853qUZtmcUzo/4/9sWNKcGUcxViCjqdJPbzaAmrBwFb/tDbpVAzffHFD9Wnh3nclpTZZ57xUSYs5gfb05fA0iY9AVloty6DMnCCklDocDpWS9xqa+ZVXuX3k7ZssqJMEBkrPq+EcczXrtbrWxjnoNK+fULb7Ym6qwn19uRxzffus7PpeDCqYXvOI92vg4qRFHMeyE6ISOJFAwqp/Yyq3iSnuYdRv8mactUIEmKlIOUhFAZ4077whG8mz3i2phSymN8sojZ1MgmrJ055F8vaaWgTJMX238cIbtgpqNs0ijgvgbbm4xB3NKhJK19GY1hMotVsc4Fkorv7UO07dtvqyEohoK9FrySqDrs7vg/hYnpf3wmb3W1+//2X0CDuz4Tf/nPuMHfnGrGral2aFqPptvX+sA6/D2QURZldESWPmmYJbsiRqlGhoUsKZd/dw5Jevu9K3BqA5+Oss2mJF3JZMsq7k0zawM/Jlz6hoWSp3UoJkoq5OuGxe0EhAWhRv0qK0caYo055n7w54isNld0KijgJqrqpLuqsAVpVpj2RFqF2pZYsr2vUKialmpBVZupWjNe3Znn7pkOT5dAF6Xh7q+7LLblpx/CTo8oh+CNcZNlJSVaBxPjH3DSy/Jg+XDHIvstc9fovzWYOVwSBEdUjELoYsE79rI5GKOXnVQc1mwb1Wvtqagajwtm69G3zenoDqnzhIOlcqnkClkfKivq+4XZ8/YGfSgEooLU5o5HA767M2zVhw8hlWtDSqO5exc05W1sbRVsFpWauU82TWvnTDnvSUiqjNUPYGyEjz3SHDwhvUZZPeRt/2U17l9ratYtjLGjtknckqkWSFVXT9olcn2xq/gF+cDZs6Ps9YBFaZXfQiFvhVOp5l
OdCpZzWyHeu+d6Wobfe19wIdIKb7x/De2iVD1pSaqGnRqBdmqDnJz8hwrua2wLj3jCh9YPMVKv6Z2aZ6nlmxqrCj1T6i+o9kbEeMl1gddig46qFh+oHHJs/q7lrSx/VvGnlriTDRAXBqtl/N/qwx/BrktbLnnT7/lQz6ng+rrNUsxbERW8n1Tgh4dXajpaH2Yx9PM5nIgdr2W32uEYQ4oTmlGSm08syxNnbPscGQj509pbo6kc6IGtkW8JphkdLx9xotA8S0SWj9AwEo5+lZmijESuqhleVkUVfPrnOEQK5DYgzGaUQjGw5Y5HO6ZcqIfNuocxEjlhNRMVGkRTT03rHHMmSVqZTgW/2G9zl5rktBckrP3nSu1Gh25115bH+X1Vx5fJzzfZPcp7/opLBWKZbBIrb5JbVyopPRF6b+OI5J1RGSaMzF0BB/bw6vzthfnyhD56D1b5KrYsWtpvWRRDkoRnFOcXfUSKrWYRvO5ZRxLJdCvgRWLQ+qo+GnFTmtTYDCFqFY29AEXdBZ6zrkZDL8auCJgypcl4veeLMJxHJnmmTmX1TNjnHp21h7XJpusm2U0cy+tnHTW3NgM/Po2GW9yrTTIis/wNTluvcGr13+WZHfJeJy9VPt1fKXq8QTf0fc9Yz4xzwlE2SdCjFbKXDWanAXCK71mPxd0uhfUbE4iZw16JcM8Jm3QCoq3W2cPSzGZL5VUPeBdtKpYqA+dfXdLFspr93OoHftmKLVUKgrtVuipPrsBWqkJlvKtaJY02nhu5yDlxO3tK0QKu02vMZiIjR6mBacIqyY/y6wKSKq4SHWUVO58G8ZTJ7lmCwS998qYYVMBy+J7tM95XCo/v+x+GZfqJnX4ve1xKZlpmhiGLfOswVRKiXGc2Q5bRsl6f3FadS1oCbvpaZAzTaCrYpTxSj22JG/skSmZMo6ErDY9mHNZP6+4gLhsTloghI6SAw7lsnVgOnbNLKDOrPoKS4ClY0/Nb4heG6I6dfhq8OUfBj7Vd6k32ijJvFdfp4vR+F6lNVGrj+Na4O6cXqfSYSlPe5Gi/S1SByiUJveLk6hyvWSGvcEJ7ZxYe6aPrc8vt7c84z93/923vudzZ1DXyq4S4IJrqfUs+osQOpxLzHMi+Eu6rjc8hkcL7gtSAqTRfkCNBozKJxe8F2IMjKeJJFlvvni23bZNulmSDDbZvNSsmZXZ8VpCfDSDWiOb2plnAuY7nIdUZlKetXs0OoXFuMVYaomAdgY4x/14Qk4n+v5EZ8S79W90uo7557gFx1dEM7iyYLtCiIpr8YHgI7Wr/AwjZXyZ1YmCGkFl3CMzfv6gV+YD7uTfA/5HP/FjfeYl6qjVsrg2iyicRJezCBpSKlxcbDj1I2maKEUYp8QwbDgejM4GVZYpa3SrHfVOMT5UeXCtq7KWfSqeT0SQaSldD0NPxWA3uiCDbCg+Sr3IGAOSMohvTrA3+fVmEKs8L2MXjf7G6Zd4LZ376BT39DDacSsIgZFie6eZj/v7PVipVYErWTPAOOvWrpmlAmKYLXFQRxS6xcn2LrbEUi3vV5hKtOyE/ruAN2zWY7eWT1eCn3V9+WT3sVB0CYMEw/VlbWDquoHTYWSaEjkLzkf6fqvOURG8OKJXcnDIZ46pfnDNsmimJfpopctzWMXxeGSaPd3QEaWjUkAt4XNEmBt1VYwdOXu8ExtlaqX6Rsuzck5DTTosz41rjCoaaDljmKjB2nnDqWvXEbuoPKneU0phHEcu2rNWLHMEkCzh4vCiTrSU3ErSbSY8ChuYc7bQTJ9Vi3P1+KumL8CmJOrzoG/MQHwtUfCztJxbZQtrJtNUTRFhnmdubp4y56SUYPPMnBMbm6KkOq0j1UlzDbfuVZ9gz4BzbTqSF6PYk8Du4oLLcSSlWf/WBY7TncHvnMGnaoJImVWwBmQfqoNmXzm3KpWVUC1IYZFPp8wuCgGMS8a46lGv2H4XjIliSUPSMgS1WmTZk9PxyP1+z9X1NZRCb0xClMVXEkrzZ8z5aAFnMfmtOraIYX0rbIHlFJa+ofN7qM1j2aBgNTKGN+niz7M8PVu+8tb3/BgOqj2s1XcEzV56h8/aDOIsrVRwlFyIsccNgfF0atmbGCLB+yVnJDbdwK3ySBaZVkGs+A+cNiOVSkAr2nUcp6zTQVag9uWzlylBIlWZ+oY3bbxmvgqYzXQOtRSv2SbNSKmh13/DUguiCVkd50fFVolOtpomfWhiqPkfG6W6DmdqnGiCk2t3fn3/mp/MoqmHdBYLzko38iw1/+Zbe77e9H45+7aUau3fV3T8i+69TznYF7NKqfyxmtkLzuPRTnZne1Qj9Pr+PCcwejIlTFbA+pOnV9zfvaRWA8BodFb75ry3jCF2byFQS/C+laxA5XzOQjlpBsyFGuKYwrKmwtqEpw+cV2iA2HPnpHb7rTDQFZayZCu9Kcj6PmO9WZAM9R42xxRV5IgFaLXUptmp1RmpHJQVpMFKSI7SHM+aWWp7bM+klo8WuXVGJ1WdnSLWleppOZPH8k0/i7ILvH4dNfjA47M1soli+Hzo2W523O/vtWkyZ0ou2mDhkt0LoWRM36ySAKu13AuVodj1dBm6TqmWfOdaBqaF2EUQv8hNnXHX0MZV/vTXRmdjJVN3jj0N4TxZoFWBVUZ9lYF3r0dX1V2khi7zNDOPk0JdoFUdammebDrUbIPYda0D/5yLNr4WZbZwtaJn+r7ChdbOMVIbr84hMW90Sz8t5fQpsvtlcXbrfZQieFcIzpvPUKniloa4YdgwzzNj0ibKruvIc9SkTN1/MCdrFYSsjtd+rk3GTrPp19fXOhgH1W/55ayZVWsaPZ1OZHE4Z5CCprVrgqF+bs14rqgnrZG6NqN2XWj61zcmIJNd+2qpf0xGZKmyqRBV59XbsIyse3M64b2nj9YIKVCnO7maAHiLba8Ocj2XGuxRnWxYJiqKVgzP2St4/eeV7H2q3H2K3H6FD/m33P8deDO939tppupNs6MUS3l4O4prXv8S0eozrg9213lirMDholnToBOW1MFfd//XS2np2LaJZ11yvoKg1UC7JEzjhPMDDTtRdSiuHQdzFmqXYKUqwdWoNzQsVAiVz8zOQ6SVc1tZ15zRs47O1eZXfVpvfAZyntkOvTkyFvHYT+tSPLW8W3Ij21UM31KyFzvmeRH//O4t71uaA6pH4izqen09fE3aXr5tqVFKjPLxW9/3Ra1cMiIZ52zsoKtyVOfUmyEGioaeRseRNQhKiZIy87x0ma5B/+qI2Y/1HlTD72iNLB7OnMgKoK8lF58SwdXGwYWPUf+yOndVXtfYLmd+a5XfRYk2An9YIADVUag+cGWNONNx5jiiD1DOhZwSJScEFNOoIXlzYut7pUmyWzJUZtRxbikxtYdTzjCQrWHAqBLWzpJtzpkddz/DsluDmgWTW/Wg6SdZAvucNZDqh4HQdaSclYGiFDXKWfHSlR90yRw+WLVUT9VljhA6ht5RdpmcslLoTCeEopQ6NvK5NsE6MMjXym7YB7Z7Vp/DMwyqX+H5Vl+u6uEa4LnXvpYGDWfFEn
3WSs5M48Q0jgx+o05k/bu14bQE5+sisrZJ5tSXRZeud7HJ9LqaJcuetE9pQWrV/Wd3/bXjf1bZPTvPn+Kqzo9joTQLIarzD5Zt1D0Y+p40bJjvDzjnGqzOtfFK8DbrBquAam18HfRDX50TDbRvI/0wWMk8cTqeDGalx6iP2epJ08bQmslvXyyvtb6TlV5e6+j6OS3jxOq6WJ41VvILpJRI88w8TYyxclgPq/348e6xJuMU/rVy2pqOfY0SDBrdnFBNWqW6el3rvv7KZ5dbT2HD8a3ve7uD6qRRyIiA5AIla4ncbqz3VjLHNSdNKKSUKaIzZ72PhukD7yN9NyBlZplmsigN75yW9O2FpYXFFFtYskPVcTseT+A9oavzmi0f4yyqFkCMA1LC4nF7WqnJNyO/GPqW6rYSgGgKoH21MWYsjnqL5uweKQ+kWKm0LKqp+opijmPLTKgXrGTySbkvzYkXM+pVoaphXcfpUOumItWuO/ChCdejZv3Bi+dls/Ubz769tvbc8Y/lV9/w2y92zXmmlBnvNnTB5KLdX0eoU0FagODIJTNNM30/UHJhykfjNwwE3+F9LUVz1nSmWNOl+FKrJuKWwMYFxTcDEKzsIplUMk5CjXeArJgr58D51ePuFkPveN2Yr5zTM7wnS0aqZlPFnouqoKuG1rxvdTzhdDpxOhyZThNd39PH2Dh3vdPr8ytjreN4F0dIsbQZEUfJaeHydUqQ7s2AYBmoglDBg1IE56U9Y825tuVe+8F2/2dAdheIxsopd8a8IMoUUnJqzXSgJe1hs1Foiqjejl1PLtma8pZMekv9C20MwhLUVznU43Xbjq7vyFlpeLi/JUsySIljnCakBmsOvJf2+VV/OucUUVJjDXM6WTmpMS6UPeuqlmpyy7ZimG2pz8KqKiG1aVGvYpomxvHEOE4KGSntoTW7szh0a1TZstYu5OIMGznc2f0qZv8W3SsLawDVedLnuX7a+nv94fPK7pdl6RjwWh0t+AKx2xr7jtjIUQ1Wu75jWzYcorogMUa6rsOoUAEz267lVM5WhV7U5Zw2KWcpKjFm18UC6L4biNEzu1H1jmjTpsUVtIZPt9j2h8GUVlwXW99MviW8qows1FplxZtrDutZgHVu9wVhmibmaWKaZrwfka6w6YfF0WWVwf8sEuGcYkuD9cVUHWB/2ipc5kspPLAgtfvfqh1i1Y5qg+rZwOeX25dc8J/KX+Z/8pbT/9QSf8EAtmkm5LllRxQCD9F5ghOKCxSvtDZZhHmeyCkZ3U1HztpsMXSezbBVQvSccVnwhrOTtZZcKY28coa7rmsAdO3k199PaabzvTJS2t95+7mgpcUYOkpJOBFCI+tfl5iC8Zk6ls68lZL1ixJ9436Jll+Dgy5EDis+s2DZOymyjNZcObPVOYUVLs++pnmiz8O5M4xld9fGxS9OTV3rDtSf5PpF3ud/5f7HP/HjfJY1TxlJCVcqjihQS5cajAg9A4XUGqIkZKbpxPXVNVK0izKVzGmeiUNPdAWHNfLZPTjLnNoLC1dfVWSeoR/YfGVgHEe890xp5ng6gtcgLBcBKY1WyjnFgFanrTm6YlngB7K74KYXXj4s+q2/W2NTa0l99T813G4p/4zjyPF4ZBznJnMiGK4bc7RVadcs50oyG/chOOZU6Ao6ZrU7VzvOOWJXm7dqBvVx7OlPSo6/TLIL4FZZ8PPXPZ0LOpEsRLqup++0ieLZ82ec9nukKP3U9dUFLz46fa49ywiZrBPxwqZBPaY8kfJM12tF4e7uzoJnlaeclcaveqOq2jWKU6aJpYIRLKNWm0wWvHaV1/KGs1unPNfyvLx+Oo0cD0cO9/eUku3snjYnse3nZ9kacRqgusicZi35O3d2Fs4vSZXmpLQU1H89VnU+SzbbXhTq7mM0p0gNVUmJvu/Z9AMXF8oOEnwghm4V4P54MqtNy4pdv7fSeNdpcAUQQ2DoB2Lo8P6WlBJLaqdm/GtQ6BFR7KUmsIwf3ddGqerj1fNcMxUsAwDWWcnVmeoxTO+W5vCpnszZmhJtH085c7HVHpti/kK1Y7S/e3zpvmrT3rpBde3aKk66BoXLa86vvIifkOvwLt/j3yj/NvC/feN73uqgOswYiY7Hc2uMgqV8vfcUSdqpLIG5ZvwoGr0DFxeXvDgdOU0TXbdh2GyYpj0ZR8KoQgBz41dl1CUzrtlCwbvAe195j3lWEHQBbu9uCVEFq4h2SovXkqSUQrDI1nlvxLac37DqfNYsalgUplg2IYRo+NlAnfqjQVZNCyxfNcqueBfFgDhKUPxsveCGI3RwPoLMIkIfcE6JsoOPOvECjejW2MGK3aoUQo8/3A95zOTsW72nj0vBw/ed692aOf861/xt99f5rUc+5Qtfhn+soznX5XHNciqeOMYOnDb+4AtdjHRdZLvdkNLMuN9TSmYzbIheIBiwfikXtLWOrcCqgrWpKAjbzZZhuwHnOBwPJMnETrFB+/0985zJBj3xCM44htdBkk4iMaVY6XosWxC7xVltDmXBnHP90uzTg1JjE6b6v0r8LszTzPFwoORC33VUeIo4dcR9WPZ1/Uytl8PThR4RyCmv3mPZJlllGni9S3f5nAc7/DMqu489vxqwZu0y7nq23YbiRpwRmJdS2G023FxfWcXAs+23HPvtyph9tmNrqVZ1+DhPjNNM10Vi11GSThPb9Ft8DMTwgpyyJQ+tdFAHMXgdFZyzx3tpRl5Lo6sG1bPn0y+yyrx6/aFsLfpS6Z3qcdUG1AAf+3lOSekOBcve12s1AXirQ1SjQ4/3XTtPXdrMl+vs5IcZJPf6JzXDtmz6G47JZ5Jd3fWffALiM61ikLacIQshJW3OE2f489yoyGKMXF9dQkkko5lTKiiD8rEAh0yyWjLHe6/d7ivnqlYUU0pM04SIsL3YUccCK/5Y2Gw27Pd7aga+DQ+piSgLnFxZ4051v+vkJ7+WSapuck2XrpuvoTqGFkS1+yaWFKjBlSdlDe7q31cIT/0MdW5tX5pzXCsD9Xt1kCtUrLJyRGXasP4zXxkBRAxbrn8fglb01sK2TjysyjqPCMBnl9sXXPAf8hf4194iTm8v8Zei2SVRb94ZB1TdqJql0W5bbZiKoY7R0xsbQ6DvB25fBuVEFTFcihlGm+qgpaqw4sxb7UfdcDRr2vc9/aDjy3LJ3B/vGTaDpsjnWZ1XmwftLOXf8KulloyKdo+2sqjOgQ7hvExaI2HHUkLS70sG6gx4Dw3OVUox5a1zoUPMIJft+s4xzovide37UubzPi6lY6qTce4d6QMU3qCqPrsCe81AvuYxnGcINYoQnvIJ/6r8v4C/9ZmP9ZNatUFHSmmKopXcqrw5c+h9VGXhc9u/vu+53O0opxEpwtBvCF4DhUUlmcNb/aWWtawvuppiRcfQzguNkuF6hmGLD57j8WS8d2Lyo5QhVDq1hsWTBbS/MrJrma34RWFVfmzLPfiCh/ez/n8cJ5XfosTRSh1nSvLMJMpKMbuzzwJzkmtZtwV9Vlozg9CqCM3R/XwG92dFdqEqcksCFKteIQSp8Ahvk
ItsHbeOi93OYFgAbiWv+u+Hu9qSA6uua7CGIqfUQGlWurztbkfOWTv8vRr84AOVJ8WJVZAqnHh1Da79o2ZJ2z81JFo7qG9wFhvG0Z1Lav0gNf4eXHUYaTaq5NLe61YHFlzbq8XoQzPcrN+vX0o4X+2zmEwrnEdpfn68+13X55Xdz/us/EGv2hBVz7NIYc4JyQknwaAnhXmetbpqNI7H+5Hj6URE6C1Tv4bxna0zVXbuJyC1cU8bjXLO5Ls9OVsALGKy5ldJosWpXK+WEKiJKGcVAAuwdHiLNR+586TA4ies7c5yvPV9dq3jevmbGDpi7Oxz8llixdUs9Cq3pQ7g41nnivuu/5qmuVWKH+5p6yl4dNXw4JFjfE65vWDmL/HiDcfT9XYHtUWiGSnZyjd6oHX5u+J7PFrWFgMcBa8k0pvNwNAPzXPT1ztzwPSiX2s0WRs4oTmASrKMYqC8w2U13sNmQ+3aP3GkFFVUTsSa5Cq3XqXpWWH23GLglwap5Rpzqjd/+VqcyHOF1qIcwRz7YvQaEyUHdYyq4nttv+szV50AwwqKa5N/hMXAt9UU5aLEH11nr7uzb59prd575pjZa5ec+Kvy7R/jA39ySx3UpB2lVV5rKaaozOp0T6fOv6fhdEspSgm23TJvDiCFLg4Ey8rDAz1p36Uqg0ce0OqgyjTRD71mGMTRd93SXWnGsiry5rD5SpF2LqvNWXVu5axW2a0ZnsVQty/OlUoLxOozbVc0T6k5kcWcfYTWZ/i6Xn9EmCoTQVPMrj3PrirsdTBo+uA1ET5TCT/bsiuy4pR1S3NdkYB3CldylMbwkbLKOiL0fUeatHqVUm7OXPUPHz2eW+ShubIWNEhRaqDD8YiIBSoxGNReCD6SKl0etICsOgvO1XqSnMugq3C48+s8y6Q+OOEqu1R5Yi3DK2qKpt+18fXMkV1d5Vl+aGV717zTGiP6pumdd6SklIdrx6ZVBFhnu9603B+o7D54y09tlVLwcXGmshRcSVCSWngxAvo0Mc+TwpkQ5jRxGk/03tM5Lcmf7d+jF/eGPbRbV/XJ6XikNkfVSXVVAS176VqKr+RMTglKsVK+b7Iaqu6tjdUt0eDP5PUhBMs9kDvnlCLrxYsXhNgzDBuGza4F98HwuEDD0urzuOTKNfv6MMn1YIeq/Vgdd5rnNrCgNiBWft/HKKfON5bVM/QZ1qfIreDIDyA3D9dbHdRKrSGS1VFNCSrFQQXNinLxOa+ciSEENi5aQ4o6oEPfc311RR6POIHgI7vtTgltXY1GHbR5tOdXWNWJGEv1aTzh52A8d2rUN8NG31WEe/a6CXXElQMoLSug2dBFYaowqX5bMHzaOapym86U64O9txvgmmKrhjivMkW5FNKcyCnrVZpOLNhEIaogLda/0p/Uo/kQW2a2Og4L5EIptxYl/lmWZXzXiv5htMPiiKzf9fomOI68w2+Fv/mZj/6TXDlnclaZ0k7LAM6oyMyA5iz4SMuQiKgiS4aRGvqeq6vLGibg8QQXW5BRd25xUGFRnKsQxj4/p8zt7S3b7db+QEmsRSC4QCAo3tqqBc4tf98cbJu/vlCf1PfRHHFqBA+s8XoOLZ4unu+ib9Qp0ki+Gt4KbwGveqCUpiQXcD9og4ruQJXLddRcO7CrI5pSwvt+cXItS1yos7fPd/Tx9bMru0CTocVBTRTxFAngZyQqM4oGM4lpnsh5Bg9ZMnnKHE/HhfmjeahvNvxnwYv9ndLdqcN7d3eHc46uMx5msa5sTdUb1RrK12hQLYciiY0gS3H4oXZE1wBxNTay6l5vz+ynaDPvPXNS3Kv3ixZVbO5gEJ56fSxyas907XtoEvdIJqo5GXYvToYj7zpaIOjDAoup/MhvXn+wsvtlWTnPilUWTG4SgUSRpMMbROVqnhPH05GcEzFoc+rpdKT4SOffLqPLktfeV+9bjLFVXE+nE04ch+OpZb7znIzOzp4NKeajCqfxyOHQMXgNvoJTnelZkhN4Zf2pNGgteeWWAOthf0C9lGqz9/cH/vP/4h9weXnFz3/1q/zSL/1xdt2OUrSxvO97UsqEUPAxgAel5q+2XxMqsmoMPNsdYXGaq/3wgWlOhDBrRdegjPX9uqvVSTX7Juf/RuRsstfvR25/6C75P4a/xt9+0y3mUxzUnGcz3IWSEl7ysumgUwpAcSdJM5WhC+ADWcSi+BlEeP7khsMh0MeId54hbpR30rIF5a0PtCkXr7yMZU5NWfZDr1Gu8djVjs8lGMhtc5xXBSmSzekIzTn1jgVm3zJQNRo8f2g8Vhbyi/MhYCDtmigWEM/Tp8+4v9eHMa8mlRgJJ+IME3UOuUfMcVEarGDiIUxzMkyYYqEqfU+0bPVCTfTFrwOFf8r8Uzv+erUmM6rDZh3AFomUUijMzMmCHBeoDY3jONLFSBg6XHCM44kueFwXwcXFkD9y3Kok11jt6kEWhFQyd/s9jkWRlqwDGTzOsmIYbMawVZKppcoli7o4p/XfOFrp9UxeRUcCh7MAS9QB8J4CnE4HzRBHR4hqZC4vrzgcTraXYZl2ZY4oXsgIAcVBVqe6Bmq16uHt3EW0Wer2bs+zp09olHHmFOjY5GzMALVJ6u164Q9qfZlk97ElQCbjJCF5QlKd3OdwpZDmmVd3d0SEnGYohbv7PZ1bqG5+nNC1mF7GYXyPneL2nMePI93xQB87JGUaxNUpvVDAQS6kceL+bk/vMt4audbZqBh1wEQIBkOQpe5fqDK+zrqeJwlyLry83fMf/Uf/b7a7Hb/4S3+MP/dn/zyxUzhU1/dsNgPzbPoy1ExRwbmiGT3T+7zB0NedA8yBjozzTNf15oisyPxt7HeRtFj8/xotbabOIKVB5jQRZMNCRO3YPM04hDSNSkMnif040/lArU+taeYek9tiAXWFBIp3OjwnOja7LTt/qffFBw6HgwUxozqolfPTdKYg9N5TcuLFRx8xvXrJ5dDz1a+8q3JbR9fWZlBLdrRR1y0LqWvJpJ7La0D54X/wwQ/5d//d/wMfv3jBZrvjl//0r/A3/vq/zF/7a39N6Ti99ryoQ230W2br/bpv9IGPfv6LygVu3ovAdtjw7jvvMKfEPE8MpfLEn3+MK8X8EuUS5w0Nq7/f9Styw/+Dv/HW93yKg5q1k3IN0nWVSsKKGXbTckmUPEMKeCPTB0dKOt6ss+7NkhPjBJ113C34isK6G+7hWj/uWdQpS7N+dj1XASQrQfDKU1QhTCBzwhUh4InOq0Nqx69ZKamK0aKiNZ9dcJ7OB6JNBTJqcVIp3N7d8Zu/+U/5ys/9PM+fv8vV1Q0IhK4jhDpTN6gDXw28ZRheS5ubYa8CFnzNe3lrkHLMKTPEBUTtzBCVkigSbfQkmtlofBr8gQXcj5WXOuD98tNzkNcrhEDX9Ta9Kza4SstoipDyjPeRkD1YI2CxMuY0TyhtkjDlpJE4PVsZaB2a0LImxTIz5jKcZberkooxst1uub+/N1wU7O/u6WIgzwkRVUCSNVvrxNl4yZmySfQeYrCI3nuijc9TQx6aHNUJI6U2EbS50YHKDoD9
/ng88PXf+Qa/+o/+Mc/ffY9f+qU/xp/6U3+a7faCXATvtMu6FMdmM7RgzHFenVjW689w4wpEnfKbmyfMk/HJFJ1YouTVCzcflr3zRafItDnK8PuW4S+77Na1VJY0CKEI4jK4jNg9lKIT89KccJwY00zJCUpmOh55enNDbRb6cZevZcwQ2V5c4FzQrGwujOMMguEJrfEtOEKA4ARHZjze88H3vsPNxZbnT5/ifa+y6xTrj01LcX4xkYs9WIK8Vo4U/RMclFT48IMP+b/++/8+3/v+D9ldXvKjDz8Ccfylv/RXmOdkelHlXiumDjFmDOOBacd9qwmuWTHn6fued995h5cvXpLTzNB3psdX+HY7bxGd/EZZ9Sz8PtdjsvtlcYXznOmi0ffFaHhkb5l0xSanJJATclKMvcIHExGP7/sHZPFvXxoQuwVq5GhjTB0awD958oRpmnjy5Bm73RaP8MEPf8jIEdCMarDhJZQZsrJXnNLIq0+iMmQEr9P3GjVaoFaQFyiVLYvWikHLOh+Izreq3e/93rf4R7/6j/jggx+w2V4Q+46PP/mEX/3VX+VP/alfZpxm5pSV1aXiXysDQgPOmnxZ0kPTDyvOVZQZRrxvuXq8Dt7otzvm/R2n08jQ9UoKlkEiTffWDn5l4yjU0au/H//hMbn9Gjv+p/Ir/MO3/N3bHdRU6Do9YY8juK5hHBX/UOmYzGDnzJy1ay9l3UwflK7HEY08veiEmLWD2i7CNmRhPl1yiqtoJIRA7DpyLkzTRPCB0zgSnNMIbsXT2HJJUtjf3dH7Qh+DThdpEb05qpX8vEU96yS1OQXWzT+XCQRO08hHH3/Mf/b3/z4f/PADvvoLf4Q//sf/JL/8y7/CZthRidIb3jXUDlmoOBj1gde3cJ1Wr4GBZnU3w4ZhGBhPow6jzLnd/doNLSWDGXZ1VoyNeiXAuOXnc5l7KIGfnXhXOJH53be+74taIapjVcfPZaNGqhNplD5NJ8MUc0yKKDdezhr4SMlITsotOc94J2iTG8wptW7UdgPaVDF9yZlMLU5i4PLympyF8TSqc1EDCREjArdgyJqgSs7c7/dsKFzvemLfaUnc0br4m4y1kmi9x67VWxxrjJS+ZxxHfvTDD/hP/t5/wg8/+BFPP/qY43Ekxp4/+2f+YsOl1jLQQkVSS6QVB6XveyghepjKRqHf+65nt7vgxSef6JvEMOzVylQDL/b3FsjV77La27rVqyM+OIM/nLJ7tiwjrQ6agJN2D4y9SYP1bH0C8wQ54aRQvEdWXcCfdqCl5Fz1oNLzKO1e5PJKeXuDD2y3O7bbDfvbOyocYF2dFcnkNDMeC4eSudooH3Hsev19nU2+0m1Li8YCNagVg+iDJTUKDnjx4hO+/vXf5rd/+7dxIUIIfPDBB3zta7/Nn//zf0Gfz1xY458tkV9TK8u5AkrpVmE0+mKpfMT1/aLDPLa7C+4PB0pKzFMy17OZ9JZgWJIkQhs9J+tM24MbfbY+u+wuP/0BZR8+5+q3OzaXW6RkptOJUAd6WArPIcTgSWnWKYnGsCLGrZ4rX62t5hQB6+urMtJcQ6tQqT0Ndp/14YhRM6Ch6+j6gWC6qupdTSbUT1WH2YsjOk+aZ8bjidgFYr+qUK2y+lU/rv2TCm1spX7nmy69ffWS733nu0p7tRno+45cEi9fvmC/35Nz0klSyXwlX7WtfRdo1JQCUCiuXok5wrj2fW0Kgg1D6IaBru8Zp5mAWIO63qdsOtZUt92PYsNZxPZ4LWefX24de7ryj4H/1hvf91YHVZ2xAF4vInpspizt4l2lJLGspssJFzPZ6BJSzozTCJKVQy5nknPk4JdIsx1Rh3+edyaulFXNRHWRjWwRUQB/NiA/JuANV+RWAiiFV69ecDn0hN0AXR19uYCfK4ZED6vKcYlKXJtsEkIgJS0V393e8fWvf52/9x//Pfp+yycvbskZ3n33fb7y/tacTzTa8rJgPihtGxdE1Jrayp5Ow/pVRb3ZbNgMG+73e0avGeMaxdURsGrwbW67+DPl+Fl02BuJd1frsZc7Zt7ho08/wBewQvBWPlScTRbFTzZD5eojX+lojKTejNA0TeQEZZ6RNDPlwhBjE9YpGV0K9f6cL+dWwY73mgUPHcPQkZMAe6bxROw6y/JWuIzJrfeqbkrmeL/ndp7o5ZLIlhitg98mVDlfs6NLN2g19i3JqWeF+rX6nvv9nm//3rf4h//gH3L95ClTUpkZ+i1/9s/8RZ0cV//O1eY7WWTX1vppXQedtTy3VtwxdgzDhoLCVZw5HOpc66dXw9TM0drg6+Y+ihWsx1/uweOy8WWXXWDZN/u3954gZcWbSNNxNeufS6HMExTLTnmvzW3yZgDVGRSlfijO9E4NqD3eRTbbDd3hqJWA3SWXlzs+jD+CaaKVAmvELQUpCbJjsoEPw9DRbzojX3fLhLNVUNWcVMtYevsevKeLHXMa8QKvXrzg27/3LU7HAzfP3iF2kcPxwHe/+23lL07JmseKZZlqDHSeTW6GHGcOqmsOp7nd1CEpBdUNXT+w2e4YDwfyNKvttuRG1SpnWWApOpZSFFZwvt+P3IvHf93uzpd1Dbsdm4sdaRqZ55ko1ofRpm/B0HU4FIbSqiUG83nI/Y3IUglcr2qT1/YdmhNYHakWnISFR1obtWrvR3Uspe29E8E7YTv0DF3PPE2kuQeGM/uh6t03OdXlmhOr/zR8tVsYItI0cbw/cLm7YBh6YhfBwZQmbm9f4byyHc1ZKwDBnOlGGyWLPlzgZIteqNqhJk7OIAZmC4dhYN5tGQ9H0pQUQuAX1pfq8FcMurMkQh3nzQOd8XnlduDIH+UbbxYoPsVBfe+99wjRM84nxtOBoVP8KF6dHhcMvBuUjiQza6NH1hm3UpST7Hg8kkdHmkYkZxClk6ib/TDCb05dvawHfI99jGy3ka7fgHgOhwNDt6GPgbF+lFhpCCM/B25fvcINA7Fc0nvP0HfWpGXjwFzAu0jLNroaESv2SMSTiyqpGBVr+4Pvf59//Ku/yn6/5/nzHff3B374ww/45je/xTvv/BzjPCvliTmpPoalJPFI1Ftd9poZwbBgGoRqKcnHSOx7DocTQTRbWAqkoqU/Z5Rf3poWik3lWR/BnR9Q16PS5R5535mObwHsV7nkf+7/2iOf8dNZZ52KIgaB0BMPlsnGSqZulW1MJSNzIUkhjSfISRXFG0vAKwW1CC917zQ4iUZh5bi6ucIFz92d5/rqhqHvuL8/cDqeyOioP+cUlexFkCkxzYnbPEPSxoKLy0E/3oNrXKShyaxWBpbyUy1RxqCPfCmFH3z/B/zGb/ymBj2bLX03sN8f+N3f/aZCG1JiSok5FWpB4aFprz/pVVf6qYV/z/vQ3tmSSd4xbLcc9nvIhYDDxXVwKEZ8jkF2Mq6YcqwOkHtECF9bfzhlt2beawdvq76wTOpxkmgVkiykeaKkTEA0e2qbrdAWeU3HAmf70hx+jRb0PKwZBDxJtLLlfdRmzeCRivfHnDKWjIsTdfSCh+gckzktiv9UzKlztbrklmPrkTV
QtxcqxjqEwJxsjHEp9MHz7vN3iJuB0Ecd2lK0P2GaE6dp5DiNeMNWLxN0lgzUWaOpbcqSIDDbVH1o20/fdfT9AEWY0WEFm80GfePC6SsUpWas8xBrFmrxqOoNf0wKXrtHb5Jdc0Ue+YwvdmlApZnxvusgesYpaSYf1U83NzfKD348MI4n5mkiF01wLb6A7vNsSYDHliYGFoew0ULWbLwFFU2WUczynDPjnBCpdJPVscsKP5FC5x1PnlzzlXff50cvPqKUBHXy4KrS6Zyj813zFfQe1wqztE7/ek3BOS62O54/e8bt/p5hGMhenedE4Yc//CHvvv9z5Jz1WXHgY7dMpHpAm7LmyNbzWX6nz7xtUfCId4rDNnzrZtgSnedHdx9wES80iBC/ehZq8kYDTS8d60rWmR4+l4L652cvPSa3e97jn/BvvlWm3uqg1kaMzjuIkT5aOtvOsR41hIDLovUme807yKLl0sPhwH4aLbLX2ejdrnYyP35sFbBybmQBnAGWLZvonz1nmgsXV1fmRAh1hlnbYCdIKqRZuLfyV3DC1c0l3rKa1fkFdcDbqLMQWLr4PYpUCiqopjiHfuDdd95ns9nQdZH7+3u+9a1v8c/98/8i85wYp4Rz0Pd27qtGpipkhWKQBJ1WpJmEoq6q84sRiIFu2LDbXSJzZjyd1KH2muWomFQpQiFrB2DJGkVUj+YntP4ZO/4Of57/4id2hB9v1eh6/Qw5U2g6dSkvD5wox5wD5nFkzEo1EgAvylwhZRXdU5vq3GttaWfGvjl1uvcZNMgpaDBkOKEshWxlfs0AVPJoCOLZDgMX2y3BOaZxqhdIxUOp7Cpfrl6nGuTirCqBvq9RrVC42G74uXff4+fe/4piDWPEBweuMI4jORXGcWKcJroYKPQtgDp3VbPmVI1w2gYDU1xtANSMhdiM2BAjfT/Q3QRevXjJ3f09T548QcQxz0pjpMYqa8WmFMSVcxTMH/D60snuWdY5stlsCQVSEVJpNpFt3xtnItzv7yjTTElOEwFUTtXPsHEiyzjkliWqqSKHK2iToBfVp6JjGYtUSJbJH5lgYz5rw9Rf/ct/mWmaSGJUZc5pF3TrjtfAKljw5LyzQF7UhuDAKxY2hI4s2Rxwz7Onzygew9spJv/FixekomXSeZ4VQ2hDUooAUrSh1D1u5KsxFRHrdxDVr13UjFdKDMPAZrdjHEd+71vf4ud+/uc1k1UyKSVzgGVVqREkz2ZPljj2Z2mleeJ0FMMYa6Kn6wIFr7oNxeH3fUfXBcK958Xx1GAbqoqXwKDSRtZVK4Xr4EVxySsYkWDNy1AziKXULv3KwVp0DKrTJJqXQnSKQyclhrDjT/2xX+T95+/x9J0nHKYjU0lnMrLOTC6l/CpHnuIgiZBKQdN1jpyFlDSrv91uCV3fYDqkwgcffsjTd94lizRUf6XTXIIaVuehNgnv2v7VLL2IOuMFfZ4qO5EU5f7e7naEq2u+/rXfZhgGqPq5fq451bUEV0rSvgin+phVz8PnXZd8j7/i/m3gX3rje97qoGqZJINkm1YkKF2TZkVqNqpyMtZO49qE4b1SS0zziMu5ISSKRfWVBLdGzI8O3T3LMtZSY2gMUgI2hk+Ftoh2RXtL9DoRPNbs4T3vPntH0/dDtE2PStVQucxqw8mqhFAoJFFlKSj5taCKKCe91svLSxOCyDzN3N7eqjMAbaJWEJtyItrtWq/s9eyGnP9kUZA6APpADsPA9t13+d1vfINSVOBxmt2tnf21RJLTrFgcQusOfyR529ZjpdPPsnaM/AW+9bn+9ieyLGutCqTCOBwuOGIX6CUwZ2g5CO/Y9YORk0+keYZcEK1TmR/78F7VrJ9+rxlE/c1S3namtFU2PS6o85iBOWXUjzA1I6o0gygdGSXz7MkNf+TnvoILMJfZjrEmv1cHkdUxm+9dP9o7nIs4N4N4ctLu791mg3hHdjrGdZ5mxlEzXslkqeAbZZEYVdeCxFGFVozKrYbMNXvnvOgQjBgaI0Xf93TDFbe3e46HI0+fPIEaUIoFZKUQfM2uaDOF8oK9Ocj6mZFdoWHeQwia4XaBLDDnYo6qmPM6ELtIDJ67l68YU7Z9XyjpHl2m35pMW0akOafUQKuZP4pog5H3npSS2gBH6+Goruo2dlwNPRd9x1feeYcQA3fHe14d7hD7ezWk1cirzl3yXfpV9JeI0yqRiGJxJWtGrIsdxQkJqL746XRiHie9duea7LeysTs38uf7blnO9T+da05QiLGdvwdk1melIkvXAayosUPxrZk86aQ6KaaH4rn5/byy+2VZkhOSIxm1PZt+Q+g881wQy4amrBjPnBXvL04oUnsAsvI/n6X29dvaGWzOU20iYjFl3lhJ2p+LGOf08nMpecmcSgvf+aWf+yr5sOd60/P8+gonhavtVoOy8Wh45NXXykldL++DVjHtXFMWOr8EZd4Ftt0GHyOTFJ2iJXCx2SoNZSnLMbyxpzQlLi3z/yADYht1dvGWkHEtYSGITdgUToeDNjjWZ4FzmAU2BdE7YRpPyDi2Ee3D7oLQdS0L/XnWli1/2v3KW9/zVgd1miZit2ShQgggmsLXRKV2PsfY0QmQPULQBiWg6zqCj0zRkY4nJCV1UR8Y+qYQ32T7q+JyS7SSizVMOG/dbLSmE6lKt5jb6hYh/MpX3udqt8VJZmLSLKhbSsFnU6JQ5XZu6GumSFrJ0jvPbrtVZ9akaZ5mi1gK2YyF6quFrAekKcDH1tohcnWiUHN2Ap33miGoxkSWDJ9yVupcs5Qy+IC37EXoB8uWuE+N4usteXiK63/Xc7zgnj9Xfv3tH/gFrlWL3RL1VoC71+EOfi4U540qRIdKeO+Y557pdGI6HnX/LCJ/U8NJU5wP9sm7SlRu2KS88HzWzEG28cAarTjlCi4QrTwaBd59+pR3nz3DR8cpT+yne5p2XilN156W5er1mYDaFqAyql38ORc2/YbsNDM35WxKC9I8GzuGMye4VjZWBlh/ajIs1QGglpatOc25ZYoWtEkslKK0SOYXuPbBq1xtKSCJLCjGPXTGDfh25fiHVXbXztMy0a4oTMQbrCoVcpkX6JMPhNg10u1iOOG30/ctBvYcni7GE1kzM/r/po/EXrEmFxMPnbuOOqg3Fxe8e33FzsG2i4S+Yy4zp9QzSR1h2s6C2hBVX1mSEa5dU7LuZi9GGYfjYrNlLoWpJGYpeIEudkzTfWN2cdZf0BL/Dx1UWZ6g2m1dHfeaYa1l5GAZ0JqNKim1ATY1r6f+bYUKaGlWsnCaJmbRngEfIv12pxnVR5zlH0d2cStc8k9xBQeSsyWPhBIL4hcMZRFRCr+uWwIn6w/AEkHyBnkVq3CVtjGw3FDa94r3rKvCLepbxLLa6/1yopCv9549o2x6ttGzsSZspcHS56G0YM3+qDUsrs/F+kzccqJ6TAvAXKCLPVe7S33msg5vcUXYDgNlzs1J9TE2GsDVp7WfWjgnNFtefRL1VyqjzNLkqn+gkj5Pxi60jsnMH6kMQmKZ1dPpYMGow4eOEDvFp1dC97aZbv3t4c
Gcz6S/0g4ziRS6KSzcmjMHSBTTfQOZ2wEpwJLJxHbGTsu7J2Q/A2MhN8Eby3TdihRWkplDSx6YWhhy4UPIXgAFcW8KDvzIu36nXApmylGBnHE+M4MdvADlXUNleUFd+XtsCweFrX1nVTadfQqD76/UthbWtSNzbISRHQ3WZ422ZKYDHmt4je7rcKWs3asOp5HqeJzW5Hf31FcZMmyp1aRFRR7r9cXjIPO+TZFbuP3yf3gTIlpYMWTYpKsZZ6tTGmi/1bXfaEliC3r7kqVohaYdbQo7ryI3O2salFOwtigqnWRVmTVGuRLQ+yxQQDZGq7osbFpCiq3odOaSpBvZPJAfrvnoPqvbacndMY5oNOlSo5UXLES0FQRE5pV9qVEirZqy2Xl6r84pLZbAfEZ3DCOE+crDif5pk5ztQqy9CHxbu31l+OPRaQ1Mc8sXJVZEmk2noVWYVGfd8zDINSTgQutw1w82+9/vkf6lnMLTWTS9IpakVjbo+j6wMpF2anhVWjdIGjSofbDnTPr7n+/ke8+ef/NbNAQkV8oa1Vr/qfVAqhfQZbQrVAK5u0u6Ex1XshprJej7LS/nSPE0IuJFl8E6C0iYbrmq1V3Zx0UIhbn9e2/7SYsYB8dqlF7NKvXuHftm6/OUHNEz5oMHEeS1BFofmqOb9zlVDXTUlEzIbH21cyjUvfVKjOeeNfKsF3nGYOhyNxbje6CXVkXWC2ApwI/dAsftoyE22TU23STl0uLlbZCGqzg3Fcri6v2O2U6Lwo78QsoBzm1dgSVE1Gc1YldEyJZjxdnKOEQHaeg1lqhKyk6DxHDoawnY4T8zRTcrEJKNUGHRTliBZWVT0LTXl5AFoAz5bJrpt6+10XWSrKrWoEdPGKWvslQW2edI6+23Cxu+bq4lp5WBYIa8mkFCl1pkyReVTRhTd7KRFFu8Swe62aNAFMKX7jgvv1HcrR1QS1GYcVtTlakA9V8fah4IIwDB19VxfHCj3WzbqZnTtrveqeUi1ZazmlVVJtw6EVUbKoqJtfp254lWa50WYea8Cwqt/OdZ4j0zhxPBw4HvaLgrWK+f9V5Uy/nZyyoDa5KWvtnHIpFOepmw10A3UYKLstte+REHCbjqcYOUwzTzkxO0ffDQQfiMVGbaZC8Vm5roKiD7LovJdP3jbgYsIpNfYrtoY0EbXwpnZDbeRoNeshp56D6jO8xgbvO66vb7i5eaHevGY/Nx2PHI57Vfxn9VQcOk/nu0XQ5ZwK/JwEM06Xd2btKoKqG3hxZqUT0DVcE7mClInLXcdmEILPBFc1QRWz6KoVvIpNQON1yYlxPHEcM0fjmurY6jW5PDfqty/ylmWZHW3N5ZyorahaypO2/sVGOleOxyPj6UTwnovtB8vjUWprjtkEKksU2hOSSybmbG1XXTfjNFEur9jc3OLCqN2D7RbXBUUYu4B7dsMxV/LllquPX1D6jhKV/1eXWK7xsb3vygrU34tlqK09WRF8FbX0lfpWHZmLIqzJF3wp+KocVpqiW1iS2bavtDG+PnT4ZvTvLMmtmcZLXzojteLFsxkGtsOWoevpQkdJnhK++wS17zzeVbWcq+BQVX+OIzXPdEGgznqPxNlIZI3R9B4pOpCilEgpUfnxpXA6HXj18MA8a0FUDLhZgABZf63rdC0y7FYZd1ITLAXU3FI416LJk+CWX32vtmylWVeGYACdrhQDfC0PaxRBi9kUW7valcy5MKXEtt/gd1tiHokEgleQrNh4XUKHXG65/PAZz//KD9g7IdqkplytMCpFfYurckiz6Qd467Ma99m6vU4KOcMc87p223NmnlPOiToMOKeIv+1twcRrmpw6E1A14aYCeKVYDsB5/r/ahYEmwLiVV+wJ37puvzFBrZxsM1U7C+dVMBWLVkLVZfpeFp+uYsilqvnyYjVRsrfWnPFX+w7lKXlSgmlKHI8TpQaQYNHgLDlDp0UVMj4MBOesCnCsw+c7vBQdD5rLKhAyu6RajBBcMsMwsNnuqGjC6ZwoN8EJEh3iFXXD2qEVVJFnKNQ4jtgdJRttYTPsOB4OxDFRq443yymtU2+KTpAYTyc2w8CUhDlNKlxJykupzqvwBL3Liz2HXlLlQDqn+7trybtq9hvxOzX+Y21iKkdwpqqthUJrnwaCGwiupyRh0w0Er1YzUivFaYJ6tb3mVCo1TzaWdk3eFjifVqXWhfj/XR/iE5UIkhDXAQmRhHNFW6dOp5FTYQjql9oFpXM4GnKtSZUTb+4KFrzE87Tf8/j4xPE4E+c2x0jW9SiyPPht4okGPD0aaqSoVFzgwZaw1ZbxFqEmJaTHGNltd2w3myUH1sCliEt2LcCuG6eTFVmPSdeviCLp2TnK0JOc46FWolR69Feo8OXdA84HYqkcjifG04neB0rKxBzJoptSbX4rC+q8QGgAxKL8tGrWIslQfjUuaUtIM5VcVMxQqtJ2FB0IuKJFYsytogfxge3FFR9//CnbbmPuEg5KZZpVPJHyzDQdeXp4DXXEVw24oQt41+mkK+cQ59+Ztdv1HuetPBVz36gViLo+vTD0hYvdQN87fKh4M9QWsfzfqE1io6G9U9Fb3w88HZ40PttYx4WIZ88wsGz0i/ADtCVvCA1V13XOlkghi+rXN6OyoqhpiYU3r9+QS+bm+poQgtI/Spss5BcUxomiP9nilVSvFlNp5YnGnIhA2QyEq2ttM17scMNA13Vshp5DB1++foNjZuOArsN3Pa5U6xIVZpnVuaVaZyNYctqgU6pNkLL9ACFXFceulOW6rPdMVdZKAh/8El8ExUoaPTo3tFrAuY5nz94n2MhtL55UZsZxJBpSWIoi+14q227g9uKGbb9h6DtNcOrA5c3wb3RN/kUO5wtOknGPi3pFl8oQYBM8/eDo+0qQSBBLgkRBF/GCC069O3Mk58zpdOSwP3Ga1Vop57qGFdHo0Rx8lvXaCqwqNLNPRfqqFbiY+w0arxAoghSHeE8xC0InjnGc+PKLL7jY7njx4tmy15VabB84h4cUCNB4nCnZEWMmxqQ+zSUy54zbbtld3VD7J6rv8FuNWwJI1xEuL4k+MH38gvBbn3D0QvVBC72SdRSpKJ2qVkG8tfRpIGFD9a0lKyZgdZ5UBEkN9oOWxGcDCHx1zDkjnZifvK7fVjS2PUWvAaZB0E5WLmdjsPVpRhBKEgMAVBdDqXhxOpY+bOmHb16338JBHWm2AeIyVRy5BkK1dn/vqKmqCszaGSkW5nG29lFCxHNz81yhax+o4slFKQO1YgF0rUpYlLh2CYWlfVcBF7xuWpag1mpG6JSl9ilmdL9YUZUm4lBIfJpmDqcTr1+95nd++zehDmoJJUAXjK8VKF45XM57ipllTza9pus6qNh0qcTgO1LoNbiaDVUqKtYRryhNroVXX33BD37nd0hzIkdFnzRR8WoTi1bQnFeJVXk5uWoQwAlSlCYhwSIgKkIrCWIpqANgwTmzwyoVXwolC67X8aSd6+hcz+AvdFBAVS/T4B34nnmqBNfjXSDntnnLQuXQG9T4p7owp/xutElR9zjdLWRmSoUiEW9
m0L6qb616x+nGm5I6NNRStTUqnpwqm+GiRUVqDeQsHPYzh8PIOEVbq8aLtrXb+gfNSi0ER9dvtOI2DhCgCUhWRXkLCC2/K83ZQeqyzrbbLcNmMDQgodWXighT1ip14UPlTDd0SNG2ZDLTaO1caHFVqiMMl0zTyDEWjnFUdDlnDscTl5dXdn2EV69e8ez22dIiBSEnDWhSHFJWq7HCkusAlZz1C7XUNRmoZxxEu04lV3KqVF91zJ9zWhyTydXjayZrCaHJk3SaPDmPlw7nVCG8Ga6Y40hOkUEG8nE0/YJ6DnemhBeLLSLyDq1dPRrvMAQH2Zwy0Md9dzUQggog/LJ1VOtWOaiWrKMG/dOcOY4TDw8PjOO8IkmtPLAumHYH1nNoIi3ANnQMhKhUc3bRjcmKKuk0oS6yeDamnLm8uMA5x9XlBSz4qI1hTJUQtGBsNKdq79dKmFKr+oaKrtnqPWwG0mbDcZ5JTjhRCCUxEHh6PHKalT//8LTn8LRXXnKtlJSoVQVjmoHqh1jM/jWlXAtIGrdf6W2RrJOqjBMKNtjFLNiKtVjVy1bLWhwEVIchRnPxTrUaL977iBcv3ufq8pLgex3GkCPzPDKNo4EiR+bphNREH4SLPjB4rx1DP0D47lX8mOexdikUXQuuErY22W/oEKl0Hh0ioVw6tTWzXEAHSqiF1Ga34zgV8njSKUqN4m57otYSCQgLDaqJoZvjj6tVhcXZLOzqOtXPGJpIS4OyTmyb58Q0R+5ev+GwP7AZBkJQQXCuLNaP4mSJ822tNNCoFTO52njtigrynMDFDrfZ4Ict/mKj3P8Q2HQdyTnePD5ySCd2hwcmCn2/JU6qeclOi9Zio24BCNLqqYVioH9d6TKlgiuNMdqS+XbO+rVCRqKjGywAtM5zgWIUzVIgJ0HNnZQmeHFxrcCaVHMVyqQ4cToe1NGhytIFECqbbsv19opNf810/Euo+FleVEtmbZlpNZIEGx+qJ50yxKQ3OJWCDxsGy659f0FNbeSjEDMgyi3SYtITQod6kbnlgmlOIOcnQxO/LF89q2KU42C+kTUs+uVSq71XYZoih9PI0+HA4/7JMvvVemHxE03NUFfWcZVZq4KhHwi+h2KTVxQvoPN+4YeWUpCsFi+lFKZZJ0j44DhNR8Q7tjtzSVg+j/7uqqMsJKQViapFEVCphlY5bXc6sQqy6v0QhKoqFDUhljOkRFqwGFQkYtOgdBSqX4j4TiA7x8VuS81HjvmA95hdk60LWVssup7rYj3xnR9NQlgTxYoVbwInKbbhGKettWDmOXI6HQmupx+2hM5aPEqu1GJKvG7upVh3Aap4Q7wbegggKsAQFjW2iFaPC/yJVbNe/WZrqbphtthZZGGyNIR6mia1LsmRLjyDEpR36IDiDf3XAFMrlkjOpBzJDYnx3jh4St5vLW7x6q0bU9I54+J0yhRKzJ+niVyVJuO8mE1P1YAtBcmyWK+0GZyNd678JSXvF0CSUH3Bej6I0e2LJdO1eZsCkqtOafNnXCcExNF5nerlpMNJry17nCFLQkYnsVztrjgcR01QfWvxt9agoa7vyNqVuuzEFldW+6SG3AQHYq37VDQIl6rCS0STbx2coQVMzjBNkdNpJKVqHYGWAGL7lq7hNnLWOVWLO/wSI/T8QEo1WpVgGZm+TFEUEWu3xqiiy8uLS5wXOvNb1bjnwHyKc0YFYJiQszaldYvfKpDy3pOTDjIpVRA/UKUwZ524R43IOHE6nkA8IRT2jwdOhyM3t7eU3JOjCe2yWe9UXXclNaS0LMlGW7u4VjAWi7+29ziNC9iAmPbMZUO8CuDF4apflOGtrayT2oSuHxj6gT70BN8rP7ob2PY78laR7hhH5unAPB1I88E0HnqPezxXF21H/O4O50TpUk6pOLhCEMGLOaN4FK0G2v6uLXinMRT9JU5V+qdx4jROzCnZ/WiC02YjeYaowgIciTlDNKs+V9WVQSiETqg1W5yoNP6Gs/2yZB3D2ih/z58/4+JixzI8AgWrcrG9xFrWS5onGLorVsirXyjoI5Kdo/ZqpXcEEGEGeiCJY4qJpzkxnSZePzwwxZmw2ZgfuV+KoJrLQgl0WVSAaq2zWnTrsGRAn0URqjvTAHDG+S5VkdZqQl6zYMNoKGIUg1UHUI0mAX0/cPvsOVfXz9hsekVNayHFmdPhwOl0JKbZrmkmxombqw2X2y1d2NDLN6/bb05Q69qCEws8qSZMwwlWKdfqSNl4CHiQQDdsjZfgQHTBKdKu8C9SSOZl6rwjdB0lu4XE3pLit0h1djSS73miun6PtcYF/bO0CmcVELUaYrPZ6FSWZbOyG1bNaaBmyw/VYquWjIKsKiRpk0f0nQt9FzSptbOKlqDGGBltc3fBM80Tw2ZD13eUqfCvOlrHrc0b1wfS/E4rS/JQ19MwgnQT48gS3HWSlGF9QVtDznmbhtTaTcoNEjH0RoS+V2L4SUwN3TiGy//dcrL1Hdrk9X4Wak3kLGRR5SjSeDlogmom4MotLrpmQwDX6boVIVczjBYz/24JKM0AuXGSVheGSrWBE3Y+TgOaJmQrEqa/ztZtVWpKtedgGe8rWvQcj0dijAydX4R2LbFY2jxLABdyVrPxmOJqGWcer2pwv45hbK2xas+K8x5n6/fh6YlS1UfYh0A/9IxpWjwyF+S3nIu01qezAipyafYsbkEWlsiKUirEElShJSrGnS7rezXOdPABLwF39gsEJ57O93hDMupmx3hSVwqH2Bo/2yxLeWfWbhOLitd1HLwsCR+LZ6hNkMuRUjJSRYekZFN6ex3/qYJUcwCJybylBa1oVn5Yq6varxYDvNPiW5yCDYZZGNqvCC1Vuy+1JXZnirZiHLzQqS1TjDOhJSvOK6JpWFCRiuNMgGLt0lzWX86rTVmxgkfMhaFgiWEyL8s50vfDUjSnGMkpm6NHx5yigRG65sVQsba+dL226yErOnVWhLraeN9u2fS184HyWyUb4lyNWtXoGvLWtfbNj1cCjqDFlrP73mnS0IeewQdGgUMaddx0M6z3js2w7o/f1SFUfLDzrpokBUE5jAINlT5HQnVNW3IqXhNV0Zbx6aQJaozZ0H4tHmiAjB214fG27h3enhLtLEmplFCQmgm+dSbrWQ5hAAttUp2CU10XGIaevutMxKpOERkFaUqxvc+3V2hXQV+71NUzVAWcVpQ4h3QD8zTjciY7YaqFY1YrqjFG6HsOp5OCBTmDAWUxR5w5HdSs+ZWmWWvP7vzj1XWrW3QsKp7WjvKCoNaW7NfFQaUlqGX5ta7/9r3Oe4bNlttnz7jY7izR1/g0TyPjeGKeJwOBEtN8xBPZdB2d72jkuH/V8Y0JqiKDxmqrUBNkieQayXkmxtnEIYFaPeI6+uEC3w34bqDEQoqVOmdKtFGFtQCZripfsoqN2usDJTpSbvwJy9TditCtO+Hbmaluyut0Kawy5Sw7r6Lv1/cDWyucbm5u6PreuGt2Vy1JXd/AbpiR1rVdqyKGGDPFxramnNn0AyIFZ/6QKSeC94ynE8fDAec91QtzjIS+x0v4Gk
B8noQ33ejyCfSzlboQs0M1m5dqKxAoqULXEhZNGCLGga06MaMPaiu1Uiv0BLy17ldEtNjrZpy1ZGR5oNsm7xe0IVdsSth3f3inbaZSkga4Ei1wKkKTs1p0xFSJWRX4PvRs/ZZ+s8O5oGh+UfNxR/Pq1I1dxGtxk5uA50z1zwIoLYFUC4CmXLZNUb/DEjEz67c1uCSsTTyFEGNkv98znk68/95z/TdZ10jzjixnQpdibaaYIqmkhfi5qjxV4dkHr89jXQO296pYPR4PfP7FF4gXHveP3N7est1tGR8n+wTn2U1dP6P963IBsLEDRWNLrd1b36KARrX2lRaZUmRB0Zrhua5VR+87+tDppo4z5F9MmW/dG8DXigzbpRjQNdwsfJpfX3ln1u48TtTLjdIPnBCCcscS2RDKQpwj87wagTsXuLwcKCiCrI3/AOJBVhGerqVfFiaozT2Lmrhx18QQmIZyi02HQXT8tHI1OhbEvMUpKrVmalbajPjAOI6keSI8u8EHRdhwnmU0aQG3GImr4C2jCn79kwa1KtVs/7Tg8uawUaoZiM9xUQnXCmmecaVyPOzZ7nZsd1umx0lBk+qMZy62SbdN3izSlq2nGjKl8ddqPBt72pKnSkOwUkz4YMIvKcpLrJ5Ss619fR7AENY2Ptr1ONfTuMParJqNQwx0M7PzVmS15/xcTvsdHlUnRXVek6Vcm8NHWVxKUiqQqw1NUWqVs05etfVUxROjUlLGabYuYLCYaYWQxeParLrsufZei9NicdpLQMdEFxyBzhvg3xz9sJTSzrXYNKyUCsMwqHCoZOa50PcexcnVljLZHtvAHLsEtm+2mG5xqwFGRgFxXU8eJ+aSSVE7eXFO5DjjQ88gjjnmxRA/9OrZPOcZFoqVxocqBVdbW16f4doKv6X7ina2rZjSq9bOua7nXuqSyIIVV4uVmkXw2rQ5GuudU/cG7zq8qEuK6x27zSX1qujQoZJ1L04nXn35UzoPnfOM8S+RoK4LDyPer1Bda/mUXAldTy6BXBzHU8QHT9wnok6koxYVFdWsNx8K19cD83yrG7vzSPAEbEyfqdoBMsmipANx59M57dQqbU45i1jDNjdvLA1rdbVc7ur6hhfvfaCWWCJmHaSRRbkzYgsXWrtHF6HyuXa7yqbvmSddQOM48cgjm/c+pN9YS7Soif7NzQ0//dnPubu/J3RBA20tlBwpeU2gHbK0zvCq/NRKsX02PZoKv9ZM1/t1cZ0BsSJCttnkzmsbNqdCDoq/+BC4vNRpGO4tsrmqGZUSEInTiVcvP2eaHgjd15Is1kRWLLhTVx/Q7/pwQK3KhaEUpnpimk6K0Fdt2+fs2F3e4H1Hv9mwGS45jVqtx6RjUTUQmEudC3S1EkrBd4F+s6G6QsqYlYiR8fk6st8Ow0yldV5sU29VKXXp6i/pXa0mRKlM80QuhW7oubi6UvTQKBeN5pFrGzKgRzMQr1mpDX0IS5Bq6yqlxM31DeM4AfNypl2n/M5pnjmeTjbNxSayUOmHfhFwfNvRMNJWqeeSF1qJflA736TVo3SGqmWdTuWkkq3PL3j6MDAMG4LfGG+vs6JL52l7ETB02+HJydlaz4p9f83vU5Had2CTR4Vd4zQhBONmqopfMsxVPRrH02S8SE8IA8NmRwg2dEQ8jd/pKwuVStF7j1RvRf3Xlc/64865hZriGiVFjFrUvs/a2yUrTaQQls2krVlflHt/Op7Yn048PT5CyTx/dkPNrXjKul4tdOeUFB0PQb1ZBWJUz8vmW+t8NRWwIFU7VymbV6qJs8QcDHLOfP76FUXgab9nc7Fju93y5v4NbqGTsRT6y/Nydj/On8VinNVyNtWHsye+oglIKpWatOvktfVEKpFcHb4uTwNOHMF3mrQWVfSD0+JXFL0O4hXdDR1SZk4+UJv3Qa2Mp8qf/9lfetn9pQ8fRN0ygoJRtVbImXmemeeJaRqZxoSTjt32ir7fAh5Shwu6x1dRXcmcCtEStPpWRMT+bDoNUTcdTdab1WVLsKzzZZQD57RLmJNQQytAKhn1XfWdIbRVjfV98Lz88kuohYvdlmEIFFExVTVP+L46UskK5BkNJ6VElUyuiYyOIxcPOC1UUtaJWAWHuE6ph/NEKuqN671y41vH8/7+jvc/+IDrmxuOL4+UqrzOXMFlRU+bpWZ7VmuzdK5nz7crFN+uoP0nbWpk63ypj7ScU4gopBLUnaKYnz1WXjnBB0/fbfB0plnxNhbcbpNvwsSZQGC32SE1MZ/ct67bv+AA6nUzs5wZQTfH7CvjGIkpE7OQIvQDpCyMU2KesrU9dJkF740sLctEiGIVqHMej5qi67xzx5zHBblqm5mz6QWI+fw5Wc39BUW+qiMqXU5BI5sDFlOmG4Su63De0XlDDs2OAXQKgwE1ishUtGJLiQBsZMOzZ894ehpJyfz0qjDNE7uwJXSBTsS4M7I4Bbx49oLD6XFpK6RZk3Xrb2kFXZ1xvqohBI3sYAlg0mqrAjkmivNW/aMPbDHIviFRRVW/re0WnCe43jYpRVhyKXQWIAuZWhI5nXh6fMPT02tKHbnsNktb4Bz21cS2CVZqW7bf+SGWsDVEvVV6raXsgwfXMc8Z5zzTPPO4f2CeypLAake1EueJEBzXNxd0Q6Arga4f6LqknEyBirOW30JNP7tORis4Q+UbYl7NOqdVp7WhVQZfFTF+pgg3NzdcXl3hQ+Biu9E1LNi6bUGlbX6GjoqY96sjdJ7NZkMfAsF5qDrF6ng8cnt1TfCeTd8TQiA6p+MIvUfsGoAG+JQi03Q2YlCWLcOCtE7fNhzNIoe1N4t2R1Ib+ehU2CC2dqVBElkWBK+WSkqVHLRF6MXR9wP9sD3z5G00Fex+rAVmKRocRSrrmF5ZkGqW/70ba3ccTxyPwngqlBJpVn06UzsQQo/vB4awUyRONEktdKSadXwsyoF0ISjFQUC8Z7vZkqpHUhP0lMUi6et0sHPbKUuHNHa1EFBVsLmImuyoFN3EiiJnx+ORmDOh6+iCKu2D87TZjrlU8MoV1ERatQ5tKExJqpDxzuElQDXPYdTT9dnNLceTgiCqiyjL+YzTyMuvvtIOcoXD8YkqhZsXt4ynk32eteOgjgWGprLSopbPVg0QseRe9yQ9z4bflFptQpZbFORCJjtvKv6CM/3CEAYVq/qBzm8ALQy9U9GuPt5q5Y7RYlqB246SYXx8N4qraZrISbtXrgvWolcFvrBhuwkMm0v6XrtUMWtBmpsynmAKcWyhubNuzJrYL39f/kmQ4Jfrs4o0G6C20i9yTuTWRbBiynkF4Zw4U/Af2Z+OfPHFF1xdXnJzfY3ZfhpgoOtsLtAFz5gn1XN4p44TiDowWJzzrvnJi1HlYDChtWI6jpISba/KObE/7A0cc1qw7vfLHlalocl6HuvY9LoUW+dXq6Li0+aHKqFF7MKZ1Es7DuaVKlKgZh1PnSPZDPxbtFd6iQr1VEzcIcZ/V8Gbs/NQsTXSk+XErt+Q04npL7Buv9lmaoF+TXhUZBEBYbymkjPHw0iuQVtKBBBHTtFMsjMpq
tq473USw263VVN9v8LiYN5Y3uG8JqMhBHad8kRzzqRkULG4JfsXG8uopYH5JiKkmKjO6eivAtRECL1ZRDQzbK+m7JagVmmqvvWieVrDs7HVhOoqlxdX1PqFVtTVIP+k6n2Pog/qA+boh4Gr62uQyjhfqnAgzmrwbgRiytrQV4qkohfidPNWknFdVpzuD5aQLs+poXG5gGtJEVQnxu3FFOR1CaTZAmjOWd+rZEqeiPOJN68/Z573hFCtHVv0M51v7LYeFqeaXw0dfkeHnVRVzqOYh6vO9tapXCnPlggVVecfoyWo7fpqP2iz6Zljp16ZslV7Ih/0Z13Vh/5so6vkZc53uygtaW7G25o72z1dfo6z9qKG7Ipu4n2vIzq7vqezEadiia4WD2efvPGN7J74Cl1X2W6Fi4sL9ofJ+Mm62c8xKbc0BELJ6uSAog86tz1ogSSaVOcYWedOG/pvnRbnmzerCndatrhs4En9CJNX0Uv1Jgg0o/SK8tcUFCjqB2oTZ3Skq6fvBjb9Rq1sWixoTiAFbQOXRI4Tcdxz2N9DTcvafWsJn12nd+EoSYdlFCnkPFNrwfl+vZZGkaAoVaWUAvOs8bY2X17lX/tuQ8obKgoIdENPjYaqqJxci6Iz4c+CuJytZ8DiTNsTAIqKMdteJw718VWWsfK3KxeXl7R5MV3QscM6RWq1xgIV3bXl3NZPezMRMe5/41mrZdo8zwjqMVxREQq2rrvgGU+F+4d7XYJOSCUzp5mLTb9MaWoLQex5awWOnRVtt2/0hVJUwNd8d2vVYm/RCoh2orAR0AVVQufSnAH0vbquo+8HjSNtU5cz8V47J8EcElg0A6vAz5Cw9N0XV/Np5oRS+JFCOaK8ZT+oJVYYKEnwYQDXkc0rtmlRcKgRvV4gfAiEriMXByXYCHRLOH/F+69rtW1GsCwm9M/LLrsuPBuiwAJMVXQ/PB6PDMPAbrfT+2Q0ooVegK4etXbUzqsrzsAlyDZ23bV8hfU8qIXLix3jrC4wXQiLS4V6MmceHx+tsBTmFHGx4/LqmuNxrzuEFebts7d3UArCCiq29yxFBX5Cyyv0u8/phLVqweNce95VtJqNppKrow2l6LqeLvQEFwi+R4WZfqUPnrl+SC1IELwUHTueJw0/37Ju/+IJak5EqagdjKVxxZlKLYEhntpq09zdOaELQs1VDZovBnbbDdtND5Lf2hCctdpD1+vCDJqgdpueWmEaR7JZQOAr3nd4Q0ycd0v1oAb1ZsrvNXmsZqeAVdfKVfF487/01mpd1vWysOti5SA4XLXZ5hUuL6+WtjjGKcq1kHLB14JH7a7EOYbNhqura4bNQCyROI7W7jgxTSOn07E1fBQpdoFWzVQr+0x0DtVSZWfeAXVNbJbFXyy4i5UTVXTHL7KKb1D/uVQylUQqqkSXEilx4ri/5/HxK+BkE7/SgvbZHFlYHoKWfbw7hwpqlBtaahMNaWssp8Q065jalKyl4oScKuM0U2xkqXIeK5u+s80MmyJ2Xhw5vNe1rhwyt2x8qXHkzn17WDf99hxRlW+GKEcpO7vvhv4KmlQ3H0vlRun42zVBLZYWrO+jYKQ9w6jNzSCey8srXn51b3QRTYhjTISuI3QqWHDO6XONos2Xl5fErG0oK9OMWmPvVkGqPhveOFrFWm6tra9x2WggpVJTpYZi6D4WHW1FZVG6izPsLrMUaCJqQ9N1Hc3SBTH1d1n5tSVF5unE6fjIfn+Hk0h1Otr4LLa3y/XuHIYmaxdTLKFpXFIhJ53gF0sizqoYL1Vblw5MPJVwUtleGLcYUR/jEPBF+ZrVECo1VrcEcXGNWDfUBTTRf1jWpkbNpl7WhLRNlWsXuFbYbLaEvtME2QSx3usz0jjBTbi2xJRiCI6IuWA4+q4j2FS4WivJQIuUsj3D6kLhvWOeZ4Z+4OCO7Pd7S+7cknwUm/v+Fsq2xIn1PNbf7aul8cIrxRVKMMVqu2dowVpyU3fL4u2a2kAPr+fSdzqpyNtUM7H43/bFRZyFobklknNcOiVLglqB+btPUKdxwksldw5xlZiyjujtOlzYEvotc0nEoj6b6pGOIuSSqeLxxVuC6rQQ79UdhGSF6wpTL7Ht/FjQ/hZb4Wxvat1Ft3SqGprZkk0F4+znRbi9veXq6oq+75fOyxJT7efy2fNQrBtBi49Vu8bn4aXWSoqR3dW1Ca31+enUJgfvHClGnvZ72+xlmQTY9z3T6Jc1qbXdKnRu8Jo0zUQrriyXqK3KddAt2Z/lEIZ0tbAusu4oeRGpGqdWRCcv+k4TVBcWsNC7NqHPxMq1amIs4FGBeWoP37es279ggqoWH3EaFdY1iwsx+H3oB1y3JXQbkI6UC13n8V5Vu33n2G4uuLzYKjJTM7VmTdUxYnUIbHdbttsL+qHHBw04OOHu7o7Hh0cO+6MqGnutPIdNr/OpDZbXh1UgZ+OTGSfQrQ99Q1u81yS43QJt8WvVq5yMdXSlF08tqowPFkyH/gLvezBkDpxaaKWETzp5S9E2oe83XF7eoMtFycKqbp2Zp5H7+zc6yjJpa0tEmOdRWxFqILksjmWD9kF9H23ikLak7PAo2kVVBDjo09TsW9CPpQhEnAgeeq9uir5Gcjzx+tXPmKd7RCIhbKBu9J4ZWmdXElZM/exr3/2h1xJ8tuAhUKun5Mo0JR4fj0jY4N2wjC2d56gcpTkuhUwInpubK4ahZ2OcXaWY6IMlztEZCTSEzoRFDh/0azknSs7LRB2xwCetwrT2nUmvtBUmljzUSnWGjNamzvaELtDEeiuXVDRJbclvS1BtUdSqLeLOBW5ubqH+3Ph0inbGnOhLIuDpuk6HbMzayuq6ng8/+kh9GeNEnCfjlE3WscBmqLdE1VAeE9vQhAnNOqqoMKQxQowloee5FFg6F9t5LTQKaOu6KFLgbAQzaJqjs9QbGqb87zyfGE9PHA93HI537HbeuMn+rWJUN/v6jqzc1mZWD+bqKqkoSpNrUFFfjHRdIJeR05h0UETFxIBagFEL293AbdpaUeXwoVO+vxecDWBx0njlUESFSE1kBq1Galy1VRC5IFOtCCyQXaP6WHKKWAFU2e529F2nYh/nlgQVE2y4djMs6Oq61SKxof+bTdWE7jDREKECnKaJ3W7H0HvWWAm73Y7j6cg0zWs3yql4Jc7zIiBp2JNkRatEaB4yy4bfCqhaCjXXRZntvY66JltlhXGspSyLuiHDKWUbk6rlY98NbIYt3oWz5NTTJh2111LNwszx8Mg4PiKSLak19KvAfHwHEtQp0gWPC0rFqDimVAlWeM/zzOk0Mac2zUnXXZpnQhBy6QmdAgXOBzbbHTEJ45yVdykOiluQmtYwgTVX0b+bEhSsmFn/fd3nm3uC6GTBqnEmFy1UQhd4dvtM0dNOBUreOlayrgorxOp5ADNwSNdR8M60HiwdLnUoOHFzeUPfKd0x58x2uwVDxw/HA7/4/BcLYl6lkovu11YZLodDi3nljle1iJIGcHB2Ymh3pWjBk0Ow63MmMBdrRi8Fo9IAcn3bZgpEk9PQmbtH62o3hJ+z
PwuL95Wdm35JvnXd/oU4qBVt5adp0mBWBcETgmMYNjqVSTpK1TnE0zRDLXTB04Wevh+43G31pjYlqVeT6d4Lu83As2e3vP/B93RzzIXD8cjLly95dfeGGKNNUtDzOZ0yx3GEJ73pwVqT20F5eSEEfVCMi2pbqBnuV50Z3Ol4PkUK6lnlapMX8JrYnCEYvvOGjIL3nXmpBUDtMWLO+OKVX5fUSxXg+uoZm80tU5yZxgOuFsSpib6TwgfvvaANIrBBUEzTiePxYHy0kTdv3nA6nTidTmYAbYpl2zAaMqqVI4YStWTFr23rKloNUiEnXMlsN55aI04883hkf/+Suzc/o+QnFInySzAvKEF7Vf+DKeh0gZa/IK353/Axx0iMUdX4viyKTa2JdKJZGLb03Q7E6f7SBYRMt1NxhXOO25sbbq+uKDWpD6wZmOSoRYba1vR439NvBh2X2uu0tJQqL1++ZBwjJSVCcGw2Fwua5EQUSawZqToet6RCalZTZvPkvCemxGbTL50FcaZCXWyzdI034ZWuXSsYxEYVoxNSbq671asQtQoqRRMc57IlfnpPt5sLQjfwG3FmnkeKZFKcmaeJ4+HIPE3s90/EGDXwlVVooiPtvFoE1RV90sDl6Tol1MvXY5QUqiGnysJWP9mStRuQkwY7qWL+rpk5zsprdB6hkMuJND3y9PAFX331GaGbkXKpabuYTd6Zml2ov3we39mh6vIG0eSUeHp6oKDtzpKF3WUgpUqcM3NMi0ivpELfdew2G66vL3DmxKAdAX3tEAIihRD0XjQ0Oled16d+yG6hXGhL0CPeLY4umr3pOlVen8ZKJzBXnWyFZGSre0Lnw8L7Dw6CA/MNQJZNSx9SZ8VVQ9srulFtd4H33nuP+/sjtTbKjlOHihwJnT6LBcgPD4gThs2WZ8+fEdOW43SyNq0Kr0AfG+eKdRscNWXlMooWMblmPEH3wKqKZAUtnN0bnTzUnOvANvHcwIQCTrnXfed1bCaBzgW2/ZZNvyWEAdUCgKvKFSwlWeIOKY7k9MA43ZPyHnE2oIOGdDvSO+CQFvqNtu6Lp2RHypnpOOpwFJfI1XPYn8g5asFcFEmcxxObTc/zF9eEXriqV4TQsxm2jFNhzhHns64Zcz9pw3mq5DOrRE0dPbJMigKQ0PxQ3dIRzVUnf1kZBujekPV2UQrc3N4w9IO1rR3BtzDbTKzWYxG8Gs1GRAgV+goXiBZWPijCWxUYmKaJbuhVPF0KpfSM48h2uyOlxN2bN/o6ImQKOUcOh72Bg+v7CiitT/ziJlBoCKiJdltymTXZLyXTdR1d1y2JZ7sSLulUwvMCPmd9xtV/WEfuamwJeBu76t1KiLRc15piOna95kScTuSsegDvvn3dfmM2sVjVFH0wc9KK3m8CfVCjd/O5JYvTynKOlBoNSRJ8ELoOnETLqpUf6YJAmXGusN1tGLYX9KHjzd09j09P7Pd79oc9c4pQ1G91DQHrBc21UpMmAHGOBO/p+57nz5+/PZVEhO12yziOC1rW922GNPbahhzUtADlDXGlajJaFi6qTq1RxXdhAGIq9Fa91yKkqOPe/u2/9jcYNlf8yz/5U3765z8mhIr3Be8TziWGTZv60maNQ728pL54oUhoKTw8PTHPM/v9ntPpRIqR0+kEtTJPE/M4mSVLQ/awVrOzql1/5VTp/LC2RsGEMJmSTkynB46He3IcKSVaImqoRkERBKNTfL16XS1mvvtDuWYRkUxxheAdXejwPrDZOrphwIUd4M0bUk2Xh66zhFur5ottjxfz0HNCHwRPZug6LnZbfOi5uLyh7wfECTEnxmnk8fUr9vsD02lWUn0peO84TpHu0OGDIwTPEDShVbTTAmZG20+GorRA6K1iDY37WvNiC6aPqkecURMMhVKWgfK9g1N++GboCKFXVwrxVoRVMxjX9uc4jmqe7jy967m8vNXrSaKWSC2J8kI7FafDnnGatCjIGpxjHBeeNRHmOem5GLraFNlaEzaV+YqCLHyyrF+v1uqqRTmsVFWYzvNIqckSpUIsSa1R4oHHuy94fPiSeXpQzKNu9XoKaAi3Vh8NIHs31u4cC2lBJ/W8FAHsCF2HqvsDOSW8FzocHs88Z66vL7i40HG43jtzbRASaMJZ1AvUd0p1cM7TdYHtdkO/GfBBvZxfv35DnE94F+i6nlqrcUAtbhSoJLw5V+SYVNksSueiqL9ja8m2zpvzslCzWhFkTANUyNL4zHWhbtUKVRT9vr6+pdaf6sjHs6I7xmRdMfNYFSvGhw2ffPI95nkklqhAS4wcDk/EaSamqOhRVusqJ4HGERVRjcK5k0ajNzThS3Beef2swiyAWlQgK5JxRh2oCmybDsDOHY1VsSRqCQoaiBatauWTqWXk4f4V8/yIyETfV53Z3opT3g33lDlpcRtTodTEnBJUve+lzkyxMMVEzSYsNhrV5eWO3W7LZhiUbmfFdc7qtR4c1M4ENxIWn1/nRRX4otqVc/50o5oIGsO0h29uLNaRMvacJXDagWm0p1IqXT/gQ0dwzuiKq5ioFVQtI1nB3IBOstSjAkUc19fXHE9JP0PRNZ5yYmDQqXad6ntOp1FjfKcgydXFJVOMiooapxOjp4i5PEipiyBJByGsnY4WX2vV6YDNRopaidOse/8Zwuwo5Pa5bH3FmnEiNrJen1EfVHA7dANBWhqp+UZF6Y5m86FJcY6UfOKwf01MM95e+9vW7Tf7oFqLUmgQsJ7EslGGQGkPZilWPWjbKHhHCE5FSF7UR1OgeXA5L9QaFYU1o//j4cCbN294Ouw5TaMip3bRz5PT5da3LlPF5iAnMIR03bT1nMWQrjaJ53Q6MfRXBkuv36eRzS1/axddrVustjcrDOc0mMdZTdC9NG5mW/xVA3wYuLi44oMPPuaLn3+u1aBPeFcR30QzrS0hhmyZAtuqwK7viSlxc3NjptOJ/dMTtVaOhyP7pydOpxNjnLFUUVtLJmpZKlZDAFolWdsGgaG2hwcOhzsqEWpepkuVqiRpqfrai3igVV4G+8u7AaAuBsnJCqJg/KHm7amcuU4fOAzFFCF0Hu+U/tF1HUPvEfOB9U65fzUndpuBMPR0/Zbd5TW1wtP+if3xwP545MkKiprKwumptZLqZMiu4B1MPthkMlVE9l2vNiaWtLkC0rnlvJ1bBym0ZtN5NS9Us6SUtduFWuK07/Rnooz2TJfSpq3pYAcdG6jdj67fUKpw9/iA1Fk7B9K4qHCxG4z3aB0AgRgnS1BnxnHm4eHRTJvj0h51Z0rlsljF1eUxFER55O1zVRVrOmldDlXkUtWKrl2zWjLzuOdwuGc8PZDTiRICC4lbqgmCVr5qFd6ZtTvFSLTRjGL3Rpy2GJ3XwrhYbPB2X5VPmtluB7abgc3QUdFr5Y0Hpj7OYshJjw+dCVGCDg2plXGcOB7Ut1mFcEo98kFl8F2n9kdenLUtre1NXTZ7L1AXpxWAuq5fr9pj1+6xrGK+tehtm6XXjo3zJlYVnUhlVg3aRXJLbFMzf+OZYALVftCYmXZKYSiaoF5d7pSLPun
zGFNimmbEeeODK1EEzGWgdaiM4NhGdDtkFbiecyLrEoXXbqvIIpjx1dFJsDGcSgMqktTQzpL32ma4n/bsn15BHem6aiqkNgGuIq7g/XefoMaoAJFzigDXWnUUK6p3yPaMl5ptqpwwhI6bm0v6zjP0naKVWEywEepOHF3Qe6JWj1bcb3pcUC/lnBPjaSQbBxmLK9qWtwS1fc1pSz3blusaMADqg2pt8OAbGCCLxmBxY2liYbvnrsVbQ09rtXXrKx2O3faCcdwr0FaN/mLT/JTzvwpexVxKbm5v2aUtU5oMYMrM06iUsaKWmNiUw+byI7Vxwd36mUysuhZYlqPnlTeNnXsBqGpJiIA4BUhy2x+KosdevLb4bdARmNajVtUOVKWqUasOE0mzKvfnAwX1Kcbxrev2G0Oy90LKmcbTrBWrUjXAOe+oOeriax6ntRCcpwuBLjizjNJA1n41ELqUZAuyMI8Tr18/cffmDWOMloFDEwH9Kr++M917+8KChDTRlP4sNglFg2abY94W6/KarU3aWp8NRkeNr6nO7FO0ihLnKVHHp6aUrcKyBVP1pnrf8fDwRKXn6uqGzfaSWk44V3EuG7G4napVIGbi3MymRYTNMND1HRe73UJ7Ga9vNEE9Hnl8fOTx8ZHTPNEMuXNJpDJTicqVqrxtxm4LVp+zyjSeOB4fGU8P6Cz7Zj8jVnk1M/kztKD9QWEHpPvG9fZrPKolqJb6nxG3lQPqSdWRcxN/6doMQXSCSN8ZZw4dY+gq3joAOUd22w0b3+GCPgfH44m7+3ue9nuO44l5shbi2fNfDKlpvGKRyoxbKtnNZmMVuyGFRflATYwFa5DR/HQtodYSblVzriegCH1dnmSPc+q3qAIbbd/4yjKOsZRC3wcuLi65unrG5eUt++OsxvnO47wmPyKZzbYzazSvqm50lnjJiVwy0xS5u7vTIQPjyDzPOq3FKDA6dWQ2R4GyPMfijX7TnnZDTV1Dje1n2zCJdt9LmRmPT5xOj8zzgZLnt3yCW8prF3R59N6VtRuzok/eK+ddcPi+V3sprwjqZFz14N2ioO96YbvdqGLaqbDCIlczpCF0HRcXO4ZhqzE8eJIldsfjkaenPY+Pe+VhWxx1MRJCIOWZvu8I3tOZy8papK5rcrFas0KqcTVVyIQKWQWLbdg9Xh8UK1PQteyMzqLJXd91+hxbctq2YY15FqOSTUxzQvAdV1fX5qepMa3WRI6XiAjTODEZn/ppfzK+riYqKU06/SZH3bhbfcNqrN+KvKUB0Pj55/t+sdjYflX9FURpFCUpyoQ3f2xE0byaSGnkaX/H8fCGPhSCV8N6alaKg1P+tPffPfqfS1pcckQ0V+iCqvUVANREr9j9DCFwsdlydXWJo9AFsalTunCaeFKHFAS8C+q8ExyhC2y2G3zoSTFxOp20E5YTXaceuc4StdCqIdtnHWhH2FDJSrVCp5oFmO5zwdZ4AwSca5MDWdZt43aI9rItV0AHEDnwDjqp7HYX3N0fl7XSEsZsYjFveW8Tc/fDhvfef189gKsCUinOHA57pmmiJB0dWpqtZGV19JFViEtD75uGBcsyxIZurPm25kZAm+IHmC+7jQJuCW1RVxwv6juNqEVlLmqn1mwDi8XsnCMpjszjnnk+4kICMZrat6zbb0xQuz4gyTaBrDeo73s2w4ZhGEBgmkfmFJnnmZy1ahiGoG1Er75fwYMj65jBJUEVpERq0lnJP/vZl7x8c2DKSSF519DI9Ti3PFl6/FJRodL6PQ31O/85cY5xnhDnuLy84PmzZ4u6+hxBbcG1VUr6nhriU3ZMc2a/P1CoFOmINbMfJ7Zzph+wtpOouW2Gvt/wx3/yQ6if8Vf+yl/l2fMP2T+8NMQu4byNwFxQ4qWha/ZXAA7fObzxW/RMHVdXVwBcX1/zwQcfGA9Qk9OYI3OcORyfmOOR/dOeaRpxBWpRjpUUaRNroSTmcU+aD5Q8Qh2ByjJMyhLaUoGsU6WcWbrAmqfyDgRK0Hsa44RUR+d7hm5DZ9MGats9ilX6ZESUezf0Qtc5C5ZF2/vekoFQkRrJ2XN5taNIz/448cXLX/DmzR2P+71aySyw8vkCXqBm2hVr6y4mFcPphm8n79Y/apAUjscjoFY9nCWt9p36HvU8PZW1uLJfpero1q7bklJmGhPDIGSj0FhKQE6V/mLDB+9/wve+/1s43/HTn31FSR7nZrxPhJCpzNaqd1B1EkzOlT5sDHVQcUpbn+3XdDrx8PDANFlycP/A8XhU70DLJtscOw3k1gFIigYYCKqbhHVtvBTImZxHDvtXzNMjOZ2oNVKrTZpoiW5DHOQsuf/lAUvfyVGBmGZdg31gu92ydQHxHbV6coY6z1SKcpJ9oOsCm02vCHwtuoFR6DvBS6YXuOg6nl1d8uKjDwle7bnmOfHTz3/Bm7s3SwwnsyAtOmq0KNVqNGGQJcYbc5Ro4yC9WU7lqqitxmGvAxWC7gUN+W9IkW6WVoQ0EnDr1hWd0iZecGiC4r2JMsQjotOIlnG42XhueUJEr2PXDbz/3ic87PekeMSJdq7OixwsMRmnTMwJqMQ4czzueXh44PFxb9x/ja2dD4uvpZhvajXEUJFb1g5A1d2/UEw0aIlAVhN6QcgpIhSCU3vEjFNUtUzE+MTdm58TpyfVP+SBSgckRLqlAHgnJH5OkxkXPJ0LhH6jxdSsz1oNmpRHpzFt6Hsutzt6EysHV/GioipHG5MKVRyb7YbtdkfXd7gQdAiPCG/evOHh7oHD/rjYNA1DJPQdXd9bV6xXUZ6YnqRoQdTswoSqcd41YRU01D+YkAhpk6fKUli1npSyLItlviayK/o6oXNQhNvb53z+xd2StoDmCjkXUiqETkVQWCI8bDpevHhPARRsAl+KjOORnLKtzxOHw0F9hlNW8Z1xO9eEW0GRVcPjaAMLQghGT1ntt2rb85uQyZ7J2qg2uRrqrxBrqTrSPdcC0RyW9IkglUKtM7XMnI73vPrqM5yP7C480ul5fNu6/cYEtVkrVOOgkhMXFxd0/YC4QCqJMc4cj8fVlkccw9AR/KAjzU1AU9GMH6uinHikZsbjif3+kcN4wHnH4Dc2HYJFbfarDlXuV4O37WvohffeLJ4Q8yPVf396euJit1N7muYvYWfXLlOtdZkPqw+JIkKnMfHqzQMvX93x5VdviDExTapgj3nG+Sd819H1CZzgPQTfM6fMn/zwRxwPmdtnH/HBBx8zTwdiTIgZ33tZzd6bd15LLRTU1ckuLelu7XTXEnOrfvq+XxcZNgazfoD3WmTUkpFauBw29F3ASaWkmePTAzmeSGlEyHhXSUWheqVtZKhKDVCZoyXyje969pimcvzGBffrOrpeg4vOrA70vQroUo4kazHNo1aiQQQxpWYf1H4jeG3ne1lZx43XGaQwJbh7esPru0devnrN4/GoaIo0Pf7XDzlbY8XWnpiVEFaVy1nRZGWtc3ijG9zf32si4JuCfd3k9SgoPaU9EY1nHKgmlDOqFSKdef1N9JuZ4DeGVimCP46R29vAq1d3hO6S733vUz
784Dd4uP+CnPeITHhXVZRkz4jOZFc7o3YOztCL4BzSm4DMe0rKfPj++1AV/TocDnz55ZdMs3JZp3nmcDiQa1TkSzT5VmFQVLGV3ZlS1HZbamGeT3z5+c+4f/0Lan1CiDjKkswr0qVrt40f1MI0k8o7oDQB+iHQDx1dryKGPngN9nhrsyXrPimVquug74W+g+CVs+9CxrmCd5mUjtRa2G06Lm4+IeF5c3fP3f0D9w+PHKeRlI0fV7EOjh31vFOla6tQiEk7ZvM849wNXdctcVb3QbUJur6+4osvvuR0OiGCqpYbeFDXLpV6MahCWzs8qkjx0ryAlVol6NSwWh1xLnS9EFO1JFCRq5QqXbfl6vIZn3zvN/nww0/4v/2Df0DE4VzEe41xbchFo5vcoMbx4qwYz5p0NkrYaRw5nU7c390xzTPzrBxrKZVxylrwVii56rQocwRQZxgxS6VqKulmySPWHha8E3ynlKKUZk6nBw6HLyl5T4xPdKGjFHdm71ZQK8GyTg76Do9hGNhsNmz6ji4o2jnPKkh0AkOnbvi7YTAP2J7t4CllhJQR5wmu0knBl8zlxY7txSX9sGVzsSOEwPE0cjieeDw88dVXX5mAmqVVXWtVnck0KqAtQm+e0X0f6PuOznuGrlceq+2fam0ZFhqV0mCCfk9zATI3DJF65hNgMdpiK5hI1HktSKh03nFxcWnCao8nIMVoKbl1OwtpKXIUfd7trs3ycAJRquXNzZXGrqoTAGOcmGPm6XiiZO0Mx6TF1TzPjONEMncP1UGY8NY6s+ct/pY3VPNvP/sHfBUdY+oCoXoGekIN1DmTTiNcZEqeSUW7aM6ohafTPePpidPhNePxJdtdh9QdDg81f+u6/RYOqqkic6FIwvV5CURznDmNJ477I3OcDJrWhGsmMfqqBH2zcqhkzcb9OimpzR+XCsE5NkNPqurpV6padSgpV7W8rQW/HpqpO0tSGxpCbehIm82tF3ueZ66vrhdEqv1Mq+hrIxPTBIBCxSPS8ed//mM+/+oN9497juOsaj+nLdniAk9j5mKsDBtwAXpLjqd54nA4cHd35Ge/+Jzn732E+B5XtFXnXV4U5s0eo01mWlpCzhkYty4m5zxt/nV7MPVzlLcWm3M2lMBez6OBQpy31urIfDria6akbNWk8VbMA6hmVawLBqcWx0JSqU6DPAIlM6d3I0H13tvmHtgMWn2LE9KYSFFV3/M8aZrvlC899IGhU29cYwmh/FNZKlsnyv15/eorvro/cL8/cZp0is3qbPB2gnoO/Gv763zttcKq+afWs+/Vb8hZx+NdXV2yu9iZvdRaeLWe6OpMybL2wZOLI6bCaUrc3T0SU2IuMGdHPUb6baLvIcZKF3XtT3NGXOBnP/ucV68PzNFzeXHLNB2YpkQtyfh6DQl2lhx7Y3sYgV8/nLaLzxIfRUGCnnGtiqgMgwXrTEqJ0ziSa2SOk1pcTSMlzQS3QSSwKHZTgq5o16Mk5mlPrSO1ROPCFbsyZe28NE6mbSK1ZuZ0+EusuH99hzehZ9f3WrBY2zOlbHOtI4Ii7iFA8JXgK76p0V3RrowH7wpU5QZuNgPJOV69euDlV1/x8PjENM9khBUH+BWIRutLujMwoNF+WpHa+qCsLUZnKH/j/W+GAdevrdf19UFN/i3ONxaizWZfsXKvOKQfSLFwGiP9xji2EnASLIFTE/Ht9pKbm+d88MHHfPrp7/L5z35MSge8jwSfrV3bpuYo2tk16NUJ4hWB2wwDF7sdKWdiSnzv44+Z5plpHJlGpQG8fmWiyHkiZXVDaDHcNb6zAvyaqBagWpLQ0Px29WtmnvZqj/b0mjQfSfFE3ay+naDPSUWoJZNj/Ne3AP97HiEEFc706lFeUf4hViB6D84cdPouELwgRIQIrlh7P5PSCSeVF89uqT6A65hS4vXr19zdP3A4HplSXArVc1QSoOll1K2rEGMhp8g8ixV0gefPnhk9xGJsLWy8ov193zPPcclPmntdU8i3wpil06rfIGCtfXMOWkK0x2+bTVsjsxjyX7XjUEo2m8OADwO3Ny948eJj/uWf/CklCeIy3meCq8sjque3gyo8T0XRUjvn43FPTpmnw4Hj4cjpdOLx8fGMFoCBhAaY2CdT9MI2FCvgxazkln3GkmNsyI+u3ap0OPT9S6nEOHJ4esM8PTKNd6T4RE5boNfzyOlb1+23+qB6b/T0Wimi5OJ50lbQ6XSytlBeHnStYAspOqRmDRjeKTHe6YZQndqoLAmhfVD9kDqtQVvJq6edRbG3t/5GoLCtuZGrpZE8/NrCq6USi4pT3qoOOBdJ2fdbRYs4Ko5pinz+5Ste3T1wmqLOWRZF55zrcWFgc3GJhAuKbMg2n7cK6htpKPPnX3zB7/zuSMEhPuBQW4hGnLdUxXg8RoA3xFmRNlnOualbFf1RaH9lmNjvYolaaMIGndbVe7WNyShBP6dI14K00+SVdsntHpU2yUSxbTVlx/5e9f+VzFzfjQS1KcX7flAbqBDM7FiT03EcSSmZAMNBFZxTVLk9hbVWqmTwrRVpXL5aOe73SsqP0XbtZnPCLwXMt6gp2Pd+7d/POUO6ntfXSzbP+uZa/VjPk4F2NB6SrmFdu22Dj6lw/3jkzf0jX72+I86J4ymSEqRSeNyPDNstfV8IqeoYPGsRPT4duLs/0fVXfP97v0XfX5DziZwms6RqzFa3dgDaZmtrSnywIrMVUmiLlnap64JaIDpMQ+9VpJBJKZKSTl9L08Su91zsLtW2BYjTzOyDmoSnGWeitjbTqF2hFmPeirRGyCg1vTNrtwEDXdAkvBjHX6+DoiHBC32vItTOK+qvFKpiIqRqCGRSr92qCeXD44HXr9/w+PjIOE7mJLU6erx1LAXUuTq6JaPQ1loTkVRZY0/7npSTKqDN/m+NvW29tw7W14AH0eK3WFLarKwqBaQnlZnTGNnOmS50eqdtI40pMwyBFAvHw8Tj45Hb6xfcbV8xjY2fCuKa5Zm+fyliQ1vaObDwIUMIdLUy1KKuCCmT5qiC1ZK52O6UsmL86pgi4zzZflMpNVJqQqozOgLabTABj06sK3ZZC3E8MJ0emaYncjpSauS8eF0dUxqw8rWg8x0cza4shABF16tS2XSJeQ+h03XbB3VzaPx/cdB57VjlPNN56ENPwjGmzMPDA1+9esXT/sAck7aUlwEfv3wuC1ffqTpex4Eq97TBN7ACW7VWs01yJg7PjKcTu90OF0x0tABk8rU3leUrbSw1OAPOtGh3PkD1xveHEtRz1Vv8U4edvHT7nj1/n08++T5fvrxj/3gHTJqg+jbgyNodKBiwqd683/XfLnc7KnA7jpzGkWmaeHh4IMXIOI6M06RJZim2hluyCWp0YDmQa8+/LO4GxYRr6x6kN8FZfqAUlkicj0ynB2J8IsU9JZ0oi4+tJf/fsm6/MUEtDRK24FNrZZpmUqyklBnHk05sWmyNW5ltsHW25LI6XYxFxxQGQ1qWBKhknfVaHS50i6lyMTJ9C4TnkyNW03pbImKCCgugzRQdW47FeBLn+NSv+
l3cGXHYUKG7+3te3z2wP46K7iJIUO9TXMD3O9776FP1YQ0VJJLLiYKitrkkpmnk5csveXh8pJKVj+U83hJHPdF2Hmsi3oRSzreqGzB0rLTr0LJRe0SsR4Ya+vtlXm7zTvU+gHj1pqQs9AFXA9V3iNdlsVhnlErNxWyPynp2561AEWpNHMv+m5bUr+2otWqbpu+Ue4oo93GeF0SnWuVaizO6gyM3t4lqLo0uU5zXFjVtpahZOlULquCVq7ckqdICpJXe7b7Y0YoPOF/DleZaZ7TqhZ4SY2SaRvr++TLS8fzVWrBcKmFpCmdFT5/2B7748hWfv3zFm4dHUiw4t8X5Do/wsJ/YXkX6YaBTrQY+9OSSiSny+Dgx/fBHfPTRb+H9hhCUB+ckWYJalmeFZsDfyiXBEIW3D+d0jdVSlop+6RYAoRa2MlCkrCpacZAzoWr7r/nvTdMJL1iCOplziFLiStFxm1JXiZjYtVriBJDfobXbiisfAjXrqGhtJ8+q6EfoOs/Qe3N/cHQBXFUbnWYKrih6xaEG8dMp8uXnL3n15pExZkq1QqaeF+m/VPss59Q4oss27QQxPtuStK4Qq7oCTNM6LnIYLMNtcQrWtQvNP1IsdlUcpTpiLMxR+dKpFHLxxCwcTjObMTFsnXGUoRZhnDK7nbDfn/j885ccT4Wh37HbXlOLJpDizsR2iy6ijctdP7ODZe9zmLOGxdCNig4AuLm+Yb/fq/gvZ8Zp4v7xQffOmpnikXE6IrloexP16VbvSMBodMHrlKAUj8RJebMpHZEGcS8IzJqE/coM7Ts4FCVVQCuXQpwnpBa8tciDX395VxaroSag0tkmmVpm+tCRqMR55nF/5Msvv+T13Z1FSbtHxrdv8fato2lJWPOGZr3UqIi0Qr8a8mnjnVsxeDwe2QwDYp7VK9BwVk5VPY9m6tQcecpCr9I1LNJTi1c+91zoewzxhIan5VwIoWcYttzePOfTT3/ARx/+hJ/PiZiccf9nXVNYl0SvPM3rvK2FzaBjscvVleZiVUWQOSX2j088PDzoes2Z4+FInCdiUivAXMsi5JMq+CLKVbXCqmTzgV+8NYBaaAM3SrV2/3xgGh/J+YmcDpQyIXVgEUX+BYqqb0xQm1m5MySn7xWazakaB3NeLhBVYfyuD3gvpraEt9yvZV1eQ+hwXm+iWtsknc2bwPlqCU+rds+Dxtufq7UTW2LqQ6DfDuCFIhZUik5C0IWnhtbUqqT+5YW1Ve1NWamgmG7wf/6Tn5m6U9uXmqI5qg9kPLiOF+9/yvc//pTDw0tOh5dM40TxwhQnSknkOrM/PfF03DOe7rm+FG4uw0IxWD/iGRJm/7XWwlsTM9HqK+ev04zb5BPjNUnz6wuLNYzgIFfj1FTlyuSqFX0JFOkRszHxlhy0RUiN1JIVBRZbkoZ2jCny87v7b1pSv7ajDUlwJtyYponHh0eOxwPTPFrrSW87TogkjiRK6ejc2p6u3sQ30i2BLuesCnNnRPggFIIidjZPW30TzwLI2VpbLLksPgqaTLnKmUXHGnjHceT6+pqzksGyiHVKx2rThBUOKiIZp8gf/rM/4tXdA+OcyDaVqfMO53uq70kI+5Owu3QMBUIuuC5o+7cU9scDn/3sS/7qv/M3CZ364DqjjYTmHSwaJM/7wC2eqx2vX5e2NHaWUJzDS1WIxfqXglJNqvHzvFlwBZvx3KGm5yoEKGppJTCXTJxPxDnqXG29DJqW1rziHObRV0qyrc4R47uzdttRbdM6nU5Mk05PasW6RxgGVdNrzmeToUqiLV7vwyI4OZyOvHn1wOPDPbmZyIPeC5Fl44az4kfO/2b4kBVTbSjIYkFXMkHCkjCI6HNyOp14/vw5ITgrCt6O5cBKFRB9fe8E5ztyckxj5NXdI6/uHri7f2CeEw9PE7lUUo2Ew8T2ukIs+FDpemGeEtMUqex583Di+C9/wt/8g/8ht7cfUph5eNrbdfOKMNvwDWeWSO08Wo9IM9m3k0IR36TYNP7/8+fP3/qe7xsyqI4AM6fTgcfXd+Q4s+07tWt0Qad6lUjJM7geJ/o5fCgL/1DjtzfdQRPzrMmX/MVm7vwbPYrZ0yUKcTqR4oRznXrfmli67x2dMxDAKWmj823NZLXMwlOkEqeJ+/s9n798zf3dHTEm63KBIjBncNPXqirvNRot32OL0iEE35vV3lpoqTOJVy520pxju7tg2GwU+W8vXNf/yVIctAK7gWNagFSar3kgzpCKEGPlNEY2m2rT1LzdOyGlxDDo+Ob9/sSP/uzP+eSTT3m4u+NwUOQ3hHL2cXSd5mI+HbVlDa3wKmrxaRzZ/voKEeHF7TNyyosn/Js3b7i7u+Pu/p5xnpiy5Ugl09XMTdROG94jwaJzlcVuLiCEVDRvM+72HE88Pr1hig/k6ZGaTzgKQXTwgLfY8W3r9psT1Jg0qTEUbru55ng8chwnUo4LhN484XQjCQybnqFv7VK1hgkCzlWbpayZlmt7ElUDbMokB5ILVRxFWAnhtiH/qqS7Vgih4+bqkuvrK66urnh4uDtrgawJyzIlhfayrfpvuIDaO1UtGJjizA9/9Gck2djIVK0mvM3XHeeJ+PjI/eOe57eRzcUN4jOpHCjTA0PfcXm5oR8cD/f3fPnll8S4pwvXPLveWZs/m19c48Aad4k2wtVRJK/niyzopTa93r4oC3dVFlwAZws4xmwtlMJWAj02/jGPkCdKHpFYiMmI1HhcPSNTL0hUQweF1jLJeebp9G6gULBWwuTCdJgWlbgmjw3dY0EzHWLk84hgqs1aCa7HG2UEp1YeKapNUq1iSmWnrSTa2M16xuv75WNR/Frx501UAKwIE2dB3wqrrycRy50XUX53deCE6oRY4E9++GPe3O85TtmmMQq4nuo24Lb0w44X73/M5dUFlxceFxJjPIITUk7sdhuGPvDFF5/z5s0bLq42JiLr8TLjaYR6LVKW02knJ/qcn2O+7TuWz2BZel02GWcxQQhBZ1N7o6V4gU4cIUVkHslFOW7T0FOq0jemfGKeJzpJelmaFVNNSLOlKGmJQ5BJ79TaDeQkGujHkcPhQJvO1QYzEAvzpL1939w0SqZQ6EKLsY3uoahIzBnfBYYqOK8K31ybKEjfeR0Z+TZOb19UBLcVHoiOWCyoX6+FAx/UxzfGiCC8/+I9zvnZrThbfGnbUTMiAVXnBzKOH//kM372+VfcPTwRS6YWj8iA+IFcA/ePM7urRLlw+B58Bt/1zGnGd5mnpyN/9Md/xu/8W7/P89trun5H121tc9TxmQrAOGo5p37Z2drgAaXgre105RNCsy101YAUaYhrYSNKqapSSSVxeXnBi5sbpGQ8lU0IHE9PzGlknA4cD4HeexyVGE+LSDcvvti6H3hWFuRySX/51H/tR82FaZygFnKMiqQVFfCF4Bk6R2cGPUbcsH2klYmCt2d/Oo3cPRy5uz9wOB7Iizn9+V79TQhcc/YxCgEW7z0Mux7ntCjWZqNSUEAfI+ccwzCw3W7epqScx966cl/r0uFVDUmpatI4zZHTFDkeI7V6
xqkSY+XpEBm2BemELlWlVXk4HEe2O9g/HZmmn/KnP/xzfuP7P+D22QfgMof9BKy5B6igqZ3VygA8R3vrLyfvi/OHfubLy0s++ugjpnki5sxpmtgf9uTDif7hyOX9HXNO3DnHwTty0qJSreYEcqR+/hp+432dOGf3tZaRWkZCKEqfAIawdiOp5VvX7Te3+HOiZG23B+fpvNPKs1Zs9MsC6yuSpFMj+q6nC86CqSqLvXEUdAJEwOBV+qHXSVLDwJwSpTRYqS5qfrv8b0H550T8vg988P4Lbm+u6fveyONv85pqrWy3Wx2390sJaruVy5tpiylDnLMqaBuf1RLGZ8+fM0VIo7be3ty/5vuf/DZD7xkGNea/m7+wDoISguNcefXmNSWdeHGzQeqVmXEXFPFq0zAs6TekFqMvLGKkFuCpNEVhC56ruIrFP7C1+qbXj7z5kx+zPRwRL5TrLaX3xMORqc5s+kCeD4ynB6bygAye4bc+gs1Wk7hi3nvV2tBWxbYHtdRCyu9Gu2kzDIqeVm3nxjkRY1ySUz13/TzeixlMe+XrLdd72U5pNYF2RZ0i/qVomxQxIUujy1drwejCOg8QKwIDS2B2QugCvguaXFrQaQnpuV9em2Tz1mEbrEg15FBooxO/+uoVcyzU4tSH1YqeKl5xI+m4ff4BH374AV3IzNMDx8dR140T+iHQ9Z6UI/cP98x5y9Wl42IrSNDRsI3Dh5xxFTkvkEytvWws7VrI0jalofQLMOIVxXKOzgfi/RNPP/oZl1MkC7iLTud37w/MkvC7DXmeGE9PTByoYaL79DlyOSwJMPK1La02m36xIRbvxtoNLiyoYpqTeQ+WdU92qgafpxmKTklzgIqMCsVGw+Ic1UMR87at2i1xFhcscICzqUZtug/rJqyx5gy5t0OWXyZgWvyqsQ6CCs+8L0vMbEnqGm9lTa6MblCsSKyl8vNffM5nP/uc+8cjUzJvFdExo84FxPcggTnBXCpTyfRFrYYa13CaR372i5/z6tVr+l7HcXdhgyPhiMb/r6xWTevnXK0G189cEYqcx2ATg2lmY59fEWrvhN474v2e048+42qa6VJluOjxQchPB6ahx+8uiTUz5pniHd3NhosffIDrqsYV8+V+6zYo5L3Gk/DdZ6jzPKtDDm1EbQV0Mh3Vq/jUtfauCaQpVNda7k10VNkfDtzdP7I/jOqXbOhnO2y1saRn0h6Or4M17Q+KSDhpHcn1HjYe9TzPqOq/W7m0tC4tZ7HJ0PRWsGB+6iKI6Njol6/vbCrmkWmGWj3HOVELHMaZ7ulA6APOJR2h3gdyUc52KpHD4wM/+ckvePbsEy52W7puoIpDfNuf1BFBqLZnnI1tljMHoJbKnu0Z6iPsdD+vLEM0hmGgoraHz29umF7fMd4d+aAGigTepyNlR92P3JRHfPwRY/iMqcKp2+A/es77v/97dDdbLT7KWWEgzXVIln3VCd+6br8lQY1aVVa/iEe08lz/rAbMTiF1r2iHxkYdRdYuhhpOq8rfeZ3rLKgwYrdVs97j9ESKWGa98taa6X41DiSsU4H6vuf25or3Xrzg6vqSWpRQTX27cqi1sjOLqZagnnNKdA2fLW5pC84UecuDoAH46uqCcphw88wcI2/u1HoqZ7XUGYYNOalQqk01qgUeHh/wJFJMtBb+gorSFPEW0Fm5qOfpdrvpFRZLFE1zz8UpxpVy+hpBHOP+wP6P/5Td4aT34f0rZBvIr19TJOMvLyjjSD0+gsvUvtJ9+AHdszYCtiHSWnRIG5FyluC/A1x9AIauXys16sINa8mhov6KhATvdZrO4uVoxjaCmdE3Q/Gza191Ay5FraWqS1hYoIn+1ngqfH3QxLK9O53TfHV1Rd/3NBcJYUVnYOVxvjXC7vy1RFfAkiBUmOfI/cMjKTmKJYnVEoFadWjDnDMu9Gwvbui6grjCeLyjxie8t4TBa5H1+s1rTvMWJ5ds+wExNwF3TpVpQd8yeg1Ebq3u3zrzlsDWMxwFe+6wCXSeTgJpf+Lwx3/G1aQJm39xSRg86c0bRBLh6pI6npDDI+InpK90798gl57YlK9fE8Atp2DL+F1Zu8Fp+zhHjRM12/m6ljiyWP8lqdSiHH9XlbKjOgSBEpadtYAVVPoebfqL1kmOlJMJptZCVw/5pfNr661SVvT8a0fzY04pLAjPL73KEsSgOi2qsLZozoWf/+Jz7h8Oypdt6YlX3j+uw3dbLnZX9JsL+l2H64U5J5rJmfNKP3h4fODN3R3XN9d4X1QpTUBINHcT0ILecvIlAVpQuzNaTl1+5iwpt0Rff6oiotzL3gXyYWT+k58wTJEhVXYvbug2gdOrVyRguH5GmidOpwOjd2yeX3DxfIt70atgtThaO+Zch7Huid++0f86jpyUL+0cyqm1eFSy7os5y+Ibq8WSfo+rthalLgt8nKZFyIrt9corbXuxnN0nWJPTtx/i5TFf4rdYGGg/v3YbY4y2ZyqC+nWf6bWAXkVWxeK8FltqsfS0P/Dzz7/k1Zs79oeRUjsET84OEU/MsD9MXFypHVyXK30BjBqWS+FwHPnJn3/GX//rB+XbhwEfesSl9fO0YNBs/tqZNoBluS7nuQ5mMWXBxJLIZjsFMPQ9UJn2I0/HifdOiUIldEF5/fsZOT6wGwM5JqZxJm621F9c8fx7H9LvAuS80tbadbNibt1H5S+XoMY403XBtg/132sPiHOCeIXum5DKWTKUUqLvdcqA94KXQOdB1d/N/kk3txACFxc73nvvBftTJj6eTLmvrcNmE9XUfqtPmC6i66srvvfJx9zeXtP3gWk8UZJ60mmStl6Ay8tLS1B1kf+qjf68EeW9ozfVtI5mY6liN0PgOM84p1Yad3d3xBTVlDgoH1fbHZZUlELFsd8/qvebbSZ6o8xLslUX0qqk5bHQ8Lygx+sZO+eWatUtnpjrQ6fJrqeXjm6MuC++5JkThjkzkPDbAK/fEGJkdzsT54iME9uLLafXD+xGIRDI1kasrbVvO/pSuFoS57vvPlAC9J03nqJQknKPl6lZ4s5MmB2d+XP2QTl7SHOFUCqD97JM9LI00SZzFTUHd0KOxlHGbEOqGnO3BPnryD/oM6TctWe89957IHB/92b5vqWwqZXNZkPzwW3PUHudWqEaf1pvh6OkzNPhyNP+iSSXVFGFvJhbQUVpL2U88XQ4MM3KMe+HSzYXN+zvviJ0HfOcKDkiUvjyyy/YHS4YeuH2esBJMBqKnCFQVhgawV4/p1toKC0pqdg6bvSRxQqK5R6pR7BnkEAaI/LllzwbNtT9yK7MdBtHffPK1u5oa3dke9lzev3AxQgFTyI36KPdDW3123lWS7jelbXr7P6lORKniZQ17nrN9mlejFrEqONBrZVOhCI2Uam04kA/W6nVZr5navU2517bguIDdcqa2NaGtqxJ2C8XV2sCF0KwDboswbElp4CNq17Fq+2n13pF77r3Lb6Zc8pc+PkvviRlT7UBEwg4AlUCSIfvLvjg40+5vN5xdTtQypH9w5cIld7ptCHvhThP3D/c87h/j+1
GLQ07a5MuXtJ2NF3jEkdb0X+WrKysf1n4u28dVXcSh2fjesoU4fPPue0H3GFi6wvdxUA47pkOM5fFMY0z8/5A3vbI0wH/cKTe6HAGUgDi+VXTt6nLEqZ23z0HteZCjtlypYyOiq0mOM2cSGxqB87bpCEVStEF26dsrYkwp6hFhnN0JrycSQtNaUHh3xI+vw0AyNmfW0LazlPs+VAdgb73gqB2/eJHDefJoLz16ufdsAYw5eL4+S++4sc//imHcdaOlat4N+D9gHMdFc9pqowRfBb6AnOuyrs27cI4jvz8Fz/n9d097714gfM9/bCFGnFqDGfUlIIY6eP8ET2fpPn1Y+1oiKLXFRZbQg0q2u2uFf945ON5Zj6NBAr9rifkwuHugeduy2mK8HTE94nHhwfk/gH33iXkiYBY8QFQwMkyhh600/dt6/ZbbaZaYKqYOERgayMZU07L95akKvm56ALdbgfNoG2iQuPjFYPIvQMxgn8XOm6urvj4YyGlL5hiMuRGA5OiUs7mwGrFcH11xYcffsD3PvmIzWaAqq2uGdTE2usNal58bf53ax/VVcuxfEbXKnr7xN47drueUqK2CVsbWIqqQMkIERG10zhNB4LriDKR4yPTGHHe4YPgfCHniTdffcl7799SSGa3JWdPgNlNvdVPqO0kWbfvds5nic9bC3Ctriqqbnaucrkb+J3f+j7/8X/yd/j8n/4hL1+/hA7+xv/g9/l//Rf/Ja72DBeXXH//mr/xH/z7/P3/9D/Dpx5fOoMX7OFGOX2rSl03me1m4Hd/97e+ccH9ug4R6PsApTLNavECdRl521D/LnQMnRpL+0ZhURqnbZWapDrvkBDAHA5un11znCJzmtU4HaU4cGa3s96vX0b+uy6w22744P33+PiTD6DC/rByIJ33NhJVE8fr62uCD2vhIefBUivVZa2IWlMdDurr6YNuBqU6CJ6bZzdU17M/jBxPBz772U/59NN/ixg9fRi4uXnO6y/UEHW/f+Lx8QGA4zhSHJxOI2lOuAtZ3r+J8mjIv6wJDiv+dXbOrUhsbT1d004aaqUFkBeNFVfbDT/4/if8R//Jf8gv/un/h6/evILe8ft/62tr9zeu+Rv/wf+Yv/+f/meE1JOqijX1HKsGIIHqdPpQS1w323dn7eYSyVNkNrcJbMoZsBT1Xefp+wHMWsqjohBVSLMggwteUVVQWbJZ9CRDP52WUU04CmdF/a/Y3xZKlKDeqrsdoe8Qr5oBBwslpTlpKOCg68PJ11/vLJUQATzTXLi/f2IaIzFDrn5FYcXp34vQI1zefshvfPop0kX2Ty/ZPzxQ/InN5ZauM5/eUPnFF19wfXvDduvYdJkPn6swReQsea4Cfu1gLBCB82+dc+sUOdZiS79+DsXr2nVSudpt+b3f/W3+4//V/5zP/8l/yxdffQVD4G/9T/49/tF//n/Fuy27Zzdc/+B3+IO/8+/z9/7X/1sGp76uLgvLGcn5GdifqnLi6/AuFFeVlGedokRBXNJCOxeiqCVddHYnTctQPToQpROq87qGnBZUmHuNK0KPx/tAKsVGMWNTllbg5uv9qfNj7eo0MVGm2eIpGFCZ5wQ4+j5rEV81mfvlV1NeMaJTs0CoEogZjseRP/qjP+U4JXLVMaAlK13RSQd+wPUD282OJAMldEQJHLMODskmdk1p5vWbV3zxxefc3F7T94LzF0hJOGak6vQxWVr76zmqeO7rV0H4+udoSepCNxdBxdEQnDDsBj745EP+w//l3+Hn//T/zVdfvsR3nr/213+Pf/R/+Ht0rsc/u+Tmt3+Tf/dv/23+z/+b/50CQfNEN0+UqekmMiJZRX7nncC/wLr9xgR1t9sZtI61qIsaam96JCbKKVGrQvtNsdx1HVfXlwxdv6j/aymczNBYRP3GUqxsd54UdfJHLZXb6ytO700cjhNzypQKc2oL0uxVQuD7n/4G7z17psblu60uVFuwMc6GlmnypEhZxzRG9k9H+tDrSMC+4/LychGmNJS03e5SC65mwDFOJ0Lv8M5GplGI8wHqTOcrQ+eornB394bTk6NzEc+RUiqPD28IvrLdBe4fHhi6SzaDow/6EDfek2EW9sCxBM6GmpWziQsrmvF2S0O+FsDE2nAVkODIDg4pUj/9mJ/+l/+Y43Ti5tl7bH/vd4h/9/9BDhe8/9d/j6vf/ZQ//uynHL3HlUqXMlK0RehF1lGhFWvzKV/RO8/V1c03Lrhf16H1hKfUbPPaq6pJfUNP1aLHi5hVmla5zQt1be9DG0nb7HRqdbz//nuk6gl3Tzw+nRjtNRbupROCeJrHXUtSnevYbjbc3Fzx4vkznj17xm675XQ8qOF8Ue6qd45igr7dbscwDAvi+6/sRDdksiXhzpOxCtlpkSJSubocGJPgxkwuE49P90xxYpptBVaYJ00wUtZpJTlH9k/3FC45ng6c5hPITkUNSyLqluJKXKOnLFDHcl/aQl1FBvZd8vWNQL/mek8Mwikl6m99n5//3/8xh2ni5vmvWru/wR9/9tmydkvWOKLSE0W2qdpWrKzPkXfyzqzd5jaS5plabZxpMK508Ax9z2Yz0HfNK1nRVeX568bvZW3Z4cAHR9c7Sp6ptVOdmGSbGhOIORqP9KzooEWmZvenxX7XebabnufPb3nx4gVPT49M08TiO2nIv3OOzUbHnILFLVnITHrYzQ/Oq2DLe06PR37y55+RarFhU3pepapLTAbmNJGeHvji1UuevfcR28uOfrjh+vlH7F/f45wjpokYR7yHL7/8BduLLbc3O17cDnzw/AWd0XyW9StuicPtPd2yps+PavmiJrWlmOjS4vESH0WQIZA9PI0j9fsf8ZP71xzHA7fPP6T/ze+Ru54SBt77q3+F3Q++xz/74R8zdZ7kPFRPVwIRv/CFV1/wdR8otTDlme/6iEk9YYMXK5TqMlRER916Rfyrzbgz/YIOnuhY6ELVvNRzJCbjzVuXtg+BgqrK85R0eF4TpZ4fondv+asoCu69J3SdPvkrkqNjnyedJNfWrnDeKv/6a4tSnEo1TrJjHCNfvnzDGAupqgysLvQFrxi4BDb9BR9//wfcvnjG9kJI8YnDw0uG0BkoV8g2gvvu4Y7H/YGLXY93lSF0eJJ2XRuIJZW3TlO+/hSf/cNyPfTPrrp1vdr+FZyjD54IvJmP1E8/4kd//zVTPPLivQ/hB99jGjpy53nxV3+XzW9/j//2z/6IUxCmFBmm5quqz0+2Z2spfEsxgCB/67r9xgS1Kd5zycxTQWwWfNcPRjSuS0uomW2HrmMYNm+38py3CkXNhr0vZC8gEZp/l8B2GPj44w+4fziwP5wY50SdZuZZie+boefFi+d8/MEHRhzulvSuUm30V1wUz4160G7G6XTk0QvjqWOzGdhut8pTFFncAopV+WKJYkE9Wl3NujFst5xOB2qeySniHWy3PYSe4/6BkUrvC5sumUXMAZFC1wnUiYuLju0mEJwo59UI8GsLwhC3+nbNo4kRXwtOZ5v+Wa4qbuW1Kt+nMJfEHCfy/oSkyv7NEzHO4AKy2T
Kba0J3scXvBn70h38EuZBTRmKElDXYYBZBNqZQRFt76mepJtTvwrHwN6si4UPfqYfh4qmp9zzadewIuM3GXBPMn0/U/qsUQ1eq8qbFOULouL2+oRJIBeL+SM1ZWxiG/CsBueKdPifihMuLLR9++CHPbm+4urqg69TuZJnNbWiO934RynivFjMi7Vl7+7OuaM96eO/ZXWjx5lxaKThS6IIQs3UAJFFr4nTc0wWUspAOpFQsYBe8h5JnpmnPsO2oJUJtNviGjbZhE+eIQ0PbEOugvN0FWP/eCP1fa9GJPtmxZuI8kQ4nJFee7p5IcdZW4S+t3c2ydksuil7Y67U4VUtRPLttUFIXVOZdOBq9o7U4mxA1BJ2CE3wwZwOvPH/RNqo3ey5nP7fMiUd5ztfXV9w9HBhnSMU+s5nmrZPo2j05Lzwa2gK77cDl5QU3N9e8995zHHCwFqzWZW7prHnvV5Dj/POdbZ8N+1o7QY6UC0+HvbVPLelDEAc3t1ccxkI8RcZ44vMvP+cHP/i36VIg+J6L3TX3LxWcGE8jp6MOX4hRJ/r1HVxu1inq51zEUrFW/toF+Dry/7U7BbSG/ypKaWdcUfV+ijPlOCIZDvdP6iziPXUYiK5SyISLHr8L/Oif/XMkVxyeKkHFYAt/XLtWS2yzDb+Uokj7d3w0Kkjzz3c2Znpdux4km82QWQ1JtZCh6aTTkKmTDLPyzUv1FCJm2K1xp5zHjHp+EmdnpOu42XEF7xmGjmEzIN7ZpKm6UFLWTqpb12xdX/LrGKRYIa7JruN0Gvnspz+zLnErw2RJsHOFOWX6XOm3V1zdfEC/qYwnz/Gwp5YTLrSUrOC88PruNY9PT8xzoPOJD9/btHc+i1/FUoY1HzCjxK+d8TlaaTzzXyLeWwfLgZRM2Z+QDMf7vTozeE/dbJgFEgV/MeB2PT/8Q123JEi5knTD09dzzY1I3uL1tjHC33R8Y4La2nSlaKJCVsheXKDYzOSVG6pBcegHQuioOdHsF5xNJ8q5TQ/QNry4aPw+v6jxd5c7NRAPPeE0msJUN+nLiys++vADbm+uVfwj2NQlFe7klEgpLornsNxsvQnzPPP0pOT13XbLhx98oLevZX61op44dmNr490WnIPtZmC32zKOT+Q0U0smeKcG1KHjeHiAUhg6h+w0QRnHkVKyoXGJ7aZjN3R0QZFYsUSmBWqpZwpwfYKWZHNFnupbn+tsza3VHWuSWoFYEnGeqccJxkTcT1RnvoSho7+9XkzSa4UcZ2rWyUs1JyR5XQdWDogZnesYVt1Mp5T46h0IlLAK09T+RRWKSrhfi49c8mLf01VvFV5DT9aRbc1aR58tWYqvzWbDVYFxnpmijarzOsJTvX1tfYqKBze7Le89e8ZHH77P1dUlXRfUj7U2lX6zEtOpLMVM7EtWq6kmRBQJX+MEvp2e1troKRtSmul8QiTYKLqKk4yQcJIJXkPo4fBE8EL0mTw9kVLWUbBSCUGodSalkVpnnA0z0Fdb0f9fhYAum7a17N9uxtnmIq0gaxXW2ylsLIk5ztTTDKdE2o+Upvz9+tqlrmu3tCl19hRUs3GzrxnlcXmPr+Z3Y9QpsKDoYu4oDfVvxu5azHQ2ea+CTTiSWhb3A2liCJTrfHV1yc3NFfn+RIktedeEfbl3VRP25U5WTU4bLeX29pZnN9fc3F5ze3vN4/2DtWtZYk47/67rdBJPE7m+VZy0zwlnd35FtIp1JMyq0AE4FafGesBNmTxn7h/umONMTL3Osg+d7S+qoZjnWZ+vkhjHI8ejMF50NHu3t2JrbevYClH51Qja+bVpnxW+Zudj2EOsmqByijBn8nFW1FpvLMOza1uDhSqVkhJStGxQ1baOem0gTJsW1Pj07c/ljG73XR2Ny+mdAgHBB0LoCMHTdV6dfcQRxER9Ak7Mi3tBM61gMApZyXYtalKtmGuuEbwdA5ck0n6+np+XUrl22w0XFzv6oacN+mnoYbHOlYq7zxLUc0z9LFk9e8ulQJ/nyFdffUXFL+upfUPXdYyxEnNiihNzKvhuiw8Q+ki/vSIf3uC8R0cy6zCfu/s7Hh4fOHaOoat8+N5WX1JYBmVQjUt+pnPwooDK8s3LmZ4NFVjicfuc9v8moMoZd5pgypSDUo1EvMbcZ9dGUdR1W1NGSiVndUOJYC5MsiTyYjHpPI/5tnX7LczqlqBW5hhxVqk1XqguFLOLEEWVdrsdThwxZ3MO8XShJ3VRE9uclD9SEl1vyCvYxCNtBVxdXRC6DRfjzPD0SNd1XF5e896L93jvxTML0lr5SEt4kwaCtFgJ1aU6EinmxVg5jSeFmEuhb5ZZZxV8tTYTaOsgpcjQeTZDz8Vuw3bX8+YuM00npOr88H6zpeK4u3uFp8PVgdx3dBKYpolp0hGFzkHn4OpiS995qGrzUHJZns0FJV3EEPIWkrciDWWp1fWTtRew+t/ZZAzR18rYuNJUKGMh1LP56F3H937/95h//CXzYWSbK3/zf/Y/5Z/85/8nksv0UhkylNz8TysUqx6r8stKTbyZJv6bw5ffvKR+TUfOmRgjnQ/q+xY6jsejFVQKlTVv3M3QMXQ93nltGTm997Vk89msqGWcA9GJWjllqI6+Czy7uSYlOJ5OZLtCra0vRQP3xcWO3/zN3+KDF8/Z7lTwlHPGgY2yTMY51WS473tSVIPvw+HI4+Mj223PMAxshoFqHnZtc6xnwVNbrdB1gTlO+KEs7eFas/rT1UwXKtvBEUk6Ti8ngiuU+dG6EWpkHxxmMZaRqlwy79GEryGkwDqK1wKorJt2ddUsCc/XbEtOy9fzFtukNPCmmsklQc7UUyKgAjVXHXT922s3na1dKQQqvhifuKLoac16DwGqR5zjKU38N4ef/etZfH/Jo3E3q3dIzfRDUBs0AWolzYk5ZvVp1FQKcY7O6wybZnamfHl1mfBe2G63fO/jTxjjLyj7iVIirmD8uJYV6H1sXUN1u/B0Xc/7L57x6aefcrHb4J2KjUoxbre0KXV+6V6pl+RWfVrPduumxV42/oaE2ZpxXhiGnibOFaOmeCdsBk/w4CRDjZQSGeORIfYaAfOEq261GnQVKZk0jzw9PuAkcnPlbS6LLN0mcGeDUBrasxaja8FUV1S11mY4gzat69ny1iQltzHRuVAt7hYKvgp0Pb/x7/415h+/ZD6MbDL8e//R3+a/+t//X3gqmQ4VCku253zRhLS9yq5brXT/Jhbif49jGIalxR+C8qU3Q0fXBbrO2foURDLqLeK0wHErqi0OtpveOoOFSlbP3lzMMk73ujbXrN0jTceaUHRFE4NzPLu95fmLZ9zcXPPw8MDx2MYaN2vHunQqvHvbK70dS5q3FCRr+oooRXCKM8hmWdMKpmYuLzdMDyNzHNmPwpdfveSj7/82LnSEbsf19Qu+2v9YnSds/HgIjqenB169eU3feS53gSof0gYeNXu/ZT1al225JiI2tp3FYQLOQFO7jjk3r/kVIFCZiXq01rHSZaGa4T9dz6d/8O8w//gl02Gkz/A/+l/8bf7R//G/4KjJnVklt
gukQm28cG41LPCt6/ZbRFIwz4l5nJjHkW3XkVMFyZYJ25t75YZutwObzZb9/klHEhqBOKWEk0DX9RashFKFzXCJ71oLICyzcHfbns3WU4rw0UcfkWsxR4DAsowtgJ4nluv407Wdp5u+cVF94OL2GcFavsCCoK2fuS4cFET99DabDe+/d8vlhS48L5U4HpHu0nzVKrFESp4QyaSYmcae/rrnNB3ZH0bGccJ5YbvpePH8mt2mg5JAMpjaUQ+9g+LcstiwIO69x0mBou1IVTD+MiqhPyXL61UcqWRmKicp/Ojv/kPqmPAXgfnVA5/9vX+C7yFL5ss//O949ac/5Hf/4A8YUuVkbVYfI3NyhKKuDeLcYn+TcybmQn6a+O3y3VfyoOrhGKO22MXhQ0dD7M7tm/q+52K3Y7PZGNpUDK9QG6bD4Yi4yjAMeF/wMdN1ek1d8FxuN9w8u+X2+Qs+/+IrTmNkmjOnaSSnia7reP78mQr6PvqY4DVwVCOPU9so04lpmtTqpIkyRO/xPCdevX5NF9QS6/Jiyw9+8AOgrXtdAesccWtjl0yukXHcs9tt2QyecZpJcSIlbd1fX26pbsdhf0+aFR0lHRi843DYs+nEZmhnri4GLrYDu6Fn03UoznRu/8JyjZuKv33Vi6PIr5q9/CsQqqUVZH6tJWt7rGR+8g/+MWWK+G2va/fv/lf4Xiiu8PIP/wWv/uTP+N2/+ftsYuAkgSoeXwEcKWV8TXhXAbWEoagwYijxnVm7oIhLLY6ahc6EeSnFJV4NG7VRa408d4YaNSFqm8bVuJJSdR3/xief8PPPX/Kw3yPZvBWd8uUU+c/UrIlYCJ7tZsMH73/A7/zOb9P3vb5uyeodm5IVR3qnQ1AhX25iSlqMtpjMuYXVeiydm5QIwfHixTM+++yniNM9Q41NFPmXGvEu04eCc4XXX72kD4EcYDy85ngc2Q+F3aan6z2lTMzTgZx3iOyss+bUDeCsyNfjXBSzWswtg0q+8bCCyzoAVZRXOwuMJD77u/+QOkX8ztbu3/t/4gcHvvDyn/1zvvrTP+EHf/NvsImeGjqS7yhpVGcB2/DUgzyr+riqSKjW/Cuu6K//EIHNpid4h5DVF90GGoB2slxLI8VZgpq1sAlh9efE8f4HHzBneHO/5+kwk5Lxx1ub+qwwXtypbLyoc01MqlZ1P/it3+bFi2da0NXCw5s7tT+yoqp1ib33XF5ecnNzrc9fLayqlF/6tPa7dl3FBU3CakMIlY/tRccSf/jRCx6PP2eMkZQdX3z1BX9lPOD9JUPv6cOWlDR5jzHqKO5SKHni6fFe3YfY0Sh20hIgfStbc42acpaq/4qFodQpLG6bO5H9TC55WbdRYJLEF3/vH+LGhFys61Y6AV/46r/7F3z1Jz/kd//gb7GN+kxFH3SNCmpRKCocrk7pcF172M+6AP+q4xsT1JQrp3GkpKwtenSyxRn+sUDZPgTEeaZpZJomJfw6rcjHcbIL4+1iCJ3v2V7sloCmZ6MVvxNRtCM4um7D2hrUQCHWqitfT04FnFdv1WmaljavVgmZ3/7N32Q7DATfRgSeITyNn3a2gbYE+OLigqurK/U+rdCFwDzPeGlCBk8sqqorwFwKMo+4QyGlA6fpRCpJkeCrLVdXW/rBg2u+lW2ByerFRzWU1Cohg/JFnP6cExtZ2DiJsiw+acRpWGF1dAa0S8JP/8UPeTrsedFdw/2RP/v5P6c6uL2+ok6Z46uvuPvZf003DFycoOwz87WipKnx9HImZ0WjUkw6NeNQqFx/y5L79RzjOBNCTw2aHPWiZPBSWvKmwWCz2dAPG3wIinSIswDr8OLxkyfnREo6SlI3WC1gFClwhM7ju57r6yu6PjJOEd+rR+/NzS0vnj/n+e2tcaZXvpNUG26Q4jJEoB2K7iqa5IpwOh2ZwOywVuS/PRtKZw5L5V/REYudD2x3O50INXTM85EYJ6R6htATuoEsgf3+kZKzTc8a2V16pmkmzYnT6UjwwtB5Loyi0gff3t7W7lknoirO1CavgaG6YsHRuMGyvAALAizY/62L0MyAdOqL4yd/9KccTxPPQ4fcHfizn/9/wTlur65gjJxe3vP6syNhs+X9U0few0kypdfkp03j0ba2bi61QqF7Z9YuFOXrVUdJ7bqulmPee42vwLJDVTP2ydE2clX0u1C1oM1KFQHYbjc8e3aL7zrGUX2cdfMoC5pSXV0269vbGz588T67zaDDHhbRX0tQdUCAiGMYek7HiRQjpzry+PBEzRnnFMH1m43yL2n7qnVxbAtYLLM6b21sddBQ9N/EqUx0obDpHVESp+MTT/f3CJnj40uIM9NY2fZ+cYGoNVNrUnqK2QwWQ9UXxIn2Z5YER79Uwa1r+/w+NZT3/NDcSWjdTIfgUuUn//yPOY0nnnUBd7fnRz//EkR4dn0D48Txq9d88YsDXSpczI6aHRF1LehdskHClVqS4rjGQ00F7uLmX8vK+8scIUDXO4Kz56sqrSxnj3MbNpsGUBUFDcTRbL4aJcU5RxHdw188f4G4nsKjIv4pK2oqgninEyjAHoEVoVchn3ZzP/rgPU1ONwMhOHJS/+qlMymr9VHzVR+GzQJQLbQgUTGUyLpWC3X5PnFa5KVScJLV612KUnS6nkoidJXglZc8TgfG6cSw2SARajwxjun/x96fxFrWbHme0G+Z2W5Oczv3r38RkREvMiAyK6KyzyQhgUokEL2qJlBMkJAQQhRixACpikExBJVESsAABFKNGEEJxACJYoKAKlEFSWZlvohMIvJFvP5rvbn3nmbvbWYM1jLb+1z377q/F+99zyN01qf7ufs95+yzt9my1a//qiOrpzQR4wRT5OXLF6z6nu26AdHhqBWVsmQ+KDxbynoKpOIicrrgU8l6/ydlKZTgQJHFAlPm//ed3+N+OHDTvcq3eXfg8OkX/PjHd6ymkU0ccXFiLwq/qWkJT3aZbIG4TCJnxxjljXz7+KjTnJmiCgxv9QR1gO2i3kGspiaTORwPGoFsOwRHtPFnMUaaRmtRFQxXIZucD1Ux4Tzeio5FnX5VmGa1aVH/LBzLJpBLrasyZt93NUIWpwmyRspuLi9pmjKTXk430K6vEcFMTM5A9lVQ1wLwlGrHKimRp4mYB6bkSNkhhkM4DIkX0w7SvdXywcXFho8/ep/1pqNpPT6INkvhF3hszgzR0sQxn8HSHT5Hj92iq3M+oAt2oxb6Z0fbr3AfvAdDom0cm6dPWHctX06DRhqvr6BxHA57vrq9Jz7pFEA7JnIUTXlDNa6mSesmD4cDx2FkOO5x8tWjDPdN0RSTKl5zKHxaRHEq72qXsQ8KKRJjREJJ9yiP6mvWDJjQuhs0qhUMmso7hw+By8tLun5inCJXKTGNkcvLSzbrDau+N56b64VU2WszwDSNteSgjIks0GiaHgsEcaxWPavVyrZ74VDluY6q1NflnOjawM3NJdvt2prDkvKj7y3tr8pkmvYaSXCeIBHndCLK8bhnt9ujWQhhZWOM1Yg3gz0vUkSZmsnQOs851abKA/Li9+X/JStS
Un1VyZsAbroV26dPyVFrYjfXT1i1DV/FIzlCuL4iB8dhf+DZ7Z70/prsPXmCOESmMTGFiHMKDo6lFYsTmuIdTn7vF8uUb01aolEmyeU0Ws2/yrkmBNq2NdzjIg+0Xvw4HI0PnOH0DzgPMUr9fN+3vPf0hvVmw+3dnucvX2q39Dhqak6EbqXNfNdXl2w3G9arHpsJYoouV7SBImsBi04OtXHn9vaOFEdV5DnTtYoxWXd/EQiqshwdx5iy1r+HFMEm5ygKwUTwgusbOtdw3N9xf9uT08T+9iXrNjMcR+I0ImTa1jOlTNs42kYnItpX2X3MWapCajQ9iEA9DGAUJ3H5ucVfvMleaXv8zQ0pC8FfsLl5wqpt+Wo6kmPGX1+QQ+awv+fL/R35yTWpCSAOcYGcC9awrllOEXIsLqE6L+9Ag5/ZaIhTuRbjVINXzi11rtZNFwi0OsZ3kTEkW930dsuUhMPhC2L2aiQ6hwRPjMqQiWS45/rZrmu5vrri5uaa954+oesaG9qycKJt552TeXhPpqb3Hxpu2K2Vvhqol9CX7HcxJRCdSy9Wz9oEsaCGRvwlJXKa2N3fsV6tIcKw3zEdtcm7CQKSgImcJobjgSboOBONBy4T+3OmdQlBWByjQieZ1hoAK/xt9eEWVS5827Qd/uaGeMK3zQO+hf3hni/2t+Sba1IQM+YzkYQTTxJtjsvkRYLCbK038O3jKf7FfyVvUeAeIONy1jSZbXRKiXGctCnIAOTHcWC32xFj5OLiQhUiJWIyh0wEbMasRVwKxA+WqtKYDKWLsQjJItgErEuvZZs3iDgVnlGbomYh6+o1616JhqEFIeVIjJC18IeUEpeXlwzTxDiMFV9yvVprJCwqePAYPZlGBUeGKU8cxnscO0gTXdfywfvv88lHH7DqG5rgCEHhpsiGlerKBClXzkOFnJA8h+xrS4J5TM5ZZLA8URUEKkRLlCBstmx/88/RDxNuGFndXNO1gdWFJ0dBPnyPeLtiaODlLnG8jqReu9pz1FS0dnUrM0+GmLDf7xnGkeO4Y+L54xz3DVFMmeMYQTzeq7O1sOUArY/qug6w9GmM1pxUhJCO7k3R4MCsHi+EQGjUQHUh4Lx69JvNhn6lZ8WHQIrZvHHbr1Sg7GfZVhoQS2OfOkJ6bnKmOl2b9ZquadisVmy3GwqsG8wGYU5pIVzVou7awPtPr+n6DeMwICSm4UjoOnMyE+SRnEbidFBg8KBSZJoyu92B3f6gxqKnRmJLw5VIqXlcOkilPmphfWCOLGjjI4Z1SKlrhtMu09mLF4Sw3dD8xq+wmhJuGumvb+jahtWlJ08CH75HfLlmaDy3B5ieeA6rhiSZaYxwTLRNJPhEIuqkJZfUCcUxjPt3hnfF1hZTNKWZDzLeB7q+11S71YyW9Y0xMRxV/oo4Sw0POoDAgJ+d8/RtYL3ZsJ0yXb9jjIn97lCNL+8CT57c8PHHH7Fdr2lsPvw8UEGdq1LnXTMConWv1WiKifv7e6bhgBOFCLq6vFxEzc1pXEQsSyOKTjFLuBTVIEsTYkNRFO7QE0KLa3r2+1vuXYCcOBx2bDrHMA4ch4GURvrWc4zQt55Vp7jHyrIGVC7llgRK1oo5QrXM7hdopFwcMWyv6h9zRKFI8rBZ0/z6r9DHjEsT/c1Tum7Bux+9R3zZM7RCGhLx4pLjSo2mnNVAFdw8iMXqWsXbfeZEyOPPmQt/eqoDcCzqOE3RZGVD4w2NIJtsyAqBmEqNrk0mLB39RQavVivENbx4eQcHIWbAOcQ7RiBZmRQW4GnblpurCz54/z2evveUvu/IRLIF2mY5C6CBiJLFzVm/tzheOskuLGqOixGtJBbwUGWbq75NKdF4vUfnRQ3OrDW3XjLZnOT7u5ds1ltGB/u7W6Zh4ng4ENat4sSTVC7HgZy7Kk+LLtJ7knr+Z0erDE2i3vdslC81kPGtyepyKYdmsMN6Q/tWfIvy7faC40p1g2oWHa2s62QNUydBt/xGvn3r8RMa1ZxBxp1ITSWVdxQlX+BhDvsDty9fcnv7guvra31X1qikd2iNqqX8Ys60rgOEqDk3nXyXNUWvHdWl5jLPhqtNZApe5zM3jWe9WrFdbzTCFxNNCNzc3OhnJNWtLCmmur0lhI/BW6CGzuXlJT/64accfKuF0PsDN1dPGXFa0JwmpuhxwSEymbE7qTEwDvSd56OP3uev/9W/xnrd0XqvUTdR6A1n/FY7Ssu2ZBWW4jQKNEePU+3wdpainAWnGVclPWGYYwj4Jxes/9pfwMdEHxx3eeQ+J7q/+AlNt+U2Rg7HIz6O+I3n8Mff4cgR5yIyRfLLkTa0ChAOHI8HDocDu92eKUZuh1/jj/kfvi1L/UIplvpcsaMYo3lFJUqn9VLOylLGYSA4bzA5OhJSxysHNchxNI3WWK82awqCgXjNAjjRxoAmNHjf0DRtcRS14apaxyULoC8mg27zztcGkxgjx+NQG02uLy754IP3WfWdGguynH5ToF3UeIgxamQWk9vO8fTmhvX2grv7HT/68ffJeUT8yDQ1IANTTHjRZoQkDlzmbjdCvuNw2DNMIy4IbRCePr1ks+kQr42HRTYWlhOvwmiOmMwRpvl8leYVMXlQbZZKcxRY+bm9ueTir/1FHQsYhJ0kdiRa/wltt+UuRg7HgRAnmm3P8MM/ILqRLHtihuk40vUT3o8aBcFwUVNiiImv7j/ij/mf/6LY8aeiUhMHM/wY6F6uViuuLq9qmZM6A8qnzmecD4Y8ocD8h2GgTUF51Gs3tQ/qEPvgwK25u1coqHBUUP1+teJX/9yvcXlxUY1TYnGUi1GRaq3cNE01gqRyaaLMVR/GA8NBN/j66pLg/NxhbVjVucLsqZGi2Qudb950ev5yjnhgt78nuI6m7WjblhF4fnhGzkmBwHOkCR0pHbi7u2V3f4cPQuehaxzbdcfltselaAZTNGNoTuGXAQZlPq+j1D8+6HouDt4ybJrnAEHOWgPcPL3i8m/9DiFB2zl2OXGfs/Juv+XOMDi7OHFxteaHf/R92psNPjjiBNkL4NVYEoWfy1H1HmgTWCe/fPQUrT2O4GbYsrZtaUOjQ0YW9nyypt1hPAKZ1EBO3oRBUoMSzSJ2fcO3vvUxz57fsd8fGGMkkiFFpugr7vNqteKTjz/ikw8/ZLNWCL9xmsocTMNJH63O39WAkHOOFBVH/e7uHu8c0zjSNp6L7Rbftjb5Uu+dmKqhpdUu8zn03jHEyPV6pTZLjnjJDIcdjSgPOoGRkcPulhfPv4Q4cf/yM9w0sdvtWXWO4KBthDFNOKeNqU70HJRoZ7HJ5qCAQSjWRvkSY0edvEqWZudU6BY2TkkHznRPL41vhaaD+5wf5dsffPd7NDcb2iAwZZLZojlaVDmoHJn96vRGvn0rA3Wp7EqzkXealk4W7RnHgZi0WLttWvbDnrvdLXe7WzKZ1ao3ZawNT23XslqtGMZRO/7J1lHpFFoilyk+cnoTix8HtIbTKmag+uCt1s9VQ8SJI/hQPZ1qtSw3p+wzmoLQmiV902a9JqfE82fPGGOkaVo
+/OgjPv/qOfe7A+MQSTkAR4OEcQq4HmDd9Xz84RN+9Vsfs92sCV7TxsHGwFZGM5Bs9TJmlhG0JCLmaPVhqSqm6u/I7NnV1GlK5o06oigETWwb7rznPo5mfQWQTHIO8Ey5Yf3BBzy5vKBtI3/04kfk4y1+2hPSyJBGixarghqOh6qgUs6QXtLy7wP/2bdhq18oOaSWcrjFYa2H2Qm+adnt75nGSd2CEEgJDodDjehrbZSiALRNq41+vkEkW921M8QFA0p3OslnLq1PapDWkXzZgP4Wk3ucjtQdp4nhqAd2GkcE6ELg4w8UWm1uxCyer/5VlWGyMgV1kHCKFbharcg5s9vtePHiOS9fvGC9vsSFqJOKxklnRNNaPezALkaGtEfYEcc94uC9p0/4nd/5bb71yfts1j1tI4TG4Z02JSzxhiuEj91qTMUptPd4iDFZlsWMgwpDZaP7ipJ36ilMbcP9tWefNLORXdJIhHOQPSMN6w8/4Obqkr51fP/+S0Le4ybBjQNjOjJOR3X2osogRRRJ7I8Dx9vxneFdoBqoy5TyarVis9nQ9R13L16C6IjQ4FSeOOcIYVAQ/qRDRoSg0W/nbAJVo82mZoT1zvOtTz7BB6/lXOiox7bravNJmVKWzdkiGaTfONQIatnL3W7HOI5a/tLpUAGPDn25ury0MgENOmCZsJIpcGIRR+dpm0Dbet5//ynBB8Zx4OXtM45HR9c1c3AiH3GMxOke8ZqZUgrc3b/k9uWt7ncQejNQNyuNqglJz6HVmNZntDpb7ckpY7aVSqmXOma5nuFqnJrGcsbnIkLqWnZdyyFFs3sTyWEA754xN2w++oCbywuaIHx/d2QMKw2SiAcnDHEiTJZG90KOUbOXdsja9oFC+yVRSjY1zvZ0u93St10FaVenUIf7jOOR4/GAiDA1MAYITaJpVjooR3QwQ9N33Nxc8+TJkZ989iUvX95zHEbidIQ00nctF5eXfOuTT3j/6RO6oPIopxkXOIM5/kfGcbKhJ+bs4xQrfBjY73YIcDjs8QLpgw+Qq0uCX1GMwEK5qpOogao4kZKinzx5csVut+Pu7lZtiXREZKJvtEwKv2J//0xZKE4cdi+43gSOx5Hd7p5xOrBeNdwfMuves+48fWMtZnnmyWXtdKHZHliW/y1cqwelKg+pNDJOTcPddcchJ8TpOckOojvl2+vLC5pGSLsjU7vSNi6JZKejX8mlsSuR80RBfZK34Nu3i6Ca4igpneAt8jmpB7nfq1DKZJq2UaPVujub0ODazgSmFvg3oeViu9GI0XgEyXXUZImYuxKmFjlZTCk7cLIZXhtaClZg8Opp5lyVonPLqJN1k9ZUaMFyXG64bqoDmqDC/3h/z/444H2gCQr9UlKy6tlEvEsEL0hwtKHjcr3h+uqSdb+isftrvCcYWDzZ6m8WED11EVAXRHJZh5LqS/W14kUtI8CacqNGtrCIWrJLpjJlxmdlOsxABZ69fMHzl8+5vlgzjOAl4FxDyqOuKXCwSHlK5smKfvel+5y/nv5N4F97K7b6RZK3RrjGexqnTkMs8C5C3bejjWNrvPLP4bDj/k5HjrZty3q9BkTrT/N87ZQmSr1c5RlLUekUJfU8kqVFsbpHXa65e9GhOJfrVY8X4di2qphiZLvZ8uT6ivWqsxq0NMuZIoCUw3FOu9TLbOochWmKbLdb9rsd/XqjZ1E8fbciiRmz00RMWu+mY1wVc3CaBnI8ktNI3wY++vB9Pv7oQ9YrnWAUgig0XAi18bHUQjlXwtYWkVpEBAvp+mMRDIWdyQWb1KJZukZWqwSMYlE2L2Sv5UUZhTCZcuHdF1xdbjmOGd+05NCSokeSVzQRnKV1HSlpKu9wONIcP+Wvp3+Hd4F3a2NZVTCWvTJc5+NBHUOtgVZXKKZk9XMBHzKSVAa23Yq27QhNU6foOKsJRBRmqWtF53ybG+GsBi1ZlFA7x+0npxkhIs6lXJrajOz3Wsu8Xq3YrNdsNmv60LDqVzqSeuEkZgocW6pR4sLeIQQkJd5/7yn9quf+/o4vv/qMJB7vFUM7ZYhZh1toJaJmmfbHI407Mh4P7IcjiUzjMpdXa7bbjq7zwISINndpvV+u+qE6+6nGn2r2ENBIViqY3kXZu8Wp5qRZMZJ1ipDpouwhuaype+eZEnz54gVfvXjB5XbDbkisJgeNgDQgDdMUmcbMpNMzaZw1G4rWJrpw/Dly4M9GJTUuXhtNu6Zlvd4YrONcBem9J7lo9+6sXlVHj6Y8QY5QIJ+8jaUOgfUaLi62iHiOw4hvdsSYuL7RiXw3N9es2g4vaoQl03oFkrKgpcwlKa5CsaWYrDY6M45HhuMecuaD996jmtYLW6pmKk32qlMTEUk4Mqu+1WZUSQqFdjjgBHzXEJqOlB33uxcK4IMaqW0IpDSw3+/Z73dWHpBpg2PdNwq9lZM2FWY9QyklG5qEjV8t9anFpin8W8Wx6a2lkTpH50qdMKi9MEhmwnjfaSAhIq/w7bbyrZCdB2mIBD1aSaP9euVUHVONqj7Ot2+AmTrZkZp+jKkYrBMpjYzDoNNDnOCy11nPWVNI3arDWwMVomn9rlM8x2z1Jxmd8lAWq3TXlZobsf8p7mbxAGSeeW51K2qcatNKqSkpBm3pejt9wCKAFgrWfsRpzSAZvFNYqpwS41EbnsZhIMVocCt62LzTqUGlGH/VeZ5erbncbui6FjXQLepUFTpVEZdoWBXiZUY5+aRwu3pF1UC1ZxQxiCLjRnOlxPay2gxm3GokCnIqpQXCNA1MxwE/TXRNR55GSNpBTYGfMCzJYgxhUDZOBgLffZThvilyloJX4GVfnYwiaER0CEOcNOqfvSPlyN39Hbv7+3msqCkgZzzmvBa+xzjzzRwJnIvUq6q10ZosCvOhxL6Vt9pWjYe+6xjHiWlQdXZ5ecH11RVNE1iC2C9JqLJqIYTKZKTEdrPleDgYBnHE+4bVqmc/JMYYSUnHBjoXEG/drQaBFeNI8LDdbvj4k4/YbHS+efDaqasdsO6El9X4L9Ol9F4cpxHBwovFoC3Gai1RYXleizEjKstNNuSv491hwMVE8K2WpiSFmiLphCPBkRzoxKKB4ThyOB6Zxtt3hneXJDIDoDtRY/BwLI1Q1nSXYIwTGixVg10Ve0PbdoZSoWVFLoQqH1V+aEoweKdpTCxCWNFSLLKdscl3Vlpgfw8hVHSBGCMpDtrcstnw5OaG9aqn9cEaUZbnA5vel824tQZGUU5vQiAnbapa9z3TqBBs4hsdmT2NWi9oqfqiS6YY2e1HWn9gGo5MY8R5x8V2zZMnl4qg0jqcn4dNyKK0AMBReNqVLD9l+hwIWTQFmvJctzhHqpbPWBxVrbMtNXjZKR5k4d2MECfVo5IywbeQvZ1ptUgLDmictEPcTQoxlXLSz/lffpdUncYkgvOetutwzjGOk5UklKCTZTu9NmMqOxUZoHzgnbPxvqWZWkdHX2w3NE3LOEVW2zU5w9XVNZeXF6y6Xk2zknI3Ho5myOnwkVLzWKYbuWp4BoO8LPw5w1/O46WrTWSBI/0+sQ
/NLAEHTgTvigwT4NR2AOVdLw7nPE4cwXvW654//+1v8/57H3BxcUHTBNrQcNzt+c4//j3+0T/8R6x8q7aCCDnPqjRn9IznjB17Yp5IKSlvG6+sVyv++b/0u/z1v/HXeHn3gv1wT4xR34c9gAjDMHEcJqYp8/TpU9774EN8aAhNy/sffsC6bcAJL17e8sXnXxJz5oOPPqJfrQmhIYuQUVum69c0bU9yHsj1jPys9O98Z89/8V/7IYf/y1/+Wr591EBN5UFxQCInFVupnidVGC7pxgJMU+Z4jIxjJGc9tCklUprIOUCO5CTgPCJOlTRZhZP3xGmqihSEBAxxIh7UiJ2miYuLNU6Mk7IoFz+wllJKeO95jHJWo9GLXiYhOJ9JOXJ795L840ScIn/hL/wFVqvVLGQFNepSYooT0zhVwZdSstuRykzz//PMjCYgc3Z85zvf4Vd+9Ve5uLzU58p6b06EvDgMYocEMMbOaqw7RwiBtm3x3hEjhGCHstOD4FxQZstqMTVNo8aaaZWU84lxWgzyh+Tqfdg9Lj71nn/B303/FvD3Hl33b4KmnPAZchZS0jV1kpVdENUUKRJzYhwz3kXGIZGTw0lgvd5yc32DDzuGITHFSM4JmfQMqKEWISXISa/vvPKtCJUfsxqqOcP+OCDApp/ou4amcbhycGYRVZ8hpdlgdc7p3mG8J+AXik1EkOiBiGRhyBO3t/d89w//EKbI1dUVfb8iO48LnofnBZb7unw1VydJX8v1z8Lzv/97/4TNesNms35wvcW9LT2g5VdX/s6Q9NucczRtS991dF1L26pxHbO+37eB0Hbq6OLJ8tMreZVN87+78B22+V8B/r2f6jq/CPo63hXnTVsnUk6Mk57ZOCVAnfK2bdlstzRtQwgNvLxTQzQlVShViavcJRep4vRHBPAqp/PsqDBMvLi9Yxhaui7QtgHvPM6ZCJFs/CroRpqctGBBLr8jMwFenBofQIxR5Q2OlBOkyMjAj3/8Y/q247333ufy4pLUBLq2rYoYKSa0aaJ8yrvZjMLZ3C6/z/Xn888/54+/98c8fe+GaRrtPljwZTVNTOaWIIX9mPx0mAwVMVkc6LqOrutwzlfedY1X3g0B/gzybsapMZ6Mp5zgxJFOdPT8d3ViMs6LnfWGtu14+fKO8XhkGidiiojtl25wIj/Yz9fJMxUpiZRgmibckPEeGh8Ql0qYza6ZwS2uUr5KivRT/jz9TtOTHkjK71PO7O7u+cEPf4iYwSqrFS5agKqcw9fca0Z5KC34UzlPnbeUkuobYJxGfvTjH/OPvvMdfu3XvmVrnKqeEK+mnXMJZ3ZWoWIvNE2Hd45IqkG9FCOtyV2Vr3YGzKj1IZBdADNbfxp6yLefBPivpQ8e/cyjBmoRMOBsD7P+HY2mlMhkJhGTHjQniXGMTFMiZ8F7FSpN0zBN2dSdCasMYgKh/lhU0zlv184qXFGFDRnnhK4JNMHTeKdG3fKm64Lk2ZBDmU2FZBU5J4uHQE5qUE/jwO7+ni/kC37y45/w4Qcf0PU9IQSNwqIMk1PmVGlnM4ByMUnr/5f3VT4TY+QHP/ghP/nxT9hutvSr7uEWPIhq5tcdRQCLQAipCPCMraXDeTUtM4AJDSxilkW9xYcG6sl9lMgexcAzw2Nxf1GEF68xan8ZpN6mKpSczalygrdICwshEJPyVhUITmjbhvVmQxbP7v7A4ThURUrKKijsx6VsyqmIEyGZJVbMO0kwpsg+D5A0CtZ1ga5rVUC7YrkVXjKnz36UlzM5zyteFGLh8aK0oyn5nAe++OILVl1PSomba9SJca3Z1bk6nVA4S068+aLwk/0pJ+/V+/rq2Vf86Mc/4v2P3tfo28K5KTJiGSlLWepaeXEEHxDxJnzNQA0Bb3wq4nDeV57TaIUsru2YLeBTHtB7WP5WHghKi0hIw5/n8qdntF8AfR3vBh8gJSYGVdKJGlkp0faUozpOwGrVkzMMxyPjMDIMg0aWckZsc8tuuhqNL3yrvy+Kc8qJw6DybYqRKSVWXYfPGicXMz5FyrVs7Y2vXTUcTYK4zKnEUZMZl0ES0zhyf5f49LNPaZqGrm0QOgJSZe6Sz8rZnXk314hT+d6Sfcg52U/mcDzw6aef8off/S4fffy+6ibBgid2rzkjzlVjtHyLRtg83vYF1EAITaNyoEaG3fz9xrsOyO7PFu9GXQLIYgEWp5FK7229zaxJqfJt2b/7+zvu73d69puGzXrN4DyDPzIcB6bRIqrMPq5zQkyPZ/2K0TdOUY1NyQQfCC7jczZdnjVIlqtfBSmb7VPko2iGCzFZbc/oHBHAlUgjuOC5390zjEfLnk6M5UrGU1XS5/n6ynivOizFMVrK/SlOvHj5gh//+Md88MFTXCO4rA5ecQz0nl3VEUuZLG7B3ygft21L3B+UP8XhnSObvhTn8eaY5fIMr1n4n4ZvJ3F8zuNBxMcNVKCYjxo1FRCnAkLAowcup8mMSN1oP6l3n7LgfMOqX7Ner9nvB6ZoHoRFS6rhm3WzRMB5p965OPXkKcoycRxGUkys+p511yJdg0dTTroI8wKllE42JmfjZtu0LK4ye0aNZXBISkgWxjxye3vLD37wA9oQuL6+YbVekV2m9YGUlgJwuWbzZqXy76xrKSKQphMj9asvn/Hd736Xi8sLvvUr35q31S60FMKvOtuLCJwpnTiNxGmEBLLw+NXgL5/Jr7mKHtb5IL1qqNbvW6T4ywocpeF78v7DG/ylUMqcKHkwXl4YdIXvkqXjS3Rcz58g3tH3vTmtwjiNTFNkiiOkhCRV9GJevTpcILnEleNi7zOSYZgmUkyMU2SaGhBP8JngnS1pwrkilPRZsmMhtCzeJXrPRWlC4Xf1pEH37u5u5Mef/oSmLcYwBPOykxmoqrSX+1zWS7+2rGV9ReaIUsqZw+HAp59/xh/98R/x8ccfaqR5qeSN706VNVYWpFEG5zykXA3ZpmmqbqiGfynPqTdnnGw3JQuefr0TVwT0/Jvy2D1P+Db/hUd56puir+NdHwIuRkYLEehxd9VAdQ4OxwO3t7dMU2S93rBer2ic5yAHSJkxDZRSDlfOetmvoojzq+uYcmbKmTxouceUEt4p7/rsFyl9jEVN0mhqyowxyot6buqblaK9nkW/z5F59vwZNzc3TNM10XuGXM7FwgFcGDqFN5dRKJVn4MSRJc2yDYgp8sVXX9L80Xd58vRKz0F5FuNVl1UnzQaqfto5h3/Au9552tDouSqrJ8XR0nUvwcQaL/kzwrsxOVyC6uo6NZR8MVCNb13W7FNKkRIR/eqr5zx//hznPJeXl2w2G7q+w3uPd55xmgiWQS06zDmncpfKVcYWeWYrEYsjZOIwMcWJJgSC0yxjLS0RE3RVvamNk00vLB2WvLDMcpX/9p6cka5Ree8F3zhSjkyxOHFFHpr+EWf3XZ5gdlhmrpidL/UBMqTIbrfj2bNnPH/+nJunV5rVZ75PizypLnOzTp9vfv6L956ubdnvj+ojFh0oc9x4Nq2XwYqfnW/v2PP/4Z8CH772k/AWBmr5Ygt4L
iz/2bMnFmGQiCJMYyRFyEkPZL/qefLkCXe39+wPR45HTaWkaTIFmElxQuIEVl/lnWMWo8YItqlTmpjijnEcGceWvu/oGmepJkH80rCqVpQuqhdAo7vkpDVsRQGKEGOsgiknIaeRT3/yE4JzjOPIkydP6NuOZrVCMqSoZQAqvNyJAZmLEE1F6diWicbYYL7P3/8n/4TVZs32cktKkVIXWoShPT0lzbGssy3GBhmG44FhOELOWusY/Bw1LNcz42E2S0+pHna92WqA1sjwrAtOGHWT9/yV/M/ehqV+4TRGR4gqOKrBbULEWaFRmmBWciV6BzklXrx4wU9+/Cldv2G92tC1PeM4cDzuuZsi0xR5eCRPo3qzEVeFplHMmcMwMsVIlMyqDzUToHWoQnBWYmOC0gmEVBSkebEiuORsP4sj5sniVRnERAiOFy+fsTs8YUoT4zhYetJZ9Dia93/i6jCn0HRvi4DJdioFNB0rWtv3xRdfIE7YbtdIKAKprEVxDJzxfp6zHkWuxFTXrPGBru3wvsE74zmLDmtqtERg5zss913WR7/+Vd6tn1gosExmZOKz/OLtGewXSF/Hu03TkvyEHFwVJnGatCbVOVKKvHj+nO9974fc3++4vr7m6dMnbNYrrlc3pMvM82dfMU4TAFOM5pjN2ZTZkbU/zYkry5UyDFNkyrofXSMa7fYO70FMBpcPScwkAe/dicyKaa7xFISctFYgOzUkBaAN+OBo+4Z21drajEguPO9mfUS5R2dGhSdb2FaKkW+GbUaNYeW7yH6/59mzZ3z66WdcXF3g28K3+gw5m6wUAZe1Ht2eUfVYrKVPjfd0XUfGnhnAgjcuhCoTluUD8GeDd0ccIaN6NSWy0/1putYi5vNuxRiZpkkDKnHk2bNn/OTTz5hipu+/5PJiw8cffsjF9oLNdkvbddzd3bHb7Tgcj0wxEbxnkIni5mghIlUGa6aAqvQymSklXtzv6IIQnKMJnr5vwJ92jJvUo5QPSI1ylyxW0XtJIwjlubM6bpeXl2w3G7q2g5xJU2IYJqY04ZxTB4+MF7vzvLSvRGv/ZQ5uJQt2lbLLlDPDOPH8xUv+4A//Gb+z/ou0fTD75sQI0X+Z7BQLFtSssvGVB4LzBO/xweuzpUwmgvfzmv4c+bblO7yf/8vAs6/lqTdHUHO2YvNkGxBPLHHnHNNC+WroXhaetKZMm6bl6qphtRo5HA7s7u8Zh4k0RXJMEOMiAlW2SVlOo51z/aZkGGMi7geG40R3HLi63NB4IXhnG1jqKkSdiMwssIQqYCT7WWAU4TnFuRIgZeI08P0f/DGJCVziydUNbQjklIlTZBgnQL12XyOPZQNnhiikakDIRLtPuL275Xvf/x6r9YrVqsXaU2wV9JpVhZs3501oBq/1JM415KgREu80Eow6qFonY8yh96V3NqdsXzVU5/utzFDfW15YZqf+nKz419NvvIGjvhkas9DGjNgaO/Faz+scofUEJ6TpSJqo6fGoNf3klHn54pYf/OBHDBNc39xwud2yWa9Yby65unzC8y+/5OXtLYfDAVAjjWWErxzSouRFrL5YvWYyTClxe78j5xVCInihaxvaRnmrFumZDCuGSPUd0JSuoFElrSEyw0KUcwKOy4tLrq6u2F5slWeyCsuUEs55xjThfabUm5vYBUSbGTWktXgqFql/IafEcTjy4sULvvvdP+ZXf+1bhN7jXCl08AvVlMFlfGhn52pxNpwTgvc0IUDOOJwZamoMKBs7DZxIQmROEb2Ofx/y7utIgM/4kv8N/zv+G/zrj7HVN0Jfx7sxTTjJdF3LfjpSZI7uvWcaBy1Pmibu7vfc74/85PPPubm65MnNE26urrm6vmG13XB3e8d+f2AYR2LKOFe8iq9Zp6pcnCrcnNkfjwyD1re2TeBiu6Lrm2IP6NvF4a38YGmY6Wum1orCz1T5GWPEe8/FxYU2n1pNXcyR4XhQ40aEIUU8AS+e4u0UrptNFuboU87MMTf96uM48dWzF/zj7/w+f/mv/C6r0OOdxvskV8FZjXjxHrwniaheXDxP8IGuaTiM44J3qUqbkq3IfzZ5F3QdnDiSOaIZ1XGhCaRpYI7Ua4nPNE2WHXGkpLx7d3/Hs+cvuNxecH11xdOnT7l5+h7dasf+sOc4HK0Uq8UfDrjFroMt2UJ8Vtc6qwNzHEZeHg+0TeBpuGYdmtrkVxxyja7a3xfucIkpqliWWkaXszaSb9dr+qbR+lMKzyVithyB94zjRFuaF5nvM4PW7WY1YgvvZHOWfPC1ucoHT9c2HI4HjscB32o2avEU5mi6OThgUe3SLFuewwetmfZyWyP8OakVEkJQPk6ZVBp7Fzrolf2vN/A43zYErt5QmvJGAzXlkqa0WiewThl9yK7vSHEgjpHStpxKLZkAKXJ7e8v3fvATVt3aCsd7mqZhd3fH8XAkHxPm1M9hdFQWuqjeRN08U/KqHDNjiqRjxt3v6VpPG9QLyCRCEFyazcEkCSd6aMS694W5ji9lNSKyeegqOzM5x5oOKEw3xZE8qUGuDVuaMEvF6JOSCsjVgyn2STFYS/o0mzf5xRdfEJrAt3/z1/Gts7D8UmUUL2Q2foSMd1oSUVijCYHJB402MNf41k9mB8ktihyXjCYnzk5Z6/n42N8enirgczz/lr/kv/smpvoGqBzqEhnXYijq4fReuy9TNfjNMIulhhqmKAzDyJdffsXd7R2rVcfFZsN7773ParPFty3H45H9fs/t3T2+eprMyv7BGc1lQwUTlpEpRva7e7yD68sL2iYsnCgL3NQI5OxgVYWfTRZENK1k+5pSorVmozYEgkXSY4qMUb35lBMj2ijopPA/894mFmlmfRgVuNl4PCLiiCmz2x/4yaef8uHH7+O7HiwCIRbGyMxlNXq25hRSTR1lTZ12bcs4TVqiUhavRtuo5/NUQr6Zd3n4Eft1w8c8lX/lLbnrF0tfx7sxJVxwNG3DfmcyMi/yTElr7aakAYU0wTCOTOOX3N3t+OrLZ1xfX3F1fcVqs6FpOw7DwPE4EEJTI5IPhI7JSVkcfDnZr+OgJUsbVngXbL/1x0lxqnkgbx1l54sBoOlyNSAcwna9oQ2NIa1YWY7JWBccTddwPAwkEXNc5tKRhAU7khrxSajlWCKiTWRNQKw5twkBhNqA5bSuBiRbREuIzOVSmoOwDu66cRnntTxlGMfKu2UpnfGuLqEFYeqH//TzrhprNa4465ykfBBC4BhHSsjFie25yaqYElPULv1MJO32DMeR29t7nr+45cnTJ/Rdx2q1Yb1aM4wjOY1Wq16akm3fwHSunovZMRJt/kkZjdo4tFem0R02mVJEuGQtpxLR/o4sTiP49nReRH+8NeuJo+86mtAQxFf+1/UQVusVN0+vuX1xh3iHeK81sb4BgaZpGePEOE1MMULw2lOSM74JXF9datlEcLRBy7ZWq97Kt6T2ITiTIMmir0UW55RnvpzDy2rwWvRfHwTqklmZYI4mnyVppFaFxc/Mt57AlTx5lKceN1BzEQpmnJa6jKJYvDbfhOA1HV7DJIv6
uZzZ3e/58ouv6Lod69Wa9XpF13es1muCD3gf1OAsdaxW76MMUphlYUItolI5q+A+HAcgqOctQtNobZAGq+Y0bpZM1pY4TeXLLGgxBZpTJjnzudWi00L9rqNtO3zQmpqpdOWh1YbFkK4CvfxfLBLMXJN68pO18eD27g759DOePL3m5v0bjXY93JI88wVQBbKv/1aPTvck12j0K4ERS3880EWP0mls8FV6xgX/Z/7WO2GgSrZ4oEAyo7FGzSmGajHOy38amUtTJsasMD8pM+yPHA9H9vsd+92elGC73dI2Df1qhQ+BKaNCriI9LBd2wbvOfp/LwfakDNMUiSSGcTJDtKTV8ysGqRQnyxQ9gnXNzo0ZxYjou16VvPh5/zLkrMZO23cch9FKBqjr5FST63mptdZW052Udxrv6awT1AdP2zSEptEHLUazlBjAnOCb6wVdsbYX3rYKyqZpanSqBJKX1d611OU0TPL1/LDg3de93QHda5pVfhn0Wt5FiDERzAAq9WvGzNVWTzFq2VHKxII0Mo0Mw8Rhf+Q4jIwxsVr1NEHhnEQ8zgWc1whLTsW8OFXsLP5KLo0YpdFQmw2deESiGbQWtbL7L2q9NGGocJbZWLGazhKd6LuOxgfVBcXZsWt0Xcf2YkvO94Qm0PiG4IM114ILQRFWisEuQoxav+i9Z7XWhteiv5rgaRpteKqoEpQU74LfCupGsYGWili0PCGEQChIGUUPzn8xByuXYPRsCHwdP/wp4V2PKHwezOc5lyAONE3D8bhnaUDWko+Y9CclYrIATtJm6+MwcbSSqO12y2a9Zr3qaEIgp4S38qiSNco5Lfg2L+QmFkX0CEEjthnGKTGXXszyqTb8oVlYZ2etWLGCGafOW+bWUtdNS2O85RQTE5ed9jSsVzx5ekPX9gQfWK/WWjLYtIgTVv2al3e37A57syuEu/s7mqZhvVlzfXVJFgjeMk2NNjf5YJHTXJPtVZ7DrDvIs57QbVo09xoKiIUByCkpnGbojJX1dy7Pn3tM8L6JbwOeSy4e5alHDVSXPUTzlS0VnaWEv4vASjQhkCZPNMEkooremok5Hifu7va8uL2nCS/o+57rq0s++egj2ran7QYthvaevBPrlPe24WURli491kBVmEmbTpwTxnEijyNXVxfktoS7TZGdKPm5jk3XeSF4pBwyLUj2TlitVqxWK/q+U+WZElOaiDmi/p5i7bmqcK3MAYuEZoNTKXekb9GIgVht1zByd3/HZ599wfV71/W555pvmaMr5XbLYcq5QlCUVBONmH2Q5joq7P5SiQ6css4r7CYy/24pAxd/L4X+dzzh7+f/3Ndw0zdLLmHYhAC5OjolpSyADw4ZzDjKgkaWRQvqrZEppkScEmOOHIcj97sdd7f3PH3yhJubGzabDW2/YpsyITR4F0z5asPSfJCLLVHWXveipDTVq48LA5VFJCpRuzGLKb0QpoKA1fgFO0dqSMJmtaIJzQlKQ3H4VqsVVzdXHA92/lwwJa+C3zWBaVoqeVMcVgPddA2byy1OHG0T6PuWzUbx88qXLbM8pQ67GBz1ECy4sERVyhmrVAUkWrKVtCO3gAzpcz3g3q/j3cW/s70v8hX7/H8C/jtvzWO/KHIZvC/QfWYYiaJzgMLJ5ZTBUxWrc7rnOeVFJGre8RwzMQ4cjl/x8v6O68srLi8vubi4oOtWGqEJvqYqhdPGuRPeRTNQiqyQNeWNVLgrsUh8fR5XjFK9RpgDUPanGhjBeULQwILLQhsaNTyt810dJs12rNYrnr7/HiG0eOdZ9yv6bkXXdYgIm82W+909+8NBMXydY7/f44JntVrx5Ok1KWc1TIMaGN572jbMUc2lA28GqiYPF6l9UR1Rq8OdwUw1rQVsaicCKZlNnhMpScUSfw3n/qnkXeeCGWoGcBJVfk1Tom08bd/BnT4/zvSi10xeTNF4ti41U9YzkK15vlO4AAEAAElEQVSc5Pj5Z7x4+ZKL7YanN9dc31yrQ15S11kNUNJcOwqzc1/KCFT+qI0Rc9TgVgYRzxKLWt+fDTOVGgyam/scjVN5G3xA0BKUpmnwPqgc9trz4AVcyqy3GwTh6VNYdSue3jxhs96ozeMdq9WGH/7oRzx/+YKm7bg/HvnRj37I5cUFT5/e0LYNikpa7AuFuVJZWGL7S+ErNaUQQqjrUQKASlJ/3zStZR60P+F4PNL3a73EFEmSCZUhzS4zm8ku8ka+LZ91OHqaR3nqUQPVi1MInlL8K+YR2CJ4Cyk779XzFrFmE1MbCWLMjJMWFk9jYhwP7PZ7bl++5OXLlzy5fsLFxQWXN09YbTfI8+e0bc80qdGQjUHLw9dFt/XJefZiY0pMw5H7u1v6vqVfN9U7KItWCvCLfa0MWNJNusGN4VmWTuqLiy1912mKJp42NzVNQ79eqXFtvy8GpMleJIHEYkzmEpTVGpK+o+lbgjjarmG97mnbAoRrysCw2CKeJIEkQGipEQ4RkoCzRIL3nhwypfmhCsOT7vDCXEUKPOYLnZJbMFpafKiTH/Ob8r8C/pNveaVfHM2ZMwHDXJQKg6IPG4Kl0q20w1k5R5r0MGohf2JK1qRjjs4+jvzwR5/xxRfPWK17Lq+ueP/99/HBE5pAaBqGWDrZ06miNylXHDlE00OhaRnHI4dhYkyZ0BRxk01w1oZM6zq156xM5jRz4HzNSngfaHwwyCZfIwOl+3i93vDe0/cgCl3bcnlxyXq9oe9WxKjYqS9uX7Lb7TTSkBMvX76kaRq22y1X15fEnPAOgkWiCtzRMoKhCkexDB1oc4AZNxXBw6jUo4YQLNWqwhKZ99Q7r2geU6Rpm9fCnbxCxu4P31q+epRLnuW//RYX+sWT4A3ZQBVhnOYFapuWi4srM+yLQ5qRoEMkhmlgmEaNnjLXT0bLHKQMcXfkuP+cr758xmbd8+HHH9P3a5VJzlnEy1UooHKNkjWbHV3w0uBDQ5xGbu/3XF9fWLnRzK/1Hk8UvBkAqMEZRJ2S4INiNIaGpmlougbfBHwTtE/BaXR/e3VJ13X86q/+Gpt+w5ObGzbrDW3bghO6dsV3//iPePb8OX2/4u544Hvf+x6Xlxc8ffqEplUsxzk7YWdVTAvkUjOOpngtHZuzBVBqlG3BUVkjxmrodricK35nTokcFK1jvzsQU6Z92tnKvpEh/lTwrtZFBkMGUd6MMTGOI+tVx9XVDV998bnpxxLF05IezUYaYo+o8ac6b46+xQRxd2C/P/DVV8+4vr7myZMnmgrHmjYdihihF1L9jqGdiKuDelwT6FgRp4EpRZO5wUoAZ/0tEheBAA0ReX1BedFS7W2r0GJxiiq7g6vZtKJzmwB+67V0pek06NX19F1P368ML9jhm4bQtmwvL4l3dzjviZZlbVG+LdnaBYjLK1FKR6IMUSqZg1zuu2Q/SoTZ6To1zczbcYocdnuurhMimWE8EjN0m9Vb2wrw9fy948g/5fuPfvZRAzVIR84eZ23oEpRxLKhDjhq+75rA6IRJg3IECUgKEIU0ZcYhMpmHVOAljgwcDkeeffWcVd9zeXn
Jhx9+yGazxYeWYRhp25bjYaKkd04et1jqokLOh0aVuU80Xc9xmtRGF2/GaCZag1eptXNObBML61lUy6JQ3mlUabta04VGDVfEYCX0VrpVx9X1FZfXL9isNmxWa/q+p206huPAdrtV7284ajROhOPxqHV2XcfN0xs1/r3QNoGubSw9ND/zHNzNBm+UkBRJol7IafQX8xI9MyCwNa6V0UbOuoBLuc6DpT0Je8nr2Wvu1Zvfs+WOv8K//xhLfWMUkkOieYoOsEETKWozVEpC13Xs7+9rSsj7hlLjnGIijvpeneihdc2gEVhPZj8cOY5Hbu/uePbVC66f3KD7tlgzp8qOupcmHEotnwjZeULTWQRMmwjX3YV67bZBzqni1FTTrFidFb0LohN4mlZTjD7QNE1N/xSYnPlHhzS0fcv7H3zA1fZSy29WazabDQkIvuF7P/gBz1++oO06bu/vud3d02/WXN5c03UtKU8WITP2EcjOumeTRSyKPaMhNKv58uZ4z0DuYh5n+V0xBHJOxKxFMj5nUowKel2yFTVlyOO8WxV9fvBLuObAf5p3A4Gi80ExB50HL8TxSE7gm1adAO/p+55pOtgAlEQIloEqGKGLQx2lRAFt8p/JljSOTLeR3fGPubi4RJyvk+4yzI5rlSu63iIFYUUV9NpvyHECEsE3Jl9T5d0yZaluF3YfluZG9DreC13f0ATDEfXMETJRw3CaJsXV9gFWG9q2o+97+rbDNxqxkuBJkslODZF2syZYE6POPShCz/Ava4hD1+xVhFZO5aQvSAGWNl2YBuIEjycEDRbkbFOBihEVy9CEheJ+G7n7p4B3G3M0CAbrNB4haxBLLH2s6zDzZ+3/KFOQsiKUFCMVihum61dq3KaceHl7rxkn57Vsz2T0nLHKOOfVoHQz/4kI4oVVWONljXeZpgvV8deghUVSjW9LyY33YmhB9oMOGWg7jTwelljZeS6RUez2htzoDgYXFMUlRp2SdQTfdiBegz7O45umZsNcwYavtFDa9XlfQwYlmB+wTckSqJHrKqZ7ilO9dowT+8Oh2l8pThV3tjxXvewyiPgaOoVR0/e8z5H/avoTGKi17sZqvcSUTI5az5liZhozsuoJ3jM5KVnmeRPNe5zGyBQNe9FSHylFjbCOieNx4ngc2VxcsN5sALHCfV9rJUHTehh8VR1bathf4sDnRguNp4Im4GZFn7GufrGaJ6mLrdGseVxZSZU2QZV88OGE2QIesmi5wvUV7tu/zuXFFRebrdaV9CtiSvTdik8/+4yXt7eEpuF+OPLZp5+x2Wy4vr5ite5JOeKdFmEHmWu7Hm7xyd+lNBgU47SECO0dJigF7bKuJQWIrinLr3hoob6OGR7jFKV7NvzD/M+/+Y3fAOmhSgge7x1T0kJzSWI1egnXaudymd5Rzv+yUS/mTEwqpArUV/2OrFikMUUye3juaJoWxA40s4Aq111CUUHpABV825C9I+emRj+9A1FYAYNpmQVsSTuVKJXiDTqch9A441+xscAlYluiFspfnRXzex9Y9WuFdmqC2s5OO/izpeGarsOP4xw8c0A1lKnC+iELlV9nKWqpWjv2DtP8NR3FgtfMk0+JmLPWviOLaSze5PMb/Pm34N07Iv8vuX3zG78BKka5TiYLihSRC9JEMvQFXagiowtfLfGVZ2hAGxwhIIaIUBCgc04wJfaHI85knFW6zPyaa8DIti6b86/RTx8ceG8GpzUg6k5BxfU1ZYj1fLmScqVGdFxwGsVyOtGmOItlTUCjQOIAr5BNTWgUFsfJXDGCGoWKGe9sTLE74dEl5NHMk6f7cDIIIGlwxZVpXmXtOOXF5Wfnz6VqoMYYrUHn1e97Lf0p4l19ZoVZaprAYbcHSiRey5h8UKSZDEWYPeDbsigFiBEW1iBzaERB+scoilZjUW6klBCVUkSoJlG2a9nUM4Xwm8tRtGEr6WlZyLZiI5Tov9oaDmf10c77CikmMpc/lq1bjvktb/LimYaJLKonYinRE+pEtjIUqNT/17mvOdaIwMI1YuZn/U3KNqI6zlM9s2TFPc7avOqqg5W1YXYcWS0wRfV4CqQ0N8HVdSxH6jVM+lb2wpp/IP/co+95KwO1CgmL5EQrJstkFO3DNsk5JoNSqPBOJuBKd14BoC6TR3RfIuO053gcOAwjx+OomH8VWL+Itkxp4ZUq4AzVsQi5BiR3kJNFlwxEmqh9lyKnNVG13k+vqcDhZqSGoN2eZqwWT1vEK+Yfjtx1OOe0eHt7wcV6y6pf0XU94h3Bt9wfD0xkVus13N3B55/TdC3r7Yag7l1NRZTHnadYPKTCGCqN50LkU1aVhcDN5gyoIeHmGhI7gOVbZjN1KWlPXyvXXx6ImT9aAh8/xlLfHEnpIEfTkpMZSFkPYooJ7ztbAEsJ2GOXiA0U4VCeVEzZ69/nBj7FlDwcB6Zk6WuBJNnwfHVvqkIG5gYS89CLY5I9TjRy75wKkiIlnbzOSDUjwol592qo+lB4ffGdPBDe3kOjkVIXtJg/kRjjBNmRs3bnZ1EMPeetjKdeL9Vnm9f91a3I5hzWCVxOqiFRQ3RF+CLVGCsp5qLkCwKAQt4ZOkBhQSlf/1PybokWcOSOH3wNM33DZAaqpkyt3AeNwk0WPQ5Nw3GQ2nRZ0onLARRFw5QKSfsNhaU0AwMgllWIqkO9hpIKFM0ceaqcX3lIZbg2PakxYFF9mKcE2uercSqz0nZOm0d8/VE9kuK0cO5mI1XlsMpebQjVs1YcoJSTdSnbTHIrZxFnhk0xsC3KXHm5Gq3z91UzNmvpD4YZXI3TIgDKZ4ytqkFaZIcZCcq72Yyo2br4meXuO8a7Ba6vaQOdb7hdGD6lk7xpO4bDeCJTSxRaWdaM/+qblNdUji05UJ0up6N8MSzaEpCSoqvNaLMJS5ANZzwTsWCV9We4Ygss+LbU7lvypxqodYpY0B8XDKTfS428FtL3W1OgoLJanJbuGO8mtGFH1ZHBS4mAV8jMh1H/15c1nf6yys4cqbA7Oamxm2fZXURxzlkhv6RoOmZUhpRrmUS9fv3Wn5VvhVtpX/cgld5ooKY0Jz7Ic22GCgPItqg+BCQE0uGozGKQTMFC05OlShVqRsB5yvSdjDJFmibGr55zvztUOKoSiZonbjhlAg8l/F4YwHuHw9OFACTattMJPSi8VE5p0Ti8rIly1eD1XgjB0TSBptHZwN4rqK+3dJP3BdQEOtG54QpWrd58JjNOA0JDlqii0jl82+KbxpqijHGWno8Znl9XU1dM0dkuzPX3dYIWpglyCe3rGscpIS7hDU7l5KpfE5Z/5cuL0lt4UEt6P0/8y4+A7n6zlE1nqDAQD0yGRxcjcZrw1hCUZLSPWKzJlYi6q4bUfOBchYoqWLXGYWSRWugvDgh2NMVqocyb1w/r+s2OgwlCW1TvtTQlx1iNgSooxaBuinHrVFgWVA0VmmpAl7qj8gQFR7UAsougKBpZsxMFxkfszzIfzjcahaqCUmaEiFxHni3Xv3Jr/VdR1Frbl6rBUNAUsnn0KSdinBSPuMyRT9EaAb
Il+ywaW3Olj/BwtXMW1uyDt1/ne/5u+gdff41vkIpsLQ03CuepDv44atNav1pxv3upMUoNoauBunDCSjmWrvPSOVLDtrhOMYum93MiR1Vi1eCsQAGz0VsweksGrPBtE8zcFbEJdszNfCx41xfeLZBvGgzwIeBL7bVo9CuXs1GM2UXUTQ3UgjqgRkrKAlbaU6JGLmiPRJkupXejUbKi8oVc32+bUHkkJePDXBoEUf637xc7N048OUdinJhGHUYjzIaCB+vAduACDxtUX0t/ing35lTxa5u2A68lDWTr0k+ZfrVmOO7JxoNlXDOgjmnh15wpzHcSppHZTCvGbc7a65JyxElWyCdz3M0rAFTnZkvxJrTswItoM/jCjBJR+KilTpUa2CrZ2rnsJIRQp2WV4ITyr1s4YjYZ05xzL5rdUntEz5ziHGtQZEqRYRxw3s4l1LGslD+rUl4af4UKr2tEuAT4dIqndbAzj+jOUIMCUqPOmeAcpTO1TE17+E2vpbewFzb5lr+c/t1HL/MGA9W6zmW2kfu+Y5/32tWb0Kxy1po274OlpxO1+61CxpRHhtruII5YSwi0/iOJMKUMY2SMB1ywNGMNk+cHXv3MVPPiKD6dwqdoBx0563WsCLp48tVLsGt575DgCI2nbbWrM4RQmxBKWQHoe32jKSfnPClq1Cw5g+OxiRo1Pem9NRlgAm72KMpRyCczqvUQFizJOA3EOFWc2Rr2rz9UJihDBJqmgZRIccLlgFu+yb77jULy5L2vJwGOPOW7/Itvea1fMIkZTiYoQtOT4oFkZSYxZdpeU9pp9GY8RoiR7CdKval8zTMXJAD1NYtiVogczTAAGbxXaJoqLMEkbInCSJ2oovBCQhPEANrniBOVz0v99CJSYEreeU/TtYRWsSNznOuxiiFQMgUl5VmM3BQLD5njFDVTEeM0l0cEbzKheN9J16z628tHTDbxR38TY9QolJjQEJUvLIzvkwhWjTplc2QzZW5QEEHaluyXNVlvoKrgX8MqgOM3aPkfv/31fsGUstbPNU2LDy1p0kEmMWammHnvgw/48tkXChYvzBN0XnOWl7qi/gIqh3sgeM9xMDQU74go1qHDn8hWTailii2ZiZZ8ELAxpFp+Zc0p5StlwbvFuPSeZSQ1VGgeIbSNydqZdxXq0JuTpYEO74UpR1P25UnVtdIRkyOHo3bvp5zIkjAMtXpf+lymqM2ALcGC+tDG6zllDcqkjMvLOGBxPAu/Gm6mav7qzHkEmgZ8eHvR+6eId4/jyBah63tC0zGmsRqo0zRxfXPD3a0GMUwMGh+9+nga9Z5fWCJpZWMsrUvW/hayOfQScI3oeKTFtqRcyjHUmM5RiCIQYpWLReZLdXqLcVpsBBvD/qDptAQZXFN4kxOnqtoohZ+d1lz74BaNh+qkH8cju9093aoHp6NSEcXYPeWXUwZSh2jWM854Lye1x8bxqPbAMqiwsDHKkI/ZqbUAh2W9fHDg9N9S7JhH6XFT9vu0/I/kY/4Hj7znDd+innhMUWskhAoEW8PyVlXftA1d39E2DWVGbja15QzcvhgNySIErwau3fyTxepTI7FgrIJFiKSG3muN6zQxDAOHYWB/HNgfDmbIzdEF5xR6yMv8+SIknff4hTfkG49rvAEvh+qxsxCu2m1qXadNY0X9tnKWFko5WndixgVPFoh5qgdFBVckSxGqScP91QA6vV5R8qVOrRjWLISlppgmYpl+wGL9nEYTbHtfIeGBsSss0qcP5WmxnPRPx3dp+O8/zlLfEKnssPQSmdVqpVF7EUtlJFJ2BqtkBmbOeF/SpEWYmReakiklFAcUMM/EQMBrTM+uHzXaNRrwdC5KXPWXzodWB2aKI9M0MsWJlCdTxLbWJXpUPHJD0hDm6FOBKlt67DU97FToFceqAOGr0axNhFrrhEa2vMN5Aa9J0uM0sD/suN/dAybYzYvXRTsRkYuf5a+KgtU0VooTKY7W5WzdluQq9AsucErK5ynpOMllo2RJsdX95nHefQ2HnPxM/JBn8j/9KbnsF0OCNoKNNnt8vV4rlJN11o/jSNf3ul5vdb2q/kri+wT3VoW0nuOUM8M0MQyRadRSGBUtMtteGZVXWZs8osmblKJGrwpUjyl0Ve4eRAVkRiOI4gPiQpXPc32f1qOC3lcpbwBq7TN69Ga57LF5jTPvDuPA/rDncDjiva+NIHpdqDBui9r9V6jIYSt1E+eYptHSsArfX6J9hXfLGE8RhU+KKc4hB9GMjl8Efcqe/+nnXY2gjuNInCKr1bq+Mo4j93c7rrYX6sBkNaAEcMlZI6m+tzzZcsxuQUMoJVcOlVXibM0tw5BiZppGxnFiHDWIViK1Re5Ok/bExClqfWZKVV/MPp5m0Zx4hbx0NsfXpohpDeoCr91kr90sweClCjlfsrTz70uzlmuU38UXyMnMOI3c3t1VJBlSXvCI4bnLzJtS7YU886pYKQ6J4OYJZlTkooUTZvejte9zg7YPJYbp5ubyYnDnn9FesDe9T+K/IrtHOeoNEdT5mzKlWUTwPpAC2kGaYBwjoVcjbQZvTvUQ59cprhr6pbqxhl88vytnpphwMgHeoqgKBH2yF2kB/WRMnUq91FIOS4maWjp1EQ0tZQLFk/fe1dfFLzvql+mpuWDae0dKUr3+slExJ6Y4MUxqhIgxSvFQINfRYiw2tqzPMvBWQvP6LMpsUp5ZTmsNc8q1mSLGXI2EZdNO/Y58ujNfr/UeV4c91/wm/5lH3/ONkSlVxTTNrHyDd9bda1PRSNg851k4eq+je+cGiLI5blZULOC1SgSgusgqYKaodT/jFDVqb8gMtc7drpOtyULLrbMiBkjZzyKxZz4tHolU2BSrsz4xTvUar/gfi0sUJn1wWWpzgJ2aZHVJh+OBpu2UBes9zs/u6he83mteKnlnadlceNm+rTSl5Dwb706c1VGVgn7jYZmjsyXKIotnfPX7H+fdQOIJx0ff802RilDFFZ2mSNv27PfHuibDcaBrW4L3jCLU/7JUeJ2HqeDlzsyiRqvylsq1TgmLmWmcFJ4u+1rWVKMrxVhIYFEKywjMhp/uRzHeXI2cql/nrAlsiSxhfQTOqfyet/hE7hZZV/mh9jvM78+o7B2Ggdv7O9brjTbGlPWtLTi6GvV7ThdI368PBznhWJROlc8u9m7Ju+pcqZFP5V1KEe5Czy026TX0p4V3Hap3xmFiGEb6bsWdvyfHRLQAUt89oQstOU4V2HHubpMSvzzRU6+TKiVCWeRPKroMGMdJA2TJkXMopZ/KN2UqXoaKbkMwfqqFA8rHC75d8mYJCpQglTNboZaUlOjCCU8ulLnpm1JvXSOxYlX9OTGMI2l3D6IZttqwlyNLhtGmJ1mcNfsuBEqgK2W19Ipey7aqik1ZS1f0FpUvS/1/EwJzFVX1CqvN8fCsnNLjfDvS8SW/9uh7HjdQyxfYc5dDF0JDMlzTTGIcJ9a9gpSLLTBmoKas3Y9VIOTFOj74LhVlsyGrae1MIoKAC68aVznPxmPO1iGZ1MNfFtlXRinWP4v6Vedqo0mNQ
jk3G5uoIQG5emzFECjp0mqwWqpJD4568mOMjONQ4aXSiYGKHYb5seyI6qGx71L4ztnQrDWAVcnbr4vSNqZzTmcdv6Kcll+4ZPiquh5ywam2O9nCGpS4ouPvvLq5vwSqTU4x2wx7nRIzTfOwBHLWCKpI3d9y+E4dK+ZFq7wGylfGf2XPUQU+JW3K06JzbewIwcr0bf/VKEsV2aJ0Vs7GYjEGZ+VW5ilTyl6cevgl+rgUjuVxHnq4ld+kPNKCn+39xfcmwzRNCnJutaqnxsd8raJIlkr+xGkqaU6DNUnlWos3Fb6tEGlmNOVqoJrycgZHA69j6nK1N/NupRaRj177yjdN1sbGNKls3fZbnASyjDWC2vqGNgQGw/h1GVW+aansjarxJQ/+NDlYDFSUd4trM03TouPXGT/OZyJFe2/SvFfOBehcKg8U166ME1Ylz4lhWmD9nF/Wmc76fO6eLs8zG6uFL4qTI1gTow3JGKeJvLsHDO6ovnMZNZ0zHNrnlc0CWmhf02nqPGRKKFlVitg1Tn/0Y/p370udo0ag8jKL9WeEd11WFJ9pjAzDxGZ7iffPmabBYPtG2qalazpGC21KMU4XPJsXjTiFThrmil20FJSY44QQY0RyhGT19uIUHUcvZLwzV7Nr894yK1uu7yj9KdVOMMfKW+f+HMxyVY5qRitrmUs1jnO912KjFrvDlXsXqecwxkg8HlC+tVKo8vyVlpGlhW6ywS6ZXGunl/yJnUd9cz5ZP81+U7PPTbPoWTHdM5+K11mnb8m3ApN0PHuDgfpoij9bkb2GjxOly6uktKshViEIFF6kGmCoAp6mEUQWXie1bLKsceFRla/FK0LxUy2SME2TKXMhW5dxTpCIJKY6UCAzUWo8pUZ6MOYwJnGzMKwG6YIJy4FIooZKtrSSPNwP4CTS5dBU0wIGIib1iA6HAyWMXgW9m++vFDmUzdda29n8LOtXSgyqcZQLbIwJcjvoKelwhTqLWMdZILVwehHbXsqJB0J2yVQPf5Z1Zl/wKf8H/s3HWOobo7bpdU1SIhs2Xdu2um4Alorru5XBmZWGESrv8vD5KYqyKFBZ8E/pXj41tnTcov6M41gbj+xqpIilnjQVmFIpi2EZJETVZ8BJoMAPzQreVWHp/GJCSDE2rUHLOW1QKRJSLJJT+cm7On6rCNCUE8dh4Pbujru7u4WSB0uemXFvZzvP9vAJ5Qg5IlkbFkmaIlY+lwfrdjrcoBgPIZSpI3P0uD6n/fnQQJg37vU/JQv3ghX/N/78K/v9yyDBm2MwcjwOdTLNkp8F2PRrGhdwVoquE3SkOu5JPEk8dXrOQgkXeVHgpCjpxYVMKHWD4zgyDANTTlbXPdfP65mJ2j2PyoIgOqee8huZ90ubTAMi/sQ4bRaYjydnzM6Bq2fBmjgWRqy3spSaERPwoQQHNHL38uVLomE8ylLGsYge5aKM5uyJ8mK0OlI1WsrceEoggTkD8JD31IDWRlolpwZYNa7qpv8Z4F2Hy440aar/5uYp3gVUzqn+Dj7Uee8xlu5yVTx5Fov1+R46M6W8qRibyjNzbWbJ4MYUGaeJMWojX0xWOidW9uc9wiyvfUEFqv/5ehMl1b9seCp8G4LK41IaWDOy9Yay8jBqsMrC8JGloK5RVHdigwzDQcsFROoo+IzySkpzyUNiUQKx/Cmnuag0ci2PKg5+tXVSwpdyNyl1sv6BQ+CqbZbt1n9avrXtZgv8R15jTy3p0QiqnrpyUf3HMAy03UbnwIaRIR4ZxxEQhavxjjwVfomkPBGXEdQHVCIgZa+q0AqeaHVoGgVUI6PxLX6+HbtNpx2/YBHUhHOhPsM8MhLqyEjrpiv1Jc6r0i8GB1AXPLuMJKs7bRbvccKyts87p4xum5bQms+YIne7Hcdx5Orqmhj1ATQiVCJ3erOvRJc1l6GCn6yeYS6QVMmUfp6xyeofpwzjRBSb8CQgaFGyNzDJ25Ln26z4N34+F/uTkhOFQjNDdL/fs9lccDwOTAzknDkcDrz/9JrhcM9wyGbwJJyldrJbRKEeUDXRzNFoXINvG2RK+IJmjP4xDgPjMdG2LUl6RITgBC+lo9RM4pJeJRMM/zcuDuFSSHtfym0aE5Szsbp0sApYuQtWu+0UAL3sv1iYtta52ndlMr4Rm6CjRu/Lly+rN/+QSvTCWI8lbBeYI1Wbx3QkcWgKJJ17cK3i2JfsyNftArPw/Pp3vBVtuOR3+E/8Ca/y8yERQdISG9LR9z27cbTaXF2P95485bi7V15CzHE3I/Vtyfa+bRrrFZgRH0BH/uYUSQKNqJPjTZZQ91zrTrMrOKVikYZiTCyyUgusSm2s9fjgCM1srII1nljjn3ixaNRi6EOpc13oAKpRbPJ3ijptrGmIcaQNmi3RTnN7QrNXyqCIoohLzX8xSMtelPr+bB98yLvlbmbeLfcrdW/r6i7F9Z+A3hXeXZZgJJO7YiM8FUpyxAHbzZp43NNYDfVsSL1yxa/9nkJdu6Lvo2Ypp9EigLlGrmPB06V07JvTgCKvkDUr2gSPZEMYEQ3MeYBiMNrZqJFP52oUUiEBfd3vmBONRfZL0MIh+FxKF2Rh2JVQkT7TMBy5vbvl/u6O9cVWHzLPa2v9SjPvFCMTO40mPLNLTDkz5cSYIhInxmmkTQqtqI1Z8/3GpJk8he4qxjTg4Hg80Lb6fEVv/Dxshg9Y8S/zW4++5w0Gaklbzg07c+rYKxbfYWAcJmJM9G3DZr1hdzeZmrVoXooPL8zigvMfdpAVJidU0P9sDAeZu7t7Vl3QiSrF286oZZ+KN6wr64xxhFKkniid/SXVVKdGLUL2J964fdJ5g7dy6p3jck3pKwyMMRHFnlfG8cFqWe05bm9vyTlREmrl+ZOldxOiaX3Eflc6QB1xso684iWW2i8MaH7hQcHiIDvFeqth/bLgZheXta9c/nCvePB7OQ0uFn51/AGr/N8D/u8PL/KNU0m5FItJG0s6HUN61Fnzw3ECnEKiJJs85hrrOC97s3xQqielWUDjcAtlOKcj7lKTcENJxWuTSc4QbZ8kZ6bCphSFZYJRMiI6iF2ywyUzGZ02mZTSExZ4f8Wpa5pQ66OK5VZS/2JwAOJL6GU+g1XWlFC6GX3DMHI8HBgOR/r1Ckg0NoJT62Z1fZN9VrGG5wj+nF6q0tQcOr+INiiOcd2rxboXoa/ZDTWAlsZoFZgnD7Gkt+fdI/f8Ef/gEY765qjwbjnXMUZWmzWH3T1pXET9HjQXvZFe+57Cu4FVvyKjmL4ajYnkNKr8tqzWFKP2s9e0YGkw1KiT98HSm97gxOYSglJz54TqSHnDnK6ZCJO5TgzKT6QatpXvRWY+s3B9iRTV9GbKDMcjh8OhQuuFRtEtNHti7y8lCWZYlehUleHVyJxTw/rc1MhazllRWqrsXUTH1KLRkpQqT0oJS5kM8trNon7R4lfvMu+WKKJrdD/v7u64uNiSp5HxeFQdGTztIlquNOvZhXdqr8iDJRCTNQ6Syrf1eoNvG0UWipFpHKysYNIJa00gThMxGYqC5d2zfU/hW1WrScta
pMjYuQRQDdWi52c0irlESuojyIJfagkhs5NS/qwwamiwqGtbUtLgiTMc9iLXyZkcM7Gg05iHlt0pX5SbWNaWxqxytwS8ag30Qn4UfhJzRPWjigbUdsXxm2uvH5Mnb8O3X0rP/1G+zV9/jKceee3EO0xop1tKiThFXBto256d2xuAtBYcd/2K/e62RkZJr1m85b3LA5uobqqnCfOmC6X7emQ0/avYYhh2WNGvxTMw7DHnKVA1pei5bogpSzHvfk7VLpir1E55e78p+SVo7yt7JeXhqV53Mhy8aRorrBUpmYDUt5szv6h1zpXRoMBkUO3agqhAPRj6ohqpi30UIZWocS7GlX7i9bJRXv/7N9IR4Xs/0yd/3rRU8uUwe+dp25ahGRiPR4vQZ1WqTWvKz6nhvzTeoRptYgz7Ct+iyrikY6O9KZOYxoEUx9oRWVM1JphmAw/1bg1tgKKcbSLazKcmIBddofPfZ0ekpMYLcoPUmgHj2SIWl5AjlQH11WmcGI5HxUEtzqOUOiWD5lmUuBQb11TJnIUoNaXFIBK/WNuihGzNZI5knRpgQpwmpA2V1x/SWxlqr6GBHZ/xnZ/psz9vqnI3Fwc/sbm44GV4DhyAXPEKZydaf06ffrGvr6NcjDA1tvrVShW9NWWMcWQcDsSog1OcE0ZLbycwnkpVQWcR68qP5li76ri7ZbOJFCWvdzbL2xKRtGcxGVxrDcvvWAYPZt6bBbG+Pk3Ku03TEJq58ZXMK/xT2CZbWCVXBfZwyQSRwKyn57pzTX/mE4Ol1uOikVsX5m9+uCt/2nnXOadoNo1GK/e7HRfbC467HdGm0E3ThCvp8ArVJMVuN9VZDKDXf8/Jzgs0XYvvGmvaSxyPR46HHeM40PctCMRpqsEbkSqd5h0uZSjMsk9eka8ly4TJzdnQLA7NUlZV3j05nwtj/OTfs5GYYmQcRoaDnvVi4M6h9/LxXNctPzjqUnpzzBbIFqzI+TVNaDKvtTx4DXTIUrU786vb8rPy7Zjv+CL//Uff80YDtSjkZaHtOI2EplOBdnvLFCPTFEkJ2q5TxlN8GzOo3IksgYWyZ/HAhVdFcN4TfKNA1QCizVJp1MhhCdWX7l41RkpNmgoqV3D2cEiei6Sp3pGrirt6OgtFP4foXYWVkBpJLbV6JoAWufmlWEs5MY0jcRxp2oacE42lmtIiklocR4qgMy9bUmGYIhHnGpQadVqcWPUJTlMm4hbPztwGUf29WjD9CDM84MElT5aPJVbc8x9+5CLfHBXeTSSyU+9egK7rGQY1UFNOTDHqqMTcVl7IJ4tqVBbM9qhAbOg/pa691rkGxVdF9/942DEcD7XgfIxTjdI4EXWqsjWaUOp+Ci+7RZPIAgNVlth85s1XZB5L9ZrLU+CkytQ1NWDtszUdXKIWc0S3CYGUIofjERc8bdfVyJcaqCDOJGTxZYtx+WA/UpkRX7AyxVufyRyB1sddRDegOpLlouM04tqOsuBvFzl88M/X8O5E5Fl+8eZrfQNU8UFz1EhHVpi0tmk4OAWgH8eRGl08KVaW6mNIbTSbMRaWVN6XTdN3fUfnxOor4XA8sN8J4+joV73B+Y0KOWj8r7Z0noEDvDZKzVHw2akq/DrXSc97XAzTZeRUoW2UZ+fzuFT6xj8Zaq3rQtnHGBmOR9UhuTPwdWcKW6B0bZvBUjulOeWRZNAVOQs5gfeNwhc9ZHRj01R41wIeYnGEaZrwra3NY1Gbh5u0/Oc7zLvLmmIQhuORDz78kNuuZzwecYJGBl0ZE+rB+lxgabapFisR5tfxrUJWqnMcQlBAebF7CIEy6Wu12TCNI8fDkYrrnmac8oKuXAp6cy4NUxbhXwauvDlXqJFbM7jVkZrlbZW15fnqGXWULFWRw7UOFxtoMOmgh2F49fmLI6/8aq+kfCIH1T4oZSkzn3g/Nzw9FJvZ7KlS311sKxEqxFsNIrxOwD+kt+Dbhuc8Tf/XRy/zqIEqopMSXHbkeGrWeO9ZrVY0XacRImtQCs7TND1tv0K8r/ip1QJbWOsIOBaRrgximGjeN2w2a2wOEzFFpmnguN8TWq1zKiMrwdJItevKLH5n9UvZqaHHwyJkFdzOeR1b5p1NjvKV6agCa/bwXZkktSjYLptXBqphQi+EhmmK7A97fBPU8G68AakbHFYR1mZs5NdmfhbgwbbDWht16pHBbLyW9EPF+ZTiVSWy5Utf5bNXoy5viMNUeskT/p/yX3rDu74ZKrxbmtuC96Q40a96pjix3+2sHDLT92toGtJ4nFN6r7tmns03PcxzxEqb6cAFT9d3NCttBMjA7Uvl8bZvEbTRMI6T8o0Abm4ezBQl7zRluPDOfemAFi0zKYapPxGWBYqtdGIC1Zs/VeDl78VAmX9UwaSUagRVRMjJgKDL81OOtNQmyddTrvO49VSUphlfu0JPnLqkTQ5dY2dNpCr1cRppZ1eUcufle978m9dT5COeu//2G971zVBtfiNoWn0cIWfW6zXTNDIc9zx79oz1qic0DSE0OB8oVmKBnTldhYcayf60erPskjr+aFqxyIrheGCaJjabC+J4ZNgfiFlxnXPSyU1a5WTumn1xiUO5ZZrUlXG8Jp5d6dA3+bqoIS33KEtlL0tDtbjXmZTAlxpUe9SCxzkcB7wPjOPIuu/qMIzlmhQkmFednZnXa4YuYwa38a7MKeiyZjFGRbQp0TSLqk7TQMN69uVevyGP/Ob19K7wbuM1CJCmRHKKjZ5SYr1ek2NkPOx58eIFjbNMTBNesZTqv14Xqlu+L4NYg+YwHMlA07Y68VEs0imOVb8mNhP7/cH6WHTgRQkOZaj1nCX7Da9GTkv03UEtAXQiBOcN01YQbFoUc5R/GVEtcjWDDXwoZ1VfT2izbEE4ySkzDaNyawIWvPbK4JdXDEI7Z1YDWyKo1BIxOT1qluktZ7FEo8V5bU5He6x55at+dr694JL/KH/70fc8XoOKGqLBBXx27A73NE2gDUHn0zvHe0+e8KN7BfF23rPqe25D0I0qVjklwvlmyqjwEyd0Xcf1zU2NIgzDkS+//JzVWpX/cDwyTQfImsZPVmtaAi5q9poBKVrYXzHMjKm8t6iUL4XDZXNLxKlErDxOAmSnI5lLpOeBierwNkFHAaljjIzHgePugBNtdiheGZgnbw1kJTr2OGmKKfiuCtacDdJM5miyONFaWai1NuXvavCAyNvsyNuTo2XFLx/uBFQJNT4oiL5kg+eZ2PYrZLNlOByYDgckQdd3uNAwvCGqkdE60ppuSZkCL6VKzzBvbcxh2/fUhmMR+ranbVvGIZLT3potMjnODYTZOXB+jkhldeK0fg9txAsaRQpOFLtPtB5aLzCLhSUvLZ0WpQJyv3jm7CC7eokyEjZOE3Ec2cVI1zZIFuvSnpuc1IacI2KvrJ3BaZHtDHptgiloF8VJq2mplGtAzMxo5lr05Y78fGik5Us++bld709CXhxNaBRD2Yz7cRjYbDbknHgRFRHk+vKCJjS1tvNnoYpAlhLRoPBSzlxeXNB4T3Ce6Dx92xF94C7sYJrmvoKsYD0
FDs97TybWgIA6+5jMm89C44uz7074ZXaqyuUfdgdrc4uYFaw5B1cEPsU6niY16sWcGwGOh0GnvHlPTjZR0JWSL/vsaygZugZZaEKjuq1a169+LOeMC3Jie5WIlP3rjcr7p6F3hXeD03KNKUbilAlNyzQMrFcrSJGX06D11G2nxmQIi/NsC/kGnVQRQpzQ+AZBYZiGYeTu9pari0sts7LRoo1XE0enXDqdTieenKY6gS0LEBpK3UbhW0URUFvBiyaJap9KwfAVMahCm75mpQIPjb+H/FXsilwzdkpl8EVOqY7YpRqP5dolVFgctvjgu2a7oGDEt01TDeWMnAyBUOM0GjyXrm20Mca+0dGmb5Oo+mnpD/gN/pv8T/jhI+95VKrpDHtdyMa3rFYbmqbTNInBnfR9z3qzqd5Q23Ss+7V5El9DJkyW/jDMUcp5TJ0q8OBcLcAmZ1arDX3fK9RV8XJjSSGWBLc1m4irU1Pq1ChLG/qKu2ce/SIKVbyg5WrMqajF714rasy6yBCte24YR8ZhZL/fk2M2Jb9g2OppPU6lDEAFryPFZHO354Us88v1nSV6tUz8vypV34r/SoD6QYaqXO0jPue/Jf/bt7nSL5xStskzot6jdt+qdxpCYLvZandvmg23Mk5R5DUHssjPksIuvALWxGavpcR4HNjvduzu7ojjpCmYrFFc732FuwJYum1Z0K57b5EZ+ylGcK0/FU7qTjWSw0LZLuugHnrypw+VcTa4dFk/amu48ORTUkN6HEZiShrlXUJaVS54EAUr5qXxbfGOalF+iQqYAFdFPvP0QwiTbAyoNbyLr3kYhn1Ib+Ddb/Ml/wb/+0cu8M1RmjQdqQJkns4jojXObdvVrFLTNrRdtxi9S80YlmdbBlqWy1R0ZoWcSZk4anbh5fPnDMejRlGAMoPch5KaRRV91tG+kUzS8BIZ499a0+8UKsoi/sFrZLV0P2vavoD2+5rZWtb4vZbsAQrIWS2CkjkiVDrwx2GsMIU1M+aKolZZOq9ScbZyVfQsznjpUzi5D7tHhf5JNSCT8iJ7kE/3otKfEd4d45FhGhjjyJRGm+ik2VXfBLq+YxhHppjwztOEAGLSx0RHNgckldQ7p3xb4RAxpOqcrJFQB1i8fPmS3W7HNOoUxeKgeOsvgPknZRuMYpHFXKadmc1QUvsFqWe2F06hoOZ+ljmrNdefPiCZbYblXmZx2nyYZgSCwsPTpE3op+83HrUsw+s0uOp8qp5yCx2gEVkzpFOuiDdlgtvyuwpO96tfwJ+Ib9fs+efknzxygTdEUKfpSOMbgrQ4H2iDVCOnHP6261iteryFisQ52qb9qYyt5U3XeGRW7NNx+v9z96extm1Znh/0m3Oubu/T3HPfva+LiMyozMquylWuArv8pbAQtku2kG1RgEBC/mIhgfkAksHwyVhCSAgkZCQaS/UBZBobAYIqY0wZFa7OVNoqZ1WZzIhsIqN9Ea+73Wn33mut2fFhjDnX2uc270VmxItXzBcnzj3n7GbtucYczX+M8R+imAvNAxm6tpO/N14iFy0GKq+XyNWwFyTT5FKvpcGvZSVUS2RfDTuvV45HsH1B04DFAVxWmSSStcPQk2v9LPeciM9yE0sdH5hqJMruVcHLSyd/SR0UQeYlFLt4tLn++LLD/Qri3VesDFgSA4c3Pu4LW3mFxOSsE6VktJ11jq5rGRtHSDJrvvLp8dnO+ktGvv5SlEtEi9xnL1RRQZxlp00BTulzRHnY6pRRIluFmnJeFJ0ER2oczfIlXdFLHfW9qzqS3+XfquVNqaJV2VEnlWyqAiyd+Gu5KrJb0vv1Fer0ory6hnzvnC8SuHyuZWRp2cOluWpp+pPPd58RZL3+4LI7EPllbt/4uC9qhTjjUivGOmdIMujD2UECnL4jJY8PQfRyK7XtpqRFBQg/xumqg7++M+W+L3XpKSXC7Nn5ICnGYuhZ0BmjsmmNK9pWgwwrGYCkF6GO20s11MWAr+r/1ynR+4HVetWa/9XK5LUKk327L0MatOeVXlhTn9c9WQEA5TVrZmB1Deg5lrq+lR5dZSoqYKKvk0qdQEFEjj/ZS5/q7zfZTUqOXwKWGAMxBqnx15S/nw/MPiinrDiPRvsEcm1gWz55fo3cFoktDaYZGQ98OBwqImhdccYWbZ2T0d9bClVY0bFZI+W107noWrv4CyUVwOv063HjX/2bWR5XssRZwYF6fSofpjYLCofr0j2/2onV6y3XwNFrmdVrl8fNs+dwGNkMm/q8otcLCLj+TEdn8CX05g8utx3P+DnzF4F//LWPeyOCOo87/DyRgnDElc7bMkYzJKmf7PsBq7OOU0pKhr7coJfq+tQXq0Yw50rcb/JCflwEbrfb4edZD7/UfbRNS9N0GNeochThTiWVUqFwQYnQmc+VkNcUeh5Jk5Z4zawczvKY1zupIkQJbXTRKKiSXRshuy5uYc4SqXjvxalW5GJBlMwrb0mJQErXOdnWTm9beAdXq3SJr/c/aWRWFOjRZ9G9L7ORy2Ned99eklFdV5zw1/KffvkPP4ulyE/5LDHG+pVzUhREmm5CEK7euo+v+IA1glenLWb5SsX51znfxYlLKbHf7djtdszzXGUpJ0EPKnIkUx00iFXHz+gxL52lxlS5XWqfSvF+YaoojVCl7k1N9SsM/CujbRSFykuT1EvoZRYjsKSsqDV4Rbnrlq+eS/3dOhgt17IgZqtGPX2utToqMmnZjz7uuO66vJh83b/m+39/nezesuE38ps5+b6oJQhUJKRIzGLk/TwTUsA4Q9/10qHuhRrN2YYyq9to84iUtliSNgWVlQuqCGi8rxyIqQ5YSSkxjmOV3ZJ2zzEtAwFSqb8oQV3Rh6hOW1AdYzmSXVNQgpXuWyZZHQfdr84AHBvNKtNZdT1G9GtxPPQxgkTF1buorNSgaakFXy8JyPKR3BT9Xxt9MJWDEsA1bnmuyu7CrnLfCef/L2S3Xj9ZG+pCBQVMFodsDoHDODLPnhgLxU8JrIw4qavXW/1QUWn121RfcqQKCuf1pCwt6z3NKySw+gXG1qa9gnSblX6V9P6iY4tOrrXHBRjS9y9yXOj9KI+pTm9p3lX/x9x3UqWht22a6qfcH1xST4hB6scpm3IsV2UdM2HAfr/j8upy9VqLzarAl9qighRT3qN8Dv7wctvwCW/xF17+w2q90UFNYVb+MB0hipUuM02VlsHcw9BiDJXWwzXd0Q3K8HpHb6W0lhsqEVFpJrm+vOLy+Qvubm6lL84szT9lKsVLxpFiAM3RvNuSnpGiZwFZm0IIXSL7VSRfOuXWcP7r60SLg1mUlkSVxXGPKcpEljpbV/3qYuSL165ruemLg1AEbaGYWhvtVZpKkVZjS12XvsZRjdernZU/6HqbD/kvpn/1J/Z6f5g1jneEeSKHRI4QY9apOMKPZ63BtQ0+eMZpFCfyD/meRuU25kTQYvfb21v2hz0hhPIouTcJsiJNUh1t672MISF8+AUdXaaYlNqlJcq3i7HXVePsVxr3+9d8z9CvnIsQYeEsVSMfo6abFrnkNf9ar6
P61NV7TtPETmvY148tmYLXN63pWX3NX3/cdcJH/Fr+n/+EXu0Pt8Soe2JSPseU8H4meBl1SiO10PvDgcN4YPbFidSRUpbalQxLYHW/vrOslHMl0M85Y53UPO/3ew6Hg+h1dVTX92N9TwtN3+RnckzYBA1GUqvW4ozFGUEcDYXazyENfaV5ZB2ss7z2Wn5X4EHl3dQmsaOJQsYw9D2bzWZlh7TUKUcqRU990VfvjbxWccD1OjQovb6+5uOPP5bHrPY5F8epPB9W11B0809ufVlkVzKXSYbzRE/0gekwchj3HMY94ziSc2a333G3u+NwGOstOEbp5NvRfr5iVaRaf7aN6Kp5nvE+4OfAzc0N0zQRZg8pSzlLlSFtwrSOy8tb5jlIf3V2mu5vALeSSYdZIeZ14qVBMgq2oe9F3spUvmwL4ICUajnJMhwOEz/80Ufc3d0piLWszWbL6ek5bdPX4UDZZELSCZkqryknlVrziq/F1kumrbAhGQ6HA5eXz7m+vlwhs6I/2tbVc26AprEMXbf4Pj9Bf+FrGP470b3xMZ+R4pf6STPPJGtpmx7vowbOnmma6IYBrCWRiMFzd7BsnQMjN0rmnN8TsAVxFwja3Pt1ccI0gYQxTEq6XNAwoCJ/KSTatlk66OWlpT4zSkWLdPZZLXxe0KhGicetc9p5eY/GZHXZL0UmKgxr3lQUmi8RkfglSyQSojgqBRWVQEWnThUncvUZ7m+aNZZG6w5BEFofAj6EWuOI6t5sMuSEuRf9/3jr3s3RX611htFbeMkJ/y5/kn/6D/pWP8EVgwQCkoaEmAW5HscDKUdsY3XUXmS335FnT3O2fa0jV9cbbEsx0hlTa033+33lr93t9nRdqwh6VIom6vmQCVGWaRo57O/Ydi1tIzPKZXqPdqhadWadqQa+IDkS1SsDwMqoFvlbSkRMDSKnaeLZixecnJ4zbLbCZaj1rsNmw3Z7oowZUQ2C1IjLThXlaslazaq78fp9opxFy2F/IKXEMGykJm3tyL7hBtS/5tV0k1e800v37LWy+1X+Cv8SXw4OCukyFiojyVSN44FsMq5xFQW926ncYuvQBGAVoax+/oxVZKRpW/qcmY1hX3gYfeDq5hpnLNM4aYAnyrekT8tbXT59yra1nA2DNB8aQ4NTOjPhYy5TpNYc1MWwrp2H2jOwQnEkmyBlAdM88zu/+y2+//3v8/Vf+EW+9tWv8fbb71C65tu2Yxi2wtMdZgpKF1LEWe1TQJyLaE1pvVqtvLqmgtaZem3TODHPMycnp1w8ePDj3+Tlw77qjiyXsPrVl192c7WF1lrGcWSOszYyTeQYaDC0xkrqmiwO3vL0l+R17aCuQawSNDdNA4PUZ3vvdbxpJKTMOM04K5RjZSyoQRrfnBPuhzBPfPLRh5z3jofnZzSnJwztRonwBThKHNeZFt+mXHIqRldXpacqwJj6Fbvdjg8//Ii/9H//tzkcDvyxP/4n+ON//E/wx/7YH6tN2s7aqoOnKdL34qgK93GZayx6VIKhyqAuX/lYdjIShMogmYa3z97GOccHH/yQP/En/yS1jArAWJKBqP1Hxlja1gn/QNL3KgHYKyfWfX65/RE9/5r5I2+cPfmZXfyCCMmoLBNcTdvFKFH9eNhjnHRN5hA4MJKxPHrrXC7s/sW+6vPcR4ZX6JBzjtQk/CyOWAYOhwPWWvzsSSHq+c6aBtAtyplxPODo6Fzp1tcaVFPq9xQtsoWaRwupK5KkxcX3mk/Wjmr5W0yJJ59+grFOIqCzB7hGInppbOiqkEpaXiJ6NKqvUP4bUaEFVi9Rec7gfWAcJzZDX0cNllKFtOpMfFkPlvSWoq16w9ZBA6+8f/eckAyYzAmeP8Xl/Qf/TFbOiZgCOTVSF4cgU/MsRfvGGaZxxOQoUza6xOl2uO/+CF+tvKL+5h7iyJLqWRpCBNlv25agDS4ZGKcJkMAvRc1KuKW9owQS07jn9jqShwGzGeg3G+1KBbTrU6hCFgL+iqbqda0/x8syq65lSux2O/6j3/i7fPzJxzx+/C4///Wv80d/6Zfo+4aUZI540zSknAk507baDavK0mgmIMsoD3E+j++E3g9Nr1mROqNRvVC2WXa7PQ/Oz5c9rjWKCwOI0eDSvEGhvOTYfk7ZPSPyj5jd/Qf/bNZCTloJt33wMBnMLETe0U/yMC8zzkNKR4ZeHK+1p/qyVpEgVt6v6iU1+MOwIYSk4yMdjevo20YCilxq3OX1i7pM0bO7vSE3liYGWjKDc9imWaHzRvVtKeRafgfr+yfBVM2uyR8rO8zz58/47ve+x1/963+TcZp4cXnNYT+y3Z5wcnpylKIsurU0YWXVi9aoTBox6ukeQlQACJFdBR209MRZR3c6YAw8efKU09NTGuuqNpWvXEvOjBEbVF0eRWHX9u8Pqne/PLK7ZPyKjpnnGYJmlWLA6sAca+R3qfpTWcv7StgAb5JbKWzOSi+pAXrO9MOGlIRvFQPnDx7SWMM0zsw+LmCZsp8YEjl6QoQxZnZAS6ZvG7qurZchzVJLMGUoPsFxg1L5nbNO82JGnWlD9JHf/71v8bd+/df53ve+R9f1fPDBB5yenvH222/z3nvvalBa6pglYMwkPZu6x6ppxXdAgDlTNK9Z7ZMpYH/1A8iGzbClH3qePH3Kfn9g6DpKmUGxIZJtMfWeZp2wVcf5sbzV/bDu88rtFS1/xbz9KkGq67MdVKP9cjnqHG5ZJV1tnQWTCH4ix0j2kYRZ8R6+wpisPs+9bEiNjErUXCIJUdKRrMThbdNUQ6/bqA5fEe/E7u4OGzpM1zJsemwrlD2GpfC5RMQU4St1VKvDcTR9av2lymueZ37wwx/x/e9/H9e0vP32u/zCL3Y8OD+DDE3T0rZCLp5sFCOvSrN0Mao+JZs1Dlw2adm0grrKbwQFizExjhNd29brLWT/pU5ncaXyvRcuheKvd4tfdd/MSrGWV8wY4pvYG77QJVyalDGNWYrN51m6TDEQ5rk6qC1GOHU/K/12P6BaBzPGaHq0rbU8ISTMOErRvm0kxVlkLGWy03tSgiwSfprYhxnjZ1wMnDYNZjPUt1/eb9UcVWRU/76MXWT5ffm3Bi/X19f84Ac/4Nd//de5vrnh7XfeYw6Rx4/f5r333pMaMaSeypIhN2Qk4yCjdYU3r2QBat1sNcNLo15Ky5nK+jxjLV030HQtd3c7TrYnOLcMShAalqUxE4TmxVQDLwr35Xvymvu2umX3ZfcEy58y/f1n/YyWOn4rA+F9FE5pIKVAjl64P0OUsoucV8b++HVevSH6CCP3CXVQrXPaUOo48YHxcAAyw2bLpu+4ubmlNG+Z6oyIbokhMEWZlHfImQ7DSddhhm51Tso/CwBwHDwt38VIOi0PKI+3RtCwD3/0IX/7b/9tvvGNb3B2fk5KMAxb3n33PX75l39JeEtX8u+cI5t1PV1xAupPQKFYM3XbDDKlsBQApJzq2TvZntC0Ld///vc4HCa2m6Ea+kLlU7Inxki6VGQ3gU6rk3uwIGKvXH/fyO4qsDKABT/NJKN1qVm+S0bT6
uz3dXFF2YeXwYCjVeTcLPe2kI9b69ieCCF/SontySmdtTx3L8hIBrbwVxvNMOYov4s+Mo8HxsbhT7aYk02V2eqkrq/LqFOnD6hyjTTEOq1hLTHQ8+fP+f1vfYvf+s3fZPSeruu4ub3hk08/4cOPPuT999/XZt6kwzKWmtX7fSZFYkt+en3S61/UOZXv8kNOmaZt2Z6c0PcDNze3uIuLGkgVKrTaWFjqS1Mmm0jJzsmGvFm3fJbcelqemDfTUr7RQZVmpwREMJJwN0aUU4qJOXlACIhj8piU8FiSa6WZh/xKe39cU2IqGfmivZTUuXUMdqOppw7MjsM44n2kca06YWhdUXFOs/Cgxsjt1Qti1xCHgY4Lhq7F0ijJbSaXOllTaqFK4f/6WleOai7lXZpmTYZpnvno44/5N/6Nf5Pr62uaruNXf+XX2GxOeHjxsI7S7LquFuj3/YBzVtOkmTLRJNdPcNwJV45tQicjUWpEDEPXkwzMwSsRcUmxyBMr+a6WZy01J0e3gGIQF1dgFYmZ+7956ReA4RNzyl9w/xA/e8roYgBZOX6yF2MMlM5aQ8KqLAbXrDp/V1FAeb3Vvtx35rOxMizNyhSTtm3JGW1aEqM/e8/Dtx5xsh148fwF8+xJHHBIZGxVbsmBFA0hZg5+xnjPSdtydn5aPpg8vvJH3kOiUOO++tyudElnwAqnX0rw3W9/h3/v3/t/8x//vb/H43ffJZsnbL635a2HD/nKV75amwwKUt+2HZhUB1WUsHDZIyuBrFFi9eot2YoK1FAyC3PBZrthe3LCBz/4gGma6Pte4zGN5g2LsjYsHLQ5ScqpKNQjkfzxZRfzAGP/03xZlkyaM2JAkWxVDrbKivwvQYrCWZgTsTib5KOhEmW9FISKla4GcGGyEA/g7OxMkJ8Y2Z6ese062u4FdjwQYq7XYvR6UhRZCT4xpkSTM9u+5/T8hKzjlCwLNVkBIhrTKK8uiuAs1926hs41tNaRyDgD4zjxrd/7Pf7Wv///Ydie0LUtKUWePv2Ub37zm/zar/1apUZLZdyT0e7tSi3FPdldMlhF/xZXtjTfy45LeUWIka7vOTk9o+0GXlxe0jZvqzgWh6UwdMheOWvVIRJNjsvVsVr01N/Hsls+gwajTduw3+8o0xgM4pcnsjitqhPWuuqoNLi87Epu1/0hNcixRnWPPLftOppZOFczYGqPiSWnqNpSwZmcydEsv9MSwnkcSfmMxpTCj7yS21x1rf5CkHq9vpylSc65Rmte5Vx88xvf4Aff+z45JrabgWHTg4XnVy/41u9/iz/zD/8Z6VXRCYcpATbRtLZcbTVplaWnyHJ1/orbSjVjxUFOkvpWJ/6Ut99+hydPnrDdbCTjWJuIlXeVpXkq56h9EWC7DspELAq48uPL7WC2/IL5lTeK1BvhLqHRsKt0tBoY5ZQMwbPf7xkPB8JhJk6+jukSaqU3vndN3a2vPiJdqNY19N1A1/d0/YbTs3MuHj5kGAbeff+rfOWrX+OtR49ou0EjsIi1EqU2jQEipEA4HLi7vuLpJ5+yv7tT1BKNfuS7M43WKhqNyJo6wabyaSZBDCzCIiDoJXzrd3+Xf+sv/iW++Vu/VUmuf/TxR/yNv/E39IaLU980DU0jjmrbtTIJKy17Ksc2knIgEYk6sKAoxnK7iomHBNZycn7O48dv8/Ctt9gfDpUmRpgXltR24UItY/vIEV7lrP4h1q/lB/xf+BIoSlBOU5kcUjOIRpyimJM6+koBF7PKq9zrhSz5x9ubpclOviLgmkan/Uia3LU9TdthrFudj1KCIWeLGOmto28aXIZxtxc+yoXqYgGkSjmqddT/TIvFETOahZDhGq0aegvkGPj4wx/xO9/4bc7PT9luNmyGgcurF/zdv/d3AHR8sZb0ZAgpVdaJbO7pG1hJa8ln5FqhmmqKVOpmyz43TcN2e8L25ITr61sph0jifEoZgKOk+WsHdpHdz1IwP8b6PX6Pf47/yk/s9f4wqwQtWXXa48ePdLABrBMUNYNfftbvS0j9WevlmnqD1P6HnAiagk0GYk7MMRIy6jiXTFNW3uWISREHXJyecH4ijuN+f1eZH+RN1mgpS1kMyo2qg05iFovj2rZSDBZs/Rvf+C2efPopDy8uODs74eR0A85wdXvDBz/6AOec1hzKWR/nmSmGijDfb7xZ/1z1o0AdisRpPzDIsBZEdo21nJ2f8yu/8itcX98wjtNRI1rJ/BUO5vJeIXj8PEEOPxH1+2WRXYvTITkJ6yzvv/8e5xcP2J6c0HXdS0F/3WPdo1pP/hnLqA1e/yxL5DaSpTnJGpFbn5hDUjrKkr2KEBOEjI2JxsDDs1Muzk7pm4arq8uFDYfVdRlqQFdS/nVk+qokybhGOYHlmc5abq+v8OMoQdvpqTR4G8PusOfJ0yeS3QsR7yOTDxymkSn4hWUjRnGoda8KKl0+Oyv9K99NzbgalkZbrMU1HRcXb/HgwUMO+5G72z0pgrNtBbgkVhUtnlJi9p7Zz2h0/IeW3V1+zDfif+GNj/kMBLWVTkuMjuZSRMbphCQfpI4TxGhlmSVLGTn6eT5BhYsBu9QM1WWMkEAjUUvpjosoQXQSR1gQUDXgKN4YpQmqd47GWqb9SHoQkc48Lb43bnHwi9WXF6jLWunGK4Tpzgj62VjDi2dP+c63v8WDB2cMm46ua4nR8+LyOU+fPiXERIiJGDMxaYRFqTpc7dHq/UrElQyUJIII2kL3ZVyDMdA2DcNmQyazu7tjmjwxqDZt5CAtVCcIYpgCJCtNOhlytnwuzbC+bat/l6f+Hlv+6/nX+Ns/3kv9VJZ0n2tFWc4Mm4GTkxPudrdMs1CQxHkGCmrNUXqjKIBXLkVJlmaAxZEqaZ5yb0sXZ5laMs8zk5JV51zMrjjSKQAm45yldZaH5+dsu545zOz3O7anG5p7fZRybI7PTXWUBZetdGaFzg1ruL2+JobA2fk5NJbtdoNxlpASh2ni+fPnTPOMj5FZm/CMhb5ptYEnkSw6XvgVtodCcXUPxysK3pRJQo6263n06DEvXjxnHKVbPWiAZe45FKUeKoYgI5GbIETbKzThs9arZDdlwy62n+8FfsprM2zoh4EpBELKPHz4UND/eWKaZyHQ9wGnOF9mQR2Vc7siKW9ahUd3SbnLU6wCB6XiSQxiwnvR+YUiDywmShbNkHBWGjEenJ7yzsO36LuOm/0tOSfl6z1+/1zZR15GyaSusCEbSwB8Fv5oazPbYeB0s+Vks6XZ9LRtRzQGHwO7/Z6rqyvZJzX0s/c4DE2yKyrEz7oLReaWiGBdjrBwuTr6fuCttx4xjhMLtZqpzko+0iuJGASp7VME6xQ1/Hzy+2WW3c1mK/LjPTHL+X7r4UP2uwO7/R6fZ0xKi6xR+G5EDlKCRDqOwl6xjlHURXakfKD8brGyIQQZ+kMJMDIuO5oErYHeQBMzP/f4HR699ZCmbbjZ3wJJh0mYBbU0ksl02VL+M2V6ZDlzWpoUUmSOAWcge8/JySlnZ+fc7fZ0m0FGwSNN3uM0SyNvnKVkJ+kQgWy0
/lmp+HJc9kdRguW65DMXYKoM+5Byi1x5Zo0pU/wcP//1X+B3vvFbjIcDD87Paa2rOlZeXHyJko3IMTHd3dKfPajXIOJeIdxjdJnl1/X+6fef54f89/kfA//n197rNzqobduqcSupdHnpvu/IZNrQEoMnRaT+NOf6GHm8fLAQ/FHEWg7u/bUgUAWlMVBQJcT5LIZfENygXKzaOAKKqiQZtWczbSPjV8+2J4TgV5yKKxTKFKUoVChyE81S4G718xtb90JS6FLE37UtuIau7WQKEBBSZD8eMKYhxISPMhnLOq1LKp9EswWv3A91ypfaveUOlwNqnZMuRgNt10vphfc4dcxB0IiGAr3r3qe8IFW2vM9afMr6/MS7hjva9HeAf+yNj/0iltEudax8rq5tGYZBlJhzTOOeOC6R8XE0ujpQZl13pF+GulfltwUtETnRwCNLU09mIY4OIehEm1TTqTmBzQZn5D41Boa24fHFBW9dXDCHmYOfqqxSDJ8pzvXxvVljYiVLEHOW7mVjcXpG26blZLOBxsncbCP8kSFGoWYJqQZYIcZK77amPyvyu17HtX3LFVXxXQWERVlaJ0jqfrfDT5O64cUImfUdUe7ZhCFggse2ThTkMky73sXPK7uWnq35+Tc+7otaUTlJJS0twfRmewLGkkLGxwnJmJfmhYU7unAdH9+Y44Bfvi119PVP5af1NtYcISSVWxByNFXJNNbSO8tJ1+Jy4MHJCY8uLjjZbjmdTxjDrHWkhQ5tJbOitKljl++DBUUPa6lSTAlrHG3b0rcdbddjrCNl+VuMAWOlQ7xQosUk41AXGj9TjfvqoOuWLcGlqUDLS9sne+ecjMg0jseP3+H73/suYfacnZ3StR3rTIc96iQXEvv9bsf2QUsp8MpHd+PvP9n13kvpW9beiJTrtDBpblM9oE7qOnQVkV3LwGd76+Lnmvr4RWTXdGiie7PKsVFVYbA4A9uh49GDLeP1FQ8fnPHowQO6oefBxTlPb15Uispib+s7qZNdK1DNUvMp15B1UlVS2RO++GHY0Pc9TdOR1LbHJFM5Mcv0vlIC1TU9UvdZPg3HUlJLnESGRA2KVVg68+V6TfEl9HvOhmF7wjQHxnHm/PRlUGbR3yKPiYyfJvqzAuKU81zuSbmvny23CUtg88bHvTFU6TqJTivfnN4kqb055ez8jH7TS/pJXPmjTQNkGknlgFxWjXrMAkmLoS+1JawUZdY/qvCRJE0SAiHGegNqyitnRQpldvjF+RnvPn7MMHSyeTlVk1nLTo1EFoX+pjRTLX9U59SUrm6ZVtJ3PRfnD9gMA502dJU6p3GaCFGu0euM9tLQUI38/Tzd+iauovF6v+VS1MFe6IOcaySChfo+RimPhC9twWuTRkg1UsqZdeFILWh/hdPMchn1q6yeAz/Pt1/7nC9yGet0LKOUqLQqw/0wMPQDjW0WpPQlIuRXraVbvhz2RWeVOk3dYZVVWORXuojNUZ2PqTIrFFK9bdh2HZ21nA4Djy4u+Mq77/JHvvY1+q4V8uh7TsVxM9/agK6uC9TxTDXlbq2rTvtmGGjKcA291uJE1wyA8u4eiew9OhPK38rpysd/Xm5O+VbOlUT/p6dnShCvvLHrmMCs93KpjQpe6INMri1FfyDZNZzQmz/92ud8kcurzsgYsE5GQ7qGxrVSExdZ6m9zSY3q3cuok7q8XgnA738tf1/O/n1jXM6INTKfu+gLwQ605MlYtl3PW2fnkuI/O+XB2SkX5+e8//Y7tM4pzZ82phZ9Vs9QXt3nY+Cgji1VXZiSfNZG6/rbpl3q/9T5c20jBVNJnNNExrq2Oqf1/OS1JKihrf69BmDrFp610Iixqmnci4u3uLm+5fLykjAH2qal2Jc1TfGSpUns9js9R6k6UH8/y+40jkyzJ2VwtsFrgFuDeUFiVD7TUSNnYa7PGvhAoWtcB7avkFtTK/ApzZ9rvl4BmqIGbKVzX7MHxrAdNrz/9tt01nC23bIdBk6GDY8fvkXfdpTRptglwJIUP8t50Sst4AEcy2wxCE3T0vc9w7Ch63pc0y5+k7JMCK6x6LemWZBx89KdX61j71ifvzippcnK2MVBTRlM0+Ljcp+kasos96vsK4vf4f1M9BN+ngSgTOsG+vzKr1fJ7Y6B3zR/5DXSJOuNCOrJ9hTbDcSECBtgQhKuvE2HsYa2dTwfR6n7yJlsEjEvaNLL6/iDLx69fDiZmatCp46hqYSzSm+DqXU9UuMp0ZnJBpvBmYx1megjD062fP2rX+Hn3v8aV4cbnt9dk8zxla0V+PK7ez87RcLK58plwk9D2/Scbh22bQjGELNEidfX1zx8+EgJ4mWkpnyGMhFKbl7l3F8J9+LwHHf1r2tdSqNMqXF9cHHB3YsXhBCOHKbiEIkCVPQ2I9OWypSYBcp96X7prTn61fphJZ644x3+Lv/cq4XpC15tM9B1G1I2lQMuZKkrlfrhMjGjtJ2lI2NU1yqqP47w10vHOep/JQmfyMcOKWalOKkIjcuiLE83G37unbfYXb3g/cePuTg75WQYGDYDt/s7xuil9M9aMEuOMpO1CUVTO6ZUf6Z6fSJJQEYK8GMWYumuJ7tSeyiRuDGG0c9krPJwRkLO9E1Be8pY1QhZOp+lodNUJzWZjDHl/Rf2j9p1C9rckKXAD3hwcYH/dmQcR/pWDHw1KlYu3mgAJaKa8FOgOzWLfB4F759fdhOPuOOfffn+/wyWMY5Mo+VHTvifYyZpoG8aS4hJynRyubu6auC5MlhH26B43QqxqtkYRA5TFvfgCC0v+iSXeml5zcZYXI6cbQZ+4Wtf5du/fcX7jx5x1g/YDH3f07UNwVDrl1G0Zxld96pNoNq3ElQvDCuS3dtsTkg6lMBGcZZtRekWBCsnaUzNYZYwfaXXq7is9qoYUtmeZW9Fz6rsYmR6XJQaQescPkSCjjXOa71rUM5j2UNr5TolCMuCnlkn57Cco3Ixfx/JbkiZhkzbNDRdz2EayUHL8AxCXB/Qe2KWehSgBPYloK7oXPVsFhqy6j9ASXxTgmdbnqty6jD4HKkMP3qznZGGu9Oh5auPH7H/6Eecdj0NEOeZkCONtUs9q/ol+qKUoFnMSNKsmWEZN30cbGT92zBsuHhwAa1j72fibk/Wcz00HTHlWlqfs+jK6H0NbIpoFLk1RUZyRtMqGGNF+6suNtZgG6d4wkKjWbI0cwhaX24JGfpceIpBOH8txmSsdaScmeaRmxfPCdnQdj39ppda8U7uigQZBQS09d6UVbZxh+fvmCdvlKk3OqgxRiEHd0LEn+IyFSnImBmC8nbFHElEbIZEGSf3+lUdovKRjPyrOGpSH+VIeSG2jzFV6pGC8JCF5oSca5d9bx2/8vWf5+rDH/LOxQXvPX6EzZH33nkHnzx7PykkLhuXslWnIq9113KdKN8plhTB+wTO4GxL23b0fU+cDM61MjlKqStubm44PT0nxCg8hVAFpQqXauFSGlFuYO3SW2nwGKPypBpcI7cupSgNPk2mcU6IimevNVLyHqVAX+RGGoIwgRQCJgvtVfI
ztu2Ow/0fc53yIf8J868C/+gf6Pk/yRVjJIZlKk4IgTSOwucZAyFncFbSUizIxmvXOuBaKcj1MkaK4RsrQYgozjWvnFAEaQivMih1dTZnWmPYdh2nZw945/wBW+uYdzvurq5WfKMrmTDip0olw0p+oKLr63T76q8Aiv4/JFsIZPbTRJ5mTMxs+4Gbu/1RcNP1G8K4X6aYrbfn3n4cNbrm1bdq5BeHPZW6tJzrFJio6EMIftlys5wJY4QPdNwfaNorYso0bUc7DJjmMwsMX1rv0fNf5es/9vN+GiuEgAuBtpPGhsP+UIm6szr4M6o3c15svK7qYOnPqZD4m6JzFh1bJOqYQfH+64kzhpG64+T0uTmRTMI5y7bveXh6xs+9/Q6bxmGUhvD65lJ8DwslC6UXsHCcWrt6M9X9rlXZiJqiRY2vMGP0/YZHFw9p+5a78cDVzS1pjpgIvXHEEOskHQHkLD6GSp22XmvZrT7zPdkFOWNN01ABZm1o1INPBi2lEbtUB8qYJTCVQTGWGAPzYceTj36EsY5uGOg3J9I44950N15eXxbZjT7gbIvtJAs5jiPOuOogWWsIBqEpsoCzuEZ0syEXv+8e9dSyVm571S2vM1VLY2WQZikD0ZSaV2k4fXB2yqMH55wPG37u/a9w0rc4k/EhMAapJy5ZyFJKUHpQGudoVoXMpSbZapYtpYD4E5aE2ITNZsPJyQmzspVs5onBOvbjSNd2OJBG8xgECDOQnSWHJTh85WeVK1j9RtuoNTCQyZmN/kUcX8rkSeN0CpujjrxKSevbWXwHUOQhEmYZUNEOPdZm/KTsSZu+PuzzrF/hBf86f/mNj3mjg7rb7ci2B9sQc9YuOIQix+TaLWmbBnzhkRRkY9m4+yuv0NWlzkbdxVV6R3DDuIp6jtPiigRas6KzEYXtrOX9R4859TNvnQoPWgqBy+fPSTHgrCWVDoAaPhe0Rl4p6bSc2sUP1NFVZEE5kqBQJ9tTthuJDPd+YpxnbMqcn5wSg46GRCKjtu3Vob+3B2bZhQpDoZ16Sty9dgwMRtOutgba1lpu7+4IMdDT1STJ+j5kTXWQs07dOGD2B7KBfrOl6QftMrf18S/R07xmbdjwq+bXPtdjf9prmiZcM2ObFuMcJkbiBKYRhWOtkNxnY6QLmVTl7iVlkFE+4BK+5krAXB6wruA1a6nWUox1zZW+m9KvaJRrDJt+4N1Hb2P3O7bDQNs4nDWkxpEOSZr09JrX6a3CgVqWOKZQyKTX1GLJQGMdzrV0bSelKV3H7L0YZ5233ruW5GOtm0sgip5XN4+tJeTVE3lWj11lCjIFFdHGghBr7VpxQuV8FJfLVJQkpsg47ok54ZqWrt+AgZYe03wmxfO99Qwb/0/An/oxn/eTX9KAOoOxktKfDaZjQRRVXqojpTopxwQpKxIqTRv6lLoW5EWNClnT7mv9s3pcgTtyDaUp1EDSN2o4PTnh7PSMzTDw3uPHNNbRNE6aQCykg6dQEJRrPhp+srq+cn6sWRpTwCjxu5RPnZ6ecnZ6yvWwoWsbnHF0tmF/GHGNlEHM08wcgjB2OENypafh5f1eHJ9F/xrNBlIb0Bagojy2oH0hBNCGP2eFND4DOSQZvEMm+iDONVlQ7iTTraZxT9cPxOAI80RjDY3rjxGnz3RWvxyyW4JZ4c1M5JBo+laQvIgCI0KRVybVWYOgqHq/V9Uex5y+NSYvaXrJtgoriMhVAG0slmxANlZRVVO5dLMitWS4uLjg4sEDcs48fvQIa4UaigZ81Dc1tqbFjbV6f+2RDiuXZwBnJQtRJbkEYAbOHzzg9PRSuEcxnA5bNk1HPElsNhvSNOOnSUa8q2+TnDmetPXKdeycSiBvKU1iKFi13sfCilKu3ipFYtM0UrcdEvMs0zsvLi70+YmUgtCKZkPbtzLVTvVVFzzGfn69u2PgN/Mv8mff8JjPHnXqPVhxqJJONZEDqZsBegOqBIlDZRbk+f7u1l+tUJaC9CwtQaocBXZZKVWUTWAx9rYaL4l4+q7lwdkZw/wW22EjZLnOkKawpFyMqSknoYxY1b6iF55zTQm/pEWV29EYQ9d29E2Lj0EOXxZk9rTfEMJCeIsB5xpSnrXTdmUS1FEu/vLa4Sh7KHuke27K+6/cI2vZH0ap01o2FrJOnzFBHRfZuRg98zQL+tR1QtRtHVmnUZUP+yondf1jRQfo2fDVlwXpZ7BKCUhWYmETLNaJAimytXzpk+7bgXtO6iIbx48V8Stya45inqwehcn3Xm/l5hkDjWvo+4Ht9oTWWdqmFWXopPs5FRjKLGetGvl6EcvHKOhU5Q0tf1FHoRt6ur6naRr6ppU6QWvoFJLtbUMKis4XZakNX/eV5ct2ZDHymEI19TICsA4E5JutAz5kTrslxUTwkpUQ5GFLCVZTDPgg1G7DxhCjJ3hhb2gad2zk1/vzCtltibzNni/DKgY+JpngJw1JZS6NlQDJCOm76DL5Kohh1Z/q3FURqP9njiDTdVXzuhnjyGXLJdBa5E/0vGWz2bLdbrHWcXp2JiCMpmjLFZXnLQHJy84p+jepVT3OCtQPCvQboR90zokz3Df0TcdJO9A0LS5novJgRj032ZllJvrxq67+cdzeYVgAidffrIIyLTW2hWYuxcg8zYQYMBi2p6d6f8Woh+CxvqHternfweNng22bkjtWO7Vc4pdZdiXgFNQ7BC/BkrWYZCoXs8XilK2n6Kic0qKTQTLVcCy3RZZLLeXyrlWH1D47U3yHxXm0ZjUGXd+373vZe2Pp+o0AcDqRLca40rWm/O/o6xU7QOkNEbu5BngMTddiGkdG7EHXNFjXYDNshy1dTBgv0zFzEhpJyuCS+/b33j/yvd+v/aojGrUodjGVTEKlnSx7Ltm/aZrY73ZkkjioSEYrp0jwnoiRgFiDLe8Th7sd/fZEnPx7wvoquU10TLz/yp0s6zPd3RAS2QStI5EblkIk2gU6XnM/ls5zvRKk8/7+yvX/NdFHUaTLRukHy0YcDP29ZcUAYErXtDwxZ2gax8l2y3bYsHnwQFImCOl/m1uyH+v7FkdVAiWzmjJyvJmmEJ3rVct7UqMxay2bvqdLrYw1NY4YM6fDhqvdSI5Sc2oah2kcwUuN3jJB5HiJg7McDLmOpZBeHiQH8AgVMEYCCkTQyEapITLzOJPahCspqpxIMRD8xOwjTduQQyQGjwsO4wzgllv5iuu8vywtG979jEd9UUunYiTlzAyWxhhcdmrmNbo2FmMK9YmpCOqCBOTlNuTlnldXTP+QtTa6Coc6aWsv6Qj5Wn4BGNq2E4exbRicUD5lDaKSKfIqFVeGpRSmfl998vL74ozLWxW0UpzafuiVj1co0od+YGh7TrsNjWsYrCOHKGU9OYNzauTNK87zetcl1Vn9H42uqpiulaX+O5XsjO6dUYcmY8kxMh5GMebAZthidFBFSoHovaSsVKaDn3GNxaVWnfFyanVvXnPdLS3v2sdv+GRf3JJpRZmcIzEGMJJwE6om2VkpSkoLD2PRi2tEZ6Uu6i/r97WhV+2WS1OQBhj6lx
Z7/HQ8nMS1EOWipD36PAlAt9dGUMfw5afcBUnb2nhVkrwkyw2RrauuxcnR6AgBRPHvKyTjkGf38tiRWlVLkABwqhWGu4k9ZNsMESPM60dZvtqUjjPUp4L2x3Go+IZBs8jRXViT4FR0orGgz53Wx6NkPjU2eQys3tNTENSOq4f/GKu/sDVU2POhdD5kUCoRVJHsjaco5mm8vkvzrPU9vUvdA4PUZ/DBe5g8XHtklws93yw/0Vp8OBILDd9gyb3lDrBaf3eIoDNp4ItzZ5nids+Yupb9jmMJAYkb6zzZTq70V8z6YIJUQYOrZPrrj5jR/y0//XP+Rh2zNHd3tVCS0pFx+gapJVi/GKu0jj9ht27XxcBQnFO1ZCG05tHSuL55WQbL7HrMqHImt1qkQllGLD4rXQRlQGNRTfTr+LoinIggG2vEgX//uXavErE8Js1XMe2QzCfgcplsaSoPcp2opVldIF+hhRLZ4cWnX48uULXh8OjFMm57q03e35BH/4eDWib70P9Qq2LsLf1hrxXciO7rWkUNQHtLC94BqFWuF0PnE+nxm6QJ8GCNGSUvGKyH9accSyVTXGd82Uau/7cHjgeDxS9nuk722qbj6RRCAaPJ9DQEJHSIlys+f6N77Pj/6tf5PX/+j/yAjMtRpUvzALmoG7+HtLCldQzIyjOolfbIJwLlaHZb+dZmjz4mRFKqlPKJUYPXlWoFRSEBOvxxEPNQ5kqZmonX92TzSWIsA4rX4+FkQVLPBcXQ/vtrhf2jWDmq5rlxJCXtx7lEAUqKUwdImw6cEHT6jWWu37nvOYef78G+6OR19PaK8cJKLBE1Bd0dNm74uuJi3gW/tH1PdwF10ciTm3ALV1F8z5BpNrYLPZ8PH3PubhzZ3FqWBc2CCJpoy4CEZ5cK5NaFpt29OcodRMqTOn04k3d3d88L3vMcSO7pzJ04kJJ7wHoYhtfpolUXc9u4+ecv2v/TZ/8l/8Qx6iMqlJTF3arnpSnqNxyG3oCUcxsMl9V6RYGqZSrbilgswLSv04SarEaFKA6kineufEJoCVqpFaraqXAnOe6MKwJl0CUg2xMjfjHvPCq4cY3hvbLfOJFCp9B11SYiguZ9bSdkGlsh16IpCSkMvE8fgAjnY+HO755PMvjfbhsEmUSJZVCghkCfStXVeNJ2J/p5bhiycE4zyyGaLp73pCqDVASED0RHXlbF9dXfH06TN+/rNPTIy+axxv28wDq+1ebhE0H2bvJ5dsQ4Rq9I3D8cj3b28ZuoFuzEzTgdhsV8x2iZEpJPK+Z/vxU67+1m/z8//H/5lDhFlt4UTy86fVNuAVrUgsBPWJRiNyLXI6WnVZnhKkkgSkVHS28wUtqV4HppRKTK2F7G15R1ptsroBA966DZDLTNIek64yJJZqPnuZaWs+3Kkr8X3xuzrRd5UuVZJkotNnbIioucrC/mpLiiYin7Ntzru/e2OzLqnnm2++oSKkZPZpeudrTtCknNY8QVe0hIv8QfDB4Aw4+qk+/KY2GxLEdYbnwnjOJk+nikblcDgYVUF9syRQo/UPG6+zSe+5R1sQ8blYHK01L8DGucwMmy2hTxz1aIBQ6OhDNOAkQgw99XbPza9/xNO/+69x978X6qYnl+yUESOGBR8sq2AFuYif1bWAQbEhpIp3DaELvlFqrkSpS77V0Pp232LBtkS1V1vAE3MP1WUBRW2BQOP7W+IZFt8rUS1nu/A5TepPEOvwvuN6tw4qBWQixsCmr1xvdvRDICUhRUjRkp6F1L1M2MkyFZ8ClJIZx8w0zkxzdtmZBrE3eZiWca+GGMS0PVeDXLA7bz+7WdRqHDWNJq8SBCFSstEO5jlzf3/PP/+93+M3fuM3GJ7crk6Y6qIiTabHP7lXutVlE3KdHXGUpeWUge7mhqvtFbV/iWwH+u2Oznf+akpsnt4wxcjxBx9QfvQRb7QS+ojOVhXmWkkuBKze8inUR/IVLeGpqs4NdxhehFwsW4yL3EpdBhXEi8YxZ/ouQbVKSLDpaWuNQoxt5i6sfBWgLS9gcYiAS4q1HxCaBTXFhvSrnyYFC+RBZroEm40hN1309Yj+HLsgaM0+aBR9elw5HQ/c3R85nTPH48T5NC1bR1abFZpAMXjlLi6CLqHBK17d4yt0L2SmsPMi0bjKQW1AxBh0kZItaB6PJ7784kvKPPPRhx+Y5JDXUnXRFG7FnS6keG+sA8EI8BfFYi7WWJJhg/bwMGVC1yMpoqkjdYlRod/tCNc3yG9+n2f/xn+Du5QoyRLEUsu3bLei1Lza65IDYkMzNUMWc5YiJk+HRi8yjYCvXsHXakkjwSTZpFpbFtSWJIChShIcsTYUvKglqskRXnVUjog7TytIG7LVrOV9st0UFNlUdlvYbgOBbMopYtPCIOSCtctjpOsieZ4o08Td3WtCHCD2nI5nEEP5pRU/7vLlrWBuVAF59D60IU9eqa/KJOr2bcEpWLRGsAnukpU8F+4fHvj88y/oQqS/uXLEx1HSaHZraaD5tuL2q269RY3KVWpZfnYphSIBHQboNxzHmX6zZQ4RSRHpOiZVhus98/4G+c1f49m/8be4i5GaBM1mu3OtpCqmOGFoADXrkngL+th2jcHgiVJx/qA5xUBdbLedsxhto9pczXYTvvygFpP5E0twlt3oGLd2zplhsVtBNYLYxHSjsy9Nv4agxPfDdjc9XO927Lc9XadIyKaaEnzkrdgzTskTsiCUkpnOR+7v39Bvr+g3VmAX96eN+9yKStU1QW0SkLYRzxjM0b+nAXVB2vY99x1aWBWFjF8tVTmfbKPeNE68fPWSn/7sZ/zWj3+Dq6vdsla1okvHOmBtc9uq2Epu+5pJs3nfWpZOb86ZqQrp+oabmxty94LUJTb7K6OiefK3ubqidh3zrz2jfv8Zxy6x2+yQ6UQpsw8KVqIUQoqE0JnG7AVK2RJx1UZtYInfVQOlWr5VFkmoxj11vx2UmIPPCYdlNbB1bT2+pAa+rM8CsOJTGxdXEfV743WfOGd6pRq+26be3eJPwRNRQVOkT5EYm/wGixC0lyrQ9NmwqV/UdFRT15PLbHw059K0QO/ubsUvLz5sSomqhaBhIfuKigUoF6dWhRCDQ+gXyGtVcp6hKtM4czqcEVxOKHpr09GmIs2glbZ1Ymk7tUPhCOdl9aYAqUN2W8LTJ7AdCFdXpK4zflfXUzcdD6cDZTrxxf0b5pK56na2hrEY97GoSUqYZpjrMHT2g9+mOag29NL4UjkrBJsEh5XXtEB2qsQspBQW0niTORHaXmo3oOoIkxr5vx92DjIpjZ+S58mpFZYQ1+KoiQhJAtf7x0HuV3V1XWC7SaZflwIpKDGoa8haMLahs4JgGzFisjbOZrflcJ7Rs/Gctba9W+3Z20hTjM0trQcVrEBSNS1gWxO3BngJ6/0R8SGqVqn6I6s5UzWQS+Vwf+CLzz/nex9/7EmVJVkmReLINSt6sJa4awIy+yDRUnyVQhEhbrdshg1KZHu1Z7cZ6FzFoIRA2m8ZFe43kbsEp5rp+x1xKlCMDiFqckhNNqRIJsRHA/L
rWblo9JjqhPjqSHPtKGho7bKyFFKlBIqpUnsu0dp54vSXFqwg1LYFSVmVErwjIZc8dUepfaItvke2OwzQpR2boSeG6qL6SgxeV9dGC28oerRVhX1Hv9lwHivTdDZUUTEfQVkQaPOX689rBWmIgYih6G14EsULJ1alksaprtWgbfy51Mo0zZ6gZg4PBz799FP+xu/89rIustmubcjSdfIeD3hLYe4C5GqJW73wuxICabtl6LdUiWyurhbbjTEyhEDYbzgV5XUHL+vMuWY2/Z5AgdH8bgVHg+3gaalo8ndycX8ubVcdgYNIVWEudT2+stLBarHUoJRAjcaJRR1MoaHPj9HAR4OWEk0QvVoCEWPy4S1dun7tNMUQ3wvb3Q4911cDfdeToqvLiA2lBlWIQp3bubNfqesYtlv6YWNLPg4nVE1+0dY9ty6I0lrpsCavIkJMtkShokRdz7io2WxbpLJQQC6MX6vRBR/u37DZX3M8CnnORv/YDL6CVTzpqu43BA0NKlvt9pJnWYqhrrVxkRErJLoO2e1Izwqh64hXVzY4HqyLLEPHYRx5kSe+uL9jEmXfd4SaEYnkWm04NUak1nXLlcSF6mmb4EqLTH7WnbstwehZtck/OZBCvbi3rYvhgFnrpNCScPO9Dbiu1YtUFfrNhhCTUWiotPXoWmwFdb0ELyyKvtOm3pmghmAJapes2uiSIFKWdq9NgyvLLnttoUcQF4Y1onm+2OzCmtXTEvvv4pviIv8rn229O8bNi0EcKZMFXb1EHUvOED0gl8yTJ7cuZi1eTazGvxCs3egXOJrmLOvCFVoqB3xStO9gvyN3iXnoiYNxaaTrOFF4yIXxdOT569c++NIeET7oJdSc25k1Y1rujl2tImmPV8Qqt6pgKH7T22MN0N5iaTzcENwRVjW02AuF9ZdjAkHohw03tx/QdbadBudFnk8PzJMNitmk+bxUlCklNsO3n+Ov4uq7wG7b0XdpES5uB9ijqT2C6qm6RJNT8oBr3M26PK+Fk9ySIxohviVDdoUQTKUCRb36bD+yabGuW6pY9FXtz2Z/xQe28pz9Xmf6vveFAOIJ8gVnCJYKuT3zy5bKpeQS4AOAIF1H2u8MUb2+Iux2pL4jhGi82k3H+XDgTZ15NZ0YNTPEuN6jUm3q9GJYcTkXbz2Phmosf6YJ/0sjVdvf+1BPW+fapkerV++t9moF5WP7bQEfQkx0/eBJ6VoYlLwOL4gjskaXCe+N7XYp2FBGTAaORStCWptTnMNlA55epMRoKxxFmOaZ82gSXrWBAdps9tJuvTPjgS4lk1xSi3LmC1mLguBBWeTyUV74coXxPCLR1lnWUqjFFqaIv9fF3/r7aDQh5OIUyerHV2R1tV1EkK4j7rdGS7jawX5H6HpiisTYUfrI6eEBzSMvTg9MtbAN0XUbPUmW4Ool0jYPPwJL7DPp8vf+1nwQxxJUahtNaeDBChCIF/0N4W+oFnKxcf477FdCYNjsiL40RKQQU0fJtrDApsNdhg6TDnwfbLdLie1mQ5fsZgbX0KyeoDVQqNmsuMB77CISA/M4k2tLmGRB7cz8jFJl6jxxSehjjDYHom17l+l7LjYLy9zBUjQvrtwTsJLJx8Lu+tYR2cjt7S3DsFnXWDtYZIOsFwWe2614sWs+XMhaXW2iDbuJ8U9jNCnJ/Z4iMPc9aegJKSExkgMcTifO5xPfvH5NEVySSVxZJ7vCQfEFGdC6GY/hdf98sn7o6v6gBYymE83iFwzLD0GWOYbldrXcYDn863R+y2dEAtvdnt3uyvOG4L7KhoPH89lWUJfsNqF2P95xvTNBtYcFKVo7ojmolko3R5ezIpIuEkQ7wKqFaS68eXPHeWr8vJb0OSivbrBSH/3cZSOKP5gm8k81iZouGBpqCaUu37cK/tsEsFW7dtN/8IPvmyCzZ7q1QnVtOnEv01rla3A3RKwCubpklSfMKsECdIzIZsOpZEJVclVSVSRnDuOZh9PEQODV6zc+1Zdd+iKYkQVbixaSVcMqfl8u0IWWYCjNCF3+BddMlQtyuBuaHfPQaE/oxSELwaaq2zlbAj8mn7K7uub73/8+t0+e0vf9gpgc7l9zPp85n88cDg8cDg+oowabYfsdqcmv5upStKURMXgygh9ggHVwRyWYviOOSkvgdB45nkbO4+Si8y34rCgKLtZtxdlqu13XEWI0xFUsYQjuLAGGDk8sdHkvdrjx9yCUnAlE6wCI8vTpU3b7nXMJzQGqJ24NJFxQGV1fr+3pbooTxVtiqFMBoqBdIg8952RTtfS978buOVN5yJnxdOSbN6+Y20IKsQGDWoyrpW2rTlxxU3UUSriwOyMjIY5gK9Fts66IkNrXrV/v1T26Bi2alFd7Hp4Y0IIZ9P2W6+snDMOGlCIShFoyp+OB8+nMPE2uEGCGb6tj3xPbjR27zdYQtGIcXPHBT8QQqKrtGVvC1IbCzueR0+nEODfkxCkoiyLJikrV6iMQHpTFV8wuwVQvB05MVF3kMl1bQYHmc8+nI9urG/scXeKjjz4ipriaur2cxQKXZ7NnL49e11NjD/JlaUcuWW0MlC6S+8QpWWyqXWQYBmLoOGvhYZ45n4588+YNU8nY6JKhAKb6UAlFQWzrYWvZX+Qe6yR9+xu/58kR3jaF3bpVzRe2edtmj41f7R/uInGQFSjwWBBTx9MnH3B1dcNmMxCirWAeTyfOpxPH44lxPJLzRCmVvuveC9s1br9LHlKMsxhso5J1LzunmZjfRYLbrYFY0zSRa6BqQ2qcEqIXRatET1J1sduAJzISrCXd5JDUirmUVv62IzDLe7bh58o0Of8aYRg2/PCHP2QYhgVxXeiE2p7pRXXlsdq6W/a+ixbbtrdwRQ0Fl2BJetj0nM8ToWRKCfQCEWE6z9ydJkKuvHjzmqK2Ct30zJPFhmpJfy2GtGto4NRqBW31eK26DEdrNTBLl/BTV+qD/1NrgRIX+SlItEG0NoAIeHIalj+DgQLX1zd8+OH3ePLkqZ3FGNBSyNPMeTxzOBw4Hh+YZtsEt99fv9Om3p2gAl0ILqNTqWWiaKZk2/CBAiFRZ4vFMUZStEqmaiLXzJiV43kmZ6VoXB7gWq9iyZgGFvFdN7yYEtEffNtIJT5p13f4ytFMjabpZZLfbVhKXY/REquUbO2qEYrtgXZdsqGjaMNSQQFvhZsvsYOxwPUOY4Ol16XCXIx4v91uOR0OPMwzh5KxqWTI00SMiRR6aq6U2eR+upiIfeA8nU16RBRqcGqC0RraViOtniS1O6bqqIdrlLmeXPQEoWgb1NFVRoZ1WhTFJCI8sakL3cGMWCSy2Wy4ubnl+uqalDqEQB8TT6+fmbNQ0z+bJhOeH88n7l5GfvbH77S3X+q1ttxxpFLIc1kQtBg2CNEkaSQ4MzRyPI8cjkdO55G82Cx+88122/2/DKrR6SNdQ1lkbQ+1CmBo7e/aBghWHqt4pTTniSjCPM+klPjN3/yNBf01bU/7saUsGQoSdUEmYkwLkgnCrIXzPDHlmdg3xQH7zFMyPt+hVqbTyFBhqIpI5s3pgc
NpZEvgxes7VJVxnNCiRCJTmUmqlDzbLvZF8NEXHYj6VLa7v5aEY9IowQfMhAskuBUP1WzX9PZYzl0xXRSjT9iaNUcGrI3XHsf19TW/9ms/5NmzD7m+vrZ7XG25xziOPDw8cHf/mlJmjscj58PuvbHdlXeHFfmTyRWdT2dEorW2Z0NMQopo6BAZmErl4XjiOE42OIYXNOsrA2s34AJsoeu6pYsksqIaC9zgEKHWNkTUCiFZbFNEGaczcR4AZb/f8+GHHwAQoqAUV3bxVr8nfm09aFvs4vgjCrZ4YZ6Y5hlJLaEOFAnMIcBm4FANue1Kpc+ZIIm7wz3naWYnibv7B1SV8zgiCF3sOE+j3Ytia3ntUEZqDRfSg5cJs/1bVLAYLn4b1GWULObYKIauiOqCAJrmqlR8DbjPPjWN1YsCdRg2PH36AR999DFPnjyxeMjans45czodyGViHEcOb/r3wnbtXCkpKlptTmO/2/JwfyRIYr+9YTwVUhK6oUMlgSTG2ZY8ZM1UiaxNpWagFwVRA2AbeVHEhpO97Q7JXI9rd3a+pbIllm2tentdKwwsAT083FPyzDAM7Pc7xvGEqKHuEiwXqcFb3iK2UEhkaVv7KwKmbtKGo7quo9bsPGN759thYD6PjJPN5VSFfrPj/v4eQuR6M6BOV7q7e8PV7S1XN9ecvxmpUijOnSWD9GHlPQNzo4JcFOx4EZRzG5Y2kw9xpUs0NSUI1pWt7Rl4hxxQMZ7/0gUE2qpeiYF+s2G727Hd7OhjZ9JcCOIyvcYpn2l0t/CXQVCHwXbpooVaJ47HE6fTiXmuoJGu23BzfeWcpYS1wo38XFWWNk9pMgiebK6BviUR1RAZjGTVNhis6xvb8IgboRb/WZFaE5ptk4mkDKgd5mjImFbbrhKC8NXXX1NL5ub6ivjkxqosD1zRSLXkAqHrlyNh771SqpLrzKwzTpf2XcvFE+AOJRBcM3McR4qadmBItqmkIUoPb97wwYcfcn11w/mbkyFb0QnzvvVGkqELAhaES10rpDa8pCy0ieZP7WcJNNHh5TMYvF9QVDNCRGqi1rDwpoo6IyQEYjcQYkfQtuGjpwsd2sZ6FUjKti9M88gm7tCT8Cd3v/pKHuA8nri7L6RoQd4CUVzQ/r4bHJVXYgoMqaO4+sM8Vea5kstle291aoC3dAxx9Yk2RFyBIYgHHht4WqIVVkRoDdTsq/hEqVKWgIUnoZRihYk7wdevXzH0tnt5qD1d9FKsig8SYcsGSlyn6l10vEnZtIG/gqH8uSpVA93+mnk6M2JBRuaZGDuOhxMhRvrU2xHNhfPxwNXVFdvtDc9fPreBKZ8MDxVq9hV4F6ia4c3LrVuG7FJSGzZRtSUVfk4kyHK38RZZqRUJds4pBSEZalbX5EFpGpjQ9QNd35Ni8uHJSBcTROjTnv32lg+ffWRKILXw5kXkn/8X74/typvJ9TeN6jH0Gw6HM13aEG+uOBwnUopsY0JCj2Kb/qapUrLdq4usn0XctF3B0CZoq1UtWTLUyoqMhp6K45la3XarPVvTpHVbM1gUrZXz8WjPRdWS//OJ6/2OfujoOlv6IN49soUtZruhrPzA6oNR9UJ2SsRap3Ou5GpoaLc1252xYdjjnImp53QyYMBsV6FUzscDt7e37Pdbzt98ZQshxHmFbt+S2u1ZiWcNi8Fcs7+/FX3K1YQMgg/+BAyZo3ERS7UWPYpNW1nLudSwJLAWGwMaIjF1xM432hUhxM5kwSQgKH1Utv3e9LhL5bUGPnkP/G7JIw93Z9AZW6QjbPo942g0hO2m4zxlOo2koSPGHpFomui5ekeo5QirrdoiBh+fk3VyHhHbB+95Ql2+Xi4UD6xTUKrQuouEbJudtOUaVpSM55FSDCB6/vw5b16/4qMPn7Hfb1GtdCESJVk8cftPMToyvPrcWox3WgXUu6/Fi85azCfHbmDW4HkD1tE5nymlms/yfKFPHaezgVixdZrVOgCCieuH6nJ+bdMeQPAhUWDhNtc2N6TO4NGFeiM+t6BqdB8toNVsb8pHmwVC0Wq5mNWUft9FjZMriRS75ZdIRwibZcOmYHTRWjO1WK4W0rvn9N8tM1XVblxWSs3Mc0XCQNfZ7t++3xLSzt64hEYPpUt2Yxr/qX0QYTW8BSZvJiXiwsstpKk7QYeR/UvN5/rrXziwgKMrGcAg/RBs0GSabIXnV199yfXVFVf7nVsuPumsnvSZCP6kk7lkEd+4Usk+5WbGb4cohiaPZaLNwbkpi7EWXT8/tr1HfZJ6mibS1Pn3NHjeXkuQRxVRu0fLc/H3W0vbM8xFaelS1SFim7nWiq4iVv2VSo0mrxKc01O1sGxkCesucCsMEkIHkhztkgaQoVoINVI00UtF8i8Yy/slXe0eZ1mdxnazt6Q7dLa3nI45FwJWySPRk7bWHrWg0Wx24ct5tHr0bPwZNIFjEdYBkKWCWG3fKAZevdeZGgtJow2paLcMs03TxMPhnlcvX/Lxxx/Sp2RJIVDE+NAh2GYcEZgx3qqC0RtUyWU2jvKiXLG23FUrqUvMs3n0qkqZM6Gavl4fbQJcnRs2ns7stjuGTVqE0q215XZfdSHvL5/XE6UW5Jv9arngSDVeWhu2vECUtFqgF7FCUdQGZ0ItFM0YnSgsPiKE4N0cc/RCJEpv0lxtuKFW6LaU2bYTTb28X7Y7V0zpwPRdb25vOY8C0oEM1iGqAMnXdUYrqJzi0dDzJpcEDSlZkdOG3LVg09hZS6C/nBSl+Vsb3BGarMzMsgpZbNiwrW4+nc+8ePkSLZn+h79GSpEiLkeohoBrWHOS2Yc3DMcw/t7sW8l8etM0m+Wx7U6TddiqWnJR1CQBYzJFjbYBZzyfmXc7p+GEJclRXMi8GioGDXXyovEiWTJUFJfOUQ/Q+LY/SyAXUMUL+VJh9q1CVDu7tla1gQguut5iXoy+ocqGeQMdQbpluBOpjsJOaKj0Qd8L2y2+dlOkI6iBH1e3H3AaA1oEQk/qHUTxDV8ES7aWJB3xmB8WgOZbqbfnE0tq73ZrazSb/dtl1DgD1MC7B1qY55kYAyEF43mnjpIzUy4cjkeev3hOniY+eHZr58QlIHPO9nMCiw7qOE0rz1ayJ7kzqPO23cCb/7VBu7AovIApNFhbnRY1FkUdrZXpPBr1brM1RaEFHfXh8VpXcX2Hi83frhBLe81HdICFSriCgXb/6vLapfgSGQGC6R69TakC7yCGaPlLsSF7we3Yl1UIQpSIOqVGw18iQZ2nmXOwtZCKImFg6DdAIIaO1G2Azt60etVYK1HbpBfmtFqrXldiubZktQV2eHRzxdHW5hr04t+WKHgSiDpPxad/XfqhBc6qhXmeOB5ta9Xtzc1yE83N1pYxLImw8ZOsPWu8lEp2z9RkhFry3FKOEIIlDw36lgC6UhZqtW0u7X2N80wYJ/phwzSdaTm4tMSofdxHDnI9xoouO8ZF2kyDfMe9M/Q0l+rApz2cGvBpWlkkJtqUbgyR1Byk68S137cEXTC5JtVgm1QIJJlh+tU7SsCn7wM1gPHOIt1wZckpl
oya05ox2RF/3o5oWPvBJqTVJ2nX6C6XZx5Yg73VCW8xwhYvuwoiu9CUFzQeqEQbbdW+thp14uXr1xweHkxmqiV7VX1g7/JnFbQ450tgzubo5+yrR4NAU3lwB69qsi+qNsVNqV70ZVeNMytqLed5NlpHN9i0bqnzcjuaJrFNdzv3sSX3SzhpxaC1kBqnUaKsNxC81bQmC7UoWYoreYDUatSMWpYBwnZ01hWUkYDZbQy9/Tm0xM1QhFBtGKgThWn6ixvcX+GVi9p2LT9zKXTsr59yOitaLdDHBLaZx5Pw4OoFzsOTEAmEZfHBhQnapet/3g70rUGygAoeipaiSh0kqBW0uM1HQjREKdfMPBeOHuiHrjP+7AUoUPwNGYVPgIIGKwwbqpor5DyjtMl5vAugj2zX3nsA5yma7cqSfOZsWw1LLozjROp6hmFjSURwVK7dD11PU5sDsHPfOlVmp7Xqcr+CtOzIbmKQ6IoyhaZ0ksVXUVYrtIyqE1qaf9FiNf9rXM7g/jcZSCBtFkOJIZk2sChJCkznv2Ir/PNfqmJdi9QG0QLd5pp+M5OnCtKRukBM4gs91sInBNNeNgjb84RWDUh9O89i8SjNZh/53GbVl2mUfQ9qNpRzxjTTzXZTFzmPhWkaOZ2O3N3fs+l7O0cu/N2oN8tbc5g2e3FgRbkP1xaTLwsxLEPZj6TdxBYhtVgvEux1Vg4D2ZVSVJVpmggpcn1zw+s3r2gzJy1nuLTbR7fhUc7QElpD9yV4wep5WFvp2xKPWpXZlymtlLSmZuBqFVUWu+xSRwrJfK66uojbb2iDaiJQA4qvBw9/iRb/4f4IxbYo9MOGze4pw/aKabIhn6zW7qnFptCLTxaGFI2zCUgI9MNASIlp9ocnxT6ANA7amogud/PRfdZv/R0+16mCyTfoZXLQlv+ZaeZizvLJkyfc3j5hu91aAPNhFfcbNtgd7Q3UUmh+B/wQidiGokUqSJcHXkvher/jPM3MCpqUPM82XSum93Z/f2cEZQlMJRPyzEcffsjrVy9R2uKCZrDrXTHCvfFXDNBYD7Y0/dTqDWXf5CML98mqmYwa6imAGqogVEoxdKoF+RijiWCHRJSOJD3x/8/cn/1asm3pfdhvdhGxmt1kc87Jc25bVbdYxWKVRNmgZBgyBNAWBcM2/Ga/2ID/LfuBLwYMG7ZhGLItUID0IFMiIMmUxGKxikWyunvvabLdzVorutn4YYwZsXZmnrz3sqruzTjIPDv3XnutiBkjxhzjG9/4hhqYV33XJUAHDfKk/NE4w3Q6fcikfmmHcBg9TdtgvZQbLi+fknSWcYyZHA1WxvOolp3IHhkr0mihOFISJ5KKMOaEaC8bfw11qh3IoYT0pVJQcVRZXxlFm5SjagV4Srp3azneWlHAmOaZU99zPB4JTUPTtKIQoE2DOSf1GPL+KWco0qVeHba1TpxxKQvaLqPsFjMieIdRGZeKstX3MEY6nvt+oCZix74nGcPFxQX397e1nwH1wnINutnqdzUAWJ2oQYONIhtuFQARDnQ9zwJYcjJEo2ufVlq+tZWvWht/Ku/a461bBngIHUCTLR2RiBMNV6ebliMynX71mzxI4B5CQ7dp8U2HsR2bqyd0p8w8JjANTRtwDlwjJc/KfXTBE0SigVwc0RSoShPKMTNn/4aHd2UJys6SA5Cgatns6w1QQKF2qgN0m46b2wP9aaA/nUgp0e4vZNyzk5HMVRGk3u+iNiYAgPjlGKU6sEhg2epwdNCAnpxUrjRp1suK57abEv3pJNfgLMM4gnM8enTNrW70Z5e/2FK1XalH1YrKctmLyoS6U7IGpkuJtRRE0gdKKkvDmUHOM5m8UG8EnZLgxlqjvlc2eovHuSA6zQqaiOiAFe3TAt7MH4Xftc6z2V7Qtg3FGMbo6MdCMQ3GZaE0GG2mVJtNqsoQmkaROa86nYbaDEUBWxw5xgUJMLpeSzOznMGZs8kreLNUtkBusnIojbyPtTKY5Xi6ZxxHYkzsdjsu93u6tsU7X1lca7NyNovmeGHlttaBOYL+mpXCovYiwIMEsLv9lpwk6QshME29BOoKaE3TxKxvmKJIXz26fsTNzRuV+pS+maplWg95etWvLv+vYEhWPyAUm2RqQrhmp0VL0jFlTv0gZX/EhtFrijnhksEViQWcc2y6DU0IBNfgfYOzAee8VJo1iJUgXimaQKrPy7ccP0Oo3yBdXJ6UHcc5c+zv6PtROCMIL2meJsZhIMVICJbPPrvm4nIHWELTcnV9zd1hIpPAFkxWHkdRGL+wZMj1xi4POtKJviZQZ6mUhSpCJ9N1WLKukiFGcVSb7ZZnzwKb7VaErb1XdKys0LNZlQOEcFcduLx/5UyE4Lnc7YQRq0YeU+LNmzc8++QZRafwOB2bV9+3P/W8evMKp2hVIZPSxDj3MrHhoZ/EFgWhjFIleH+5QxyqsqSKSD0tt7yIsSVTg0qjEYAlapefraRvfWOj5dEmdMumbqjjQcWYq0Ebqn/PWu5wxPHDFvXLOrpuS9du6TZbfNsxRcOruxM5Co9G0CWY5oGmsRQT8K3DG2lS2OyE09wPmWxlYoaYhNw36xw1RK2b1yrBAXDe37geFUG31slM8iJ8Y4qgiMaL6LSzThqxjOHTTz/l4uKC3W6nAapu0FVjDrERKXGtPKScovBYc1wcdVHSpqud9GojVxcXTHNkmmaKd6JBqNc0TROvp1G0CIMIkGcyTdfSzg1Jn43lMGdJp6m2m1fHqeccY1yD/CjXWgciKHwrNlyQYoRkvBQ0o08Zb5WjWvISc226LcG1BC9/6iharzQcq8FEDT4o0mj4sdjufr9nu9my2W5wTcPL+4HTn39FToJ2xNPIMEw0wbDJAesN7ZywLnCxv8SHiZhhnAo+V6Q+y31aEA9VeMjvq3jo2i9fK4IKZFcozmDtSkMxRjnXVkZ+iX51xgXPD3/wQy72ezabLc55RbCzBh96m5fEWTfQxZbkxltT7VWO6n+y2u7lfs80R2a1FxfzQlOYppFh6MFKslZsIZVIMeCDW+RuqFdszLfaLme2u0irlcJcolK1pLPfqEPICogJRiTtqLU5DMC6JNMJ5cYSjKfrOrq2I3jd6J0I8J/b7iKDmKwGvuYjsV1HSoaC8Pm/fP6SYHrR+M6FON1yd3dgv294+uQKG65olDN8cXGNGyWuGCf0WRY5pZQKGI/zFaWsA09YTbWobz3zulb/aRuL8frcc6baYo1ysWW0d4wJ7wOPH2/4zm6LRfYCofKBBL3K/de9f7XUFaSQEn7BW0PjPNuuW7Tbz6up1xeXDMeBmJIoMSioYa0lzTOvTidc8EvCNKfIy1cvaZpGHGJZ62dVFKbGJclI62idzFePutUnnRpnLYvPrWN4q5hlTvosWFQeTOkEUSZleaeNmip517UbvGskyVRFB1FZMBoHabXDOQHkSl6+923HBwPUjGOaEjFNMGRKgHkqjKOgNAJ5i8ZVirMGMC1zmplTLW+LLp21GesMjoJ1RR9mL1lOLW8sASK6yemGZk3t0ZXzKnFFNnMmGxYtuvUu
CHetzkfe7/e0XYs1VkWE1UmbquO5ilCf3U4N8hK2gC+Gtli2W2iCTNwxdpX2SSkJUuA9OSeaJtD3PU3T6rjX14CoD2SE1zv0DzNfYzToq1wsa3GmIAyXisLJuZUiD36KOqYwJpqmoVgdmFDbIbUjryIgFiH1m1KIJp81sVm89TRB5nTb2p23bGhldcKmlg6LZqLyQe/f8H75xxgL9/3MlAfsWDj1hcPpRG0WKbnQH3pSGtlfdDx6dEG3aXG+YbPdM86ZGAecjYKuqmZm1d7LZcYox2jdUGuShaDY1X0sHsKolumaZtUyzyLjkQV9jTHStC2PHgfarsN7T9O2i+1CRdlXtF1oOEX5ogWdSYd1juADbZvZdRtqxyVGPudwOHG530Mthzu/oDvOOaZ55pvn3whvr2a+STqJq/xKvb7FSWq5X8KR6iTraw2r/JkS/FMietnoKx9cymmChBQtlVX5nqzZf8xVi3k9DWd0pGZNrjCrpqSeYd1cpElS/MXHYrv9MBPjgdMYwQe+enGAJBQiSmEeZ47392w3DZ989oimk4oViH7xnCHPWRU8DLUkLnZrNWE528jPEZQaitVITL8WX5AfJN3WFIzSXyoNwxrPME1stlsur65puhaLoWk66R6WmqoIqtdgC85BW/G5pqhMkVXbb9i2naJSsqnFGLm7v+PR5SOE8ykbYs6CvDrvmaaRr77+Sjibes9TShK0yofp/9+1XWuEp/+u7TpBVbOcY4qJyU2cT+NKSZofSRawQjszkmBYNVbRMz3juwPeeg3k/ZJEmAWZrX/k+9Y6bboqH4XtHo7S5BPCiWQCr14dMQy03kHJxHEmxYhzmWHaMEUZey73zWNtwpgkdqJVoeIk4YFCSaOsYZWdXEy4YtMCxBQFtawRGk9FOWuAsNCk0MpDloraNM80bcdut6fddGK33ou8mq0wQMYpD+s8PK1vX8jkGHHe07ZgnMf6oO8j9zSnzDzM5B00bUtQqqL3DX3f07YSL7x89UqCXfX3OWdOpyPOW0VBi8RP9cJy0UhVJs4JA9DoVmC0h0ZAjNprYMhqtyITmrPYrdd+mpSK8Ks1ArZGhtbEmMheteiN2LhQGLXaochupcKshQVJautz9H4YZz0+GKBOc5RomAIpkXJiHGbR8dRNMMZMSRnnLE3wtG1DncdcN42abbrqCFXDby2vKGLprGaKFW5enVcFpwqI9MNZVosKl5szJLAodC7Zl5FpDT4ox1KCVLFTv5YBWDO0M1ISGaPjQEX7q0lZJ+6oQyqy8cUYaX2run+e1ATGcWCz6Zjmmfu7e+HtqU6eaDMmfR8tAS0BFKJoUB2bZn5y/dpIpZzfkhMppmVTpz6uyw1Yy6lLKUBJ1SVLNlrJ2d57FYV3q3aCMYpc1YdSJbcA6jmkmZQnnPvVO0qAaYpyTzIUEodjZJii2mWhpMw0jXhvtat9FeVfeca1BCTvqeEnFCsJg3KJBFXWny2biAZD9UEw4uSqA11mztdH1Kyc1FKM8rJEaq3ruuXr88Y8FAXkHLFEA9TqwDQwadoGjEy+kfcQJ1tlawxWSO0aEAAMw0Dbdip1cqeNGbXkKev3wL0YZGOpJMalyeatQ81Sqs/a+JKEFyYVAA1slZeb1amtSUJW/qjabl7RC6GiSJC9NJXoPT+/Q9VV1HTBmPLR2O4wRuIcGadEMo7+JB3Rs42QM3Ge5d5ZsZ+snccry6QmldI/UIPAqgFayorKyOZxdpOMeeeegtCDVlWVlddXFv1PASskUS80jafbdDRtC7mIgH7VUdVEoSbA5/JjS+WqCA3Ge0+L+J85Zilz23V4S4zCp/M+aGOoBDynU0/XVdu9F5DDCDCAkYl4Cxaia2Bq41O13bOjLAgddfCUIrir7QpYIhaVc6bYvDzL0qiTqRP5yBJbJg1QCxJcBV8RKKcbv5ybuhp9nfqwumrGfBS2mzKc+hGGRCpC6TNI57rRfaLbdPhW9UBrA46uJbB01C/3RO0Nw8KnrhWoWqWU57f63SWrAvXLlbYha1UHnKwxSj2PXKRZqmlbqcgiI9sf2q1dutKXoCSrQ1NgzLoi44Wtw/pMMZOI1quOdWUgpCjIqTVWNcmlWtW2LdMkfTPBeVHg1DXIOsb5/KjbvApHSCKfkestNcwyiwxfVkAgpYSZtTnLyuCKpbenAmTUxsWsMZtU6lLUEd61GqgqE2tjtZHkX+9I7REq+nybxX4tHzo+3CSlKE4NKnOUsnkV/ZampIR1lm7TselausZL92wtwRQhFlsjXcW2VKRSyNKLkxRRLiAvumRSelujg7UDV+dwmIp81IApLzejBgBJI3jvA9Z5LHoeTjsuMQsfb3koaqSA3tyipXMRsaPJZZlvDQZTZOOOMdKWRrhtzuncXkMIDSEEYozsdzvGOK0ZXdENt7A+jFUSpnbYFXHmBW0QqRt4lg6/ktYhCLLW2o2vlitkZg1ajUwRKlknWGmWI42yInvRVHme8w1ebFuDYtZQNSUoM3EemOdRx3/+6g/JzBNEGSl7OM6kGqBLyyR1rGloPNaJPBPAIratCGlFe6S0A1CWjchYaS6qzRrU15wlVPVra82C/Mta6qZX1gCrIiEx1TnTQdBA55Vwb5fhE1R71c8QJ1SUz5Y1uUpKCxA9v03WRr/zyT6KQvrGL8FpQYj5XdcRo6hgBB+IRUqeGEFRvfPrRo8Ggpnlgaomzfn/1ambohUQtd+6yddnUOg/WWMWpQPVgFXMUDeWstw7YywhNMrXq/N6lvSdGlyt2n/VvsvHY7upkGIil0jMjpi8CNvN08If3e62bDZeNW8rt0yVTIrYmjS3Gq0OaSBqYWmUg6Xztx7nydLyPcOy2S9JkJCFqcV0k6WKU9DhJorae+8lQNUy6fp5dtG4Ft8ieEp9FopiFqvte0muXLXdqu4igY4PAe/Fv1kraE7XtcxxZhhHmqZRzWhRs8g5rTQs3m+7eqlUb1f039WvUrT/QDmFNSilggeaVEkAqs+sNuWK/zZLclXBkbZthX9qtIJFrcIIqnyecOUzQOZjsN2CZZ4noS2UTEEklGLOeCODG/YXF4RGEgqwyzUklRWzRppuCzIByhq7cNyNFbqDmI2lTk40WlIHqywgHcW+BKh6n+yZXkKpflTWslYgnHOE4JfStHNGA1Q08VtEls6ChjVCLIpiNk2Dy0I3SYUFPVVeouy9MWNCRY/NElw2TUNSGsB2uyWVqL0LkiiWsrTY1oWHfPadxXbNme+reh4FtGdG7FZAsqy9NXWoytpwuT7zS6tBNtr0KPbojKVrOhrf6nWKlipL01VdZ63UauO57D9/iQDV6PBn50XmIg0iAp71QgWtK1zstlxe7OnaBkOmazzBOeYSZfNLsiF4ZzCa+VhrCcETQiC0gaZtcK7h5uaG0+GeOE/YphVE00pAaZ1wG6Q5XrrIDTDPE9Y4RVHFnWR1cnWMX/BeHgqTF5SlSoKInZWHMHSuiGbNrqV8alzBOK+NVjILWxyuWzKPlAqVWlGzQxc8j58+IZeZfhS+bpxn+v6kXabrrbQFQVI1i5G
oRjZbp+co8hNQkmRApExxljRH4X6ofFW1zhKTvI+tWqcStKYC2XtKFnSidQ0bv6Ft6pi3NYhJqghoTdFSGIxDDwzM05Fxmil0HzS4X9aRioVZOV9Z6Coxq1B4yXgK24u96DMGS6sZYElRNpsoD6APFpdXgreMbVy5uKENdJtu0QgW5BIMRctCZnFs3jq8ZqqUtRHJVu5PQREXmTbWth1N0xKUB2WQ5MovCGpFC+r/NSDL6+aJiQSUD2dFP3KhbSDldIuV5KptZBSwE6Tp9vYW74UrW0rh0fU1h9NRUShFcUpahEnqUbIBZ9ZMuhJ0tMHAYqXsn4SWIuhfwUyF1NTpb9UJJhbIyhgVVM0aqEpFIystwhRLsJ7NZqs6fPpwW6t6hLWMW5O2NdCQrx4iE7+qoxQj0n7FEIsl4dUmE94U2q7h6uqC4KRztjZiUArEJEM7jIOw0nSq7VpFVK0zuCANOIfDLZU3V5P2pIFcTVKdc3gj4a5wfpPEWpoYU8SvREXEnAuEtiNU37IgUXKNIh+sDaqLPa62m1LCWHk+pJrg2WzqNCkr9JEiAvbzPNM0MtbYeacKMtXPC1jy5NFjDqfjIi0V46y2WzS+UBkd3me7RYNK7Yco0gQYoyDdOWfilEldUvUIlF5VSAhFyNawxmhHgQF8RbMQ23UN282OpgmEOgPe6Fhjs7JYReMaRSUTKcePwnZLQZUJhJYQggxE8M7StYGL3Ybrq0soibZp8C7oMBoZY+xqAOhEgaLGCdYKmLXb7bBekLlSDK9fvyCOE2iAZ6zFWwvOKJpndIiCoIXRZGmCLpUG5VVF0BBTFBsPAR+C8jyFR7pyUCVOqLJo+XzNz3yuw1FcJGdwvlCMp2naxaLECzm122YZIlTBj/osx5T47PNnjNPApINWYozM86hhgWru5iSN4noPSuU75hW8WBJS1kpzBQ9TSrjklu9BHaWdaye00C5qomBUQajIsxOs52K3Y7PZCZWKKhMmT0xVeDIqXWVLxhrxxil/2G4/GKB2XUPbBhm/mcXJBSulmuItqYjxXOw3bDae4A22CEOnsZLN2JIJmqL6NqiAdstmsyUEmaObskxd+PKnX3M6HJln0fnyfqTrOnzT4BtPMOC8JTQWssWUtBDS4zwDK5e0ZjbGVDTBEZogjkvRBHmdZuV6dw1QjI50LOI4pIPfgxU+hrFwff1YYG3dfOVmS2BhncXV8qYtWOfZ7y/5wQ9+yDSNZJvJKRKnib4/cTgcmKeRcRgZhlEIzCmpNVhtipFrEH1VycJrFlObu4QqUKdPrS6rluONooeRgvdoQGPJs3DJiAhUauQ9sinMKVKleZf306YsUzJz7DndP2ecbrm7N5T0vQ8a3C/rOA2jODRFI7rOEFJkTjOND1xst1xdXeJNwZJpG0cbLF5Lfcaw8JRdq92IyocKIbDZdLRdQ0yJm9s7bm7umAcpK8qItyCTNZqgm5bVZo9CTjMy8jYj/bg1ExZZNEFVhFTvvcc7pxM3iqKwslUJZcDUBJVKwagOrvI5k1U0qhiMD3Q659vqnUUDVGkS8GtyJZ9CaDo++ewzpvmSKU7M08w0DpxOR/q+X6oVVeeOYiEJUmGc2iQyThQ0sNVEriThoJZSyFFsO5ssfjGLQoUMo9D6FQVMxBgr6D+Q5gxBgrLGN3ShE7/hFKVRncBsVlS7WIvJMMWBkmamYaqqcL/yox8num6DNw5bLN4EpmmQOdeblv12y6b1UBJd62m8UxFx1S/OhWKUyuTFdusm2LYtl1d7CoZTP3Dz5obhKN3DTdPKdD0MbdtIgKB+0loLcZJgbAYcUokpZdl4UypC+zBFm1EDzuu9N4JEreytopt+RX6M2KyW2GvTkbM1yTcY1xBCq0Hz+2231jEKUIyl6TY8++ILprkn5USMM9M4cTzeczweVcYnV0xMaFBRBiQY5xbbXSp5CNoX57TYrvQMWHLMZLVNsoxotUZGbyaFdartOuNI0YjvbUGGrTqCbWRkdJ3ImAQUSTGJEDvil2wxTFNPTiNj/3HY7hST9C+4hmK9gARl5GLXCoi137JpGuZpIDiZqtc5sVuvo3qLsRjnaIKU2UMTaJqWpgm0XcMUI/d3B168eMHx/kBK6cznZtrthq7t1E9K4Juj0OBiNJLvmspXLVhFwAUwczRBejCkcUiCZmtWNR/rFOBZ3K5UawFBLI0kTskopQsDJnB9fU3Tdkqdq6V0yzTNkkh5j1P0KeWCaxqeff45MU0UsvT1xImpH7m9lXHjUnGSiV2krMCAVk+dxTlDHRELErjmOhChyDNpncooJklOc0oCRFk0mV2b0kyGbA2xFLzzyORL4QvbEgjGy/NTVqUCSXwrhiJ7bUw903Ag5UK7+/SDNvXBALVpmmXcpmyI0DYK3RoLxtE2DW0TJEvRrNSFFkvGlYw3ht12i+s6yahDg/MyVu/U99zd38ns6HGkPw7KpSxLwBdjxDiLdaJLGbynDYHGO7xTFMufzSI2kkk536zZV4Xta3MJspGKk3QrdC1gCyW7KmLF2hBUFuNzQNO0FATKLqzlwiobUlGA9bQcIbT4ZgPMUBIlRy72Ox5dXZGVEzmMAzEmjqd+aSLJJRLnSdakKBc21/KOlios2thkWPZyRFw4UwX9z3iKdWxU5fOoLI8tElynOZLmGeOtnoPDeCdkf9XJTGlgGg68fv0TYCTNW5ImCr/qI6aRQotznuAsqch4wqZ4muDZdB7DrIlAhtLQ1OlMKqvhncWGhrbb0rbiJL2OCp3nmbv7ew6HA69f3zANo2aj0kTovKftWnz00pHrncyEl7o2WmwhzSrHpuiBNLLUudPiuIIPGugpqm8QjppdBzUo/UnL4LBErTgNZGXzDTjpTD1rICpvZ9PeknLUBE/429fXjyklEfNMSjNxluSqP53IOhRhGAbGSaaxVEForGL/dpUkKlrCXigsRhoBvfPCA9QAC5RSUqRTtMppoaoXQvkRNQZTxFGSlLedavlVghKrGnIaHy3c0zQPTOOR03EkzR/uKP1lHSlNGNNqGVQqVt44No1n0zpCgBhHSpppfUswLZ13mFzw1pKcdCY3XUfXbaS5zisdgML94cDheOR4OHK4OzBNwscMYUV0AlkChIUOZRcEpPqhRWjBWEUr10Yzp2op0oWcqYUgq2Upe4amojz7nJfRFgByb63XsqqgOl23kZnmCIpam4wW2y3uoe2GhouLK+BCJ/tFcpoZhytOp56SxXb7vmcYBqY51otaUCrMmZyWNkUV/dzFdr2X0b3ZUOx5NaF2RStgAgIgWFh4k7rJy0ZvJBhNiZQisxkFbXIrN7sG83E+Mfb3HA8TaW7/mq3yZx8pzdB4QrCq5GAIpuVi39E1jhRH3hzvKWkmXF3idh3BWrVbQ3EO4wPtdsu220iDnZXAdZ5HXn39ir4f6E89/bFnnufFHudZAr1+HOi6RqqzTmIEBxTlZUq/jyTvTjkr1tqlmiX+1mM1saqxgfhcpcpoBamWzE31bxmWDR+z/I4vhu12j7UybZMlXtDmwlxUMrCW14XwIr8jNJ
pUJlKayTFydXnBPEujeoqR4+nINM1M06yURgHe8tKMZJhTUp8r52Y1ULY6kAc9D0kG83INTispyqCQiptxC/2EDCUWXJFGXxmKE0l2xswSt7lKmyQT88zQv6Y/3lAwtJsnH7SpDwaoVbuOYoS/Y7PoC1qEAOwcm67BIjpdOUUKCWsCJc9QIo03PLq6xG22WB/U2CK3h3vujwcO9wdGDcpyXLlRwMLvIerDbWFyjlkbsryXjMft3EK1rTwMadoKFM3GrZZIMfWWVQdZN34tISoCpSJUGCvTlyy1qUSKNdvtToSzS+28ZuFvVIQo5kgls3vvubi4ZpwiOY0Yk7AmAQ05yaDaFGfmOFOK4dgP2gCSiSkyDieGYWCeZymlp0JU7dTK67LKm6mEUQ2x5W+rWYw622Wh1ISFG4JqCRalIEyKQqWV51uEIJ3SyDTdcjq8YTi9xvtCTu7B/ftVHqJsmnGm4Ax4A1ivqKSjCZYUB+I00jhDaQwGCZoa79h0HU0B325ouw2hacDIRtiPI/f3dwzDIA6zP6rqR1l4lDZGYkq4UbRzg5bKg7UEVxt53OIMziWWhFc4SfIV/GK/lWO9FAgQfpah8vi03GoerkQde2kwuFKWDnewS1mnLFzuOto3Lu/gnKPrtlJhz5FSZnKe2e22TMMgmn3juGzy/TgteLsxRYKChbeedIiCImXSfYOUkdxqi0UvVKsBGdYyWy1hCRTKGSVQbFiTqFKdZZWm0oDeFC2VlsTQ3zGcbjkdI6U8+us0yZ/7KFm6a50tywhmawJtE/DOUvLMNJwwOVJasVtrpKLRNlKitKGl3Wxo2051qTPjNND3PXeHe/p+YBxGpmFaERYN9Jxz2DjKZq2223jRgvTe4Sw6lEG5vBap9Di7cIidqyVaSQeWErlZN/sljzLqp9RG1y2+8g9lU3dO9GGtrQ1E5uEmXwQcSDmqMSDanN1WAuASQf9sNx37/Z6SM7MmV8PQc+qnpVnSmEJKswQD2oyaU5YJaBqc2vfY7gKo6aJKOf6tm5zl+qpPFXstFS6m5ERJiWJlktjCXCzanFUSw+mG/nRL3ydK+eSvyxx/7qOUiDUFZyGo3Ta+YdM6nC3EaeJ4uMVRSLsOUzIO8c/bzYZiLK5paTZbgoJY4zRx6ntOpyO3d3dM00ycI2lOi91W/q+xhilOxFmaX4MTUKANga5rhHtc0ObWquIhfncYRqoGr6CvCuiUVbli0XU2tZLz1gKcAQNWo9qCPB/b3QXGeOqQkiVeWKhdRZ8deQtrLE3bYbEkEr5OGCSz6Vq5dvXXfd8zx8gwTDKxrGRKiQzjQJoj0zzL+OOi/VzU62MZWlTPS25kTazQ3hS93iXPqlU8s9qtgnIl12EZ8qdok7pQESLTeOJ0eEPfv8GYIMMyPnB8MEAV5DHIxZmIs2CtjJBz3uCDo20sJc2UHCFHCQpsJqcBSqHxjvbRjuQa5lw4nnpubm/46vk39IMgposY3nuNvqwRfZbGoDQXplEaU7quYbvdiQ6noYaa2rncSoewlkwNZwjqkr6v/18DVRa4vmCxy46ovIpSuLh4xDTd1SZ2yuI4UAmppFQFsbiu2/GdLy756VdfM48GayLOJZyVgEBOp0OcnSemQkyrEPlwOijaPHDqBw7+IDyYtApLL2M2y7p2pQi53tYuJzVAI7PaqJI+OVW5Hnl6clIEtRMkrJSozVSGFCPjeOTUv+b+9gVpvhf+Zt5+0Nh+mUcTpMRhXZEgVfmiwTtFhAynaWQa7nFNAzlQ8oQxlm3b4kJDcY7QbpAxqIZTf+Lu7o6b+ztubm4EIdcHdGm2OUMix3kGk7W0L0lK23g2m45N27GxZhGx0UcY64QkX8pJaRuK9Bvt5FVHaczaibqYsiZU2odBVQgQZym2a23G+/aso/R9DlMz+spbMo7NZielpzQhclRBgs/dVqsFwqmepsj9sV+mqcQ0M88D4zgyz5G5iArI4t+tFYSjFEXG1vOvF1ebSIR4X2E7vV79uqhTpdSSa6HkSM4zNuvs6oooFAShyiPH+1f0pxv63gAfR4AqlR1JroKT5rWmkSCx5Mw49ozjPR4opQUSJc9YU9jvNhjf4NsOF1pKgTnOHI9Hbm5vuLm9ZRzHJUGQXFXuc93ox3Ek90IxcdYQrKNtW7rWsd3taLzDqZMpRWpIVhOrvHCcjaL2ZRlssiBRtjavVVcr7yVUMn1fRQsNNYER2aCm6YRuxbnqBQsokKveaz0369jtLpliJMURcFjrlTqgVKksSFRMkVMvm7zQASLzNHB3f8c8RyYjwZHwYEWRw2iwLrJQLAFpZaNUvYNKEZCpPbo++vxVqaOctF+jVCWOhEETvLpWSvGa48D9/UvG/o5hsMCvPkA1CFfRGSS5Cg7nHW0wlJyY08Q8HVVtYUY4ZYngDNdXlxjf4EIDzjPPkfvjgdu7O97c3nDSoQ8rsLJGh0uDaZSxpmmeliYp7y3b7Y4QnPCy9VgmBmqVKqWT0FtMtQup7NjzLMqcw2BndCpNlBZ/laXChfrsbAoXF9ecTvM6ia0GgYuNlEVqsIIUXbuF4oSGVARo8K5g2ob1rAzp6hKDYZqTSHep/vX93S3jONH3g3B6rUgG1uQ/lyw+t24glYVSVv9adZOXoCLrpxr78NlLWXopUpLkyhSyKVjSUgkucaY/3XM8vmYa7/BBp2h+4PgZQv2LCQDC1bRkrJWuzGAtjRVyubcGGhnLF1whxQGDI3hHojCOA189f8nrm1vuDgemWTvZs1k25289coWN1nWkZKx2QVq3BrcKouCcXFqMUsJ58+YNn33yyRKcrrPCdcMDdS55cSCVj+nwLAGqEc7sxeUTXr0+kVKWkiYigxKKIRdDTIV5ynjXsu0u+OH3f4Pf+72/zf/9//H/5PbmFSUPWDPjnKoSGzToKDgnpb0alxtj4NE1xhiibiDH45FhGHj9+jU3Nzfc399rwLRyP1Slh2LF0Oo0h0W0H5mIlJKgotbKnPpqfHVZhD+mK18yJU/M44HbV9+Qy4FpvgcbSHnC/Lwm9dd8XF5esN1uZeJKFr5b4w0+GKxq2rUtOBq2bcOmNZQ4kIphv225aDpMaBli5vmL19zd3y0l/SlJ89/53HhjOJMzOQeohR6Si0wWy2mWUpLzdG3LSroQB1g5rsBSeqzIas3s69YtgZp+SuU7gZbTq6MpWBPUIUlC8/jJZzjbCGdJk5SUs/YYCwo8jjPC74PNdsuv/9on/MVPviSOJ4yZcS7hvRLf6khf1b/LalN1rGtKM2/eSHnudOq5u7vj9evXiyQaINJsVd9Qr2tB4605eyZRRFioRsY4UoIYdYNfgllRtzBFqikprzPgc0pM48D9/Q396RtSPJLT9qOx3f1+R9dJg5EzhsaL7QaJSCXAbh1d8LTBYPJMHHs2TeDq0WOKD0wZjqeBH//4xxxOR4ZxZIqiQynGuVI7zsvqpej4aP15TokpCRIdo6dpO0H/zziZuYCxlq4TDmuMnpTiIrGnLhXO7Xb9xDPXbgRsrYhjAW9WtYlsElfXT7E2kCtDxEoTl
WhhFx1nKrabEuzbHT/60Rf8qz/9c2ZO6nNX26147Xo5npx0ry4SHN7d3arsT8+rV6+4u7sT/qrab7BWaGimbuwLVCUplLECCFCog0/AIRPpVLKnEfvOUX2IqjVYpxxIRAoupchwOvLm5gVxfkFOp4/Gdvf7rfStNI7gDN5LgOi1WGM3FvvoAlOgDQbyRJoHdpuOZn/JlAunceb27o6f/vRLjqcTY5yIKdVMikoLAkl0VvS/7t0aQyEJi0mFOAsDmFqqfnCUZb+c55nb21tSSjx58gRyxjhFG1l12CV4rXSNsgRzxRgKQlLNNV7AUIhcXT2mP70gxUK0hRCcaMjr85YUFEixUJLBNw3X11surx/z9VdfSsWEmdBEnK2BcVUkkCrbHtE5Frksi+F7mtCO3NzdLfHC/f09p9OJ0+mkfpUl/Fp9gaREtgjfue5z1li8zNiVSoJRSmNKuGwE1S1S6nfaVC3uXACBw+klh/vnUEapxMS/RIBapV9kjrcEqN4ZQrAEbwkOYMYbZGSkZpaYBEY77ouU9H/60294+fqGYz8wxVkmj/B+3PQcSq8EZpTPJtG7NkNZt3brLmiPyB644JnnmQeTQlTCot6K1VHWcOKsNlPTfZ0pW5HZUgzWeKZp4HAYFcF0ON/SWbciUFru9aElNCIzhbH87t/6N/n9f/KPOR7fQBHtRWuhkMTobb2nVmdT17KYlMw80DYNu82GOSU+//xz4f+dTtzf33N/f880jJxOtflqYsozBoOrnZHegk6aqEfSYF8euMQ09pS8x+ZJeFFGSPslTZz6Ww7Hl4zja5yZSHHANsJX+RYg/Jd+dJuGphWJmxKzdDyHgrep8shxrcWEbuEqYTLBBzCeKSaOhxt+8vVzvnn1Rjb3Gvibing+PM7LQHWj15/IOEoUUUJKlOfVePEB0tRSqS3393fknOiahjY0iyzKue1WVtpan6nnctbapvdOYoTC1eUT+n5kmiLOOHwAl4rQFJI4+HlKeNfSdVueffYFf+O3fgcf/glf/fQvBAUxM96P8tnSGqsJlsUQFj4TiNTLk0ePFn9yGgamaeLu7o5xGBj6npevXzNNk6L3iagomK3C26WO6hAuel19GY6xblTWOFW4SKgAiDYqapBTZPDIMN5x8+bHpPgKYyYptX4ktru/2LDdSvJSUhIU0xm8yRiTccEQdi3OWYIHYyKZmbbRkcqHE6/vDjx/+Yo3d3diTxUnPa+ksPra8+Nsv1qAAWG+V/RR5QD1tUZly6qG9KC0j5wz2+6Z3od17vh61FLP+Sfa5Tky2VBUDzQDxSSuLp/I1Kg54awno6OaswADKUOcBRjYdDueffYFf/N3fg/nd/z4L/6EaTxgmPBuouoEG1MBAgPFgVe2dzFgGi73OwrSKDKMowwIUNvtTyeev3wpIuU5CxIbo470VL1ZVcqwxkoXel3jWrHILFJvUWevS+lb91Ir0wdTmZnTSD/cc3f7FZTXWBtl+MJHYLu7fcdm04gWZskEX3A+E6wqpxiDwxGsyFEKOjwTvOF0OPDm/sirmztevr7hNA5Lgiu80Yd2W48HEmnqU1eEU5rMqq63DDxg0ed0LpBS4f7+ns1mw4sXL/H+QIqRx9fXtF3Hw4XNDy34wfkYwFFFqMoSnBqMbQkeTv1EyQXvglTZnBPKVi6QVPZRRfQ//fQZf+ff/h8yJ/hH//l/zstXz5mnA87NeDdrdbfKmKXF31fPWPsZjDG0bctutyPlzPe+8x3meWYYBl69esWN2vE0jisgApRil2eis8JJ16dlCVoNFlscrngMTpvNC0YbeDEiurL0E+TE1B8xzFgTKXngcHz5QZv6YICa4izOxRi8tXgPXmVu6mzamklUmSZBepRjVKSZ5PWbW+7u7himgVk7mLFnGcmSXddMW95HdCjlZ5XfSS17IqPqmrZZt2qjHad6Y1IW4nPTNOx2OzXMNYigyGLWrLeKTlcPXfkghUpkFhmVaU4MU6YfhQvaNJlJx7ymnJWzWpjmkabdUkrh5uaOf/7P/wXed1xcXpKSNIYZlSsqdYO3kPOqH7acq60ZuDo4b1UGRjiT2yaw3255+vgRcY5rgDrPHPpepLhiZDdGPjtMRG+J3oj+Yz9zERs+MbAfXzD/+I427Hn97BG7v/O7hE+ewvLwZ4b+nv50C2Ui52HhVGIdxn8EnhJI80T2MvvbecnmnZNUwyL22jojyIdqRlqEPJ/izPE08ebQc+hP9MMg5G/dWipSWI+16nQmtmTMmnNb3QRLUa1ZfyYkr860BrfGkFIdQSdoqm+aRbxa31x+h7KYK5RF0WJ9sESFQJnKVbES5zzTdCTbQuMbqQJYSyqJlC2m1ClSgbZp2Ww2bDZbPn/2HW5ev6TkCcjKBSyS7FQpqKI93WblNNVGRYDQNDRtyxwjl5eXpBiZxpHHT55wPB5Feu3UczwcGKZxGUvZGsN1cXw2ZEp20vGaC3EeMS3sZ8dmnLg43mJv/4RXf3HL7u/8m/jPNxQyszW4JAnCNI8c7m8YxzssvThL5o/GdsmJkuKi/OC9oP615mOdIWw8FJHVMbCUB/u+5/5w4u7+nntNUIvalWwuZ1vse/d8gQ0Mq0KE0c76EIIOODlL7o3Rio+lH0Z2W78g35JzWbUT1t95++FBk+V6btqEUc95xSJF4/ZwGMhOmuqil8lDJUZylIBWTsusjWEFPn/2BW9eveCQZ3LKOJt0E83L3iU0HbeeBxL01Elb3he8d5Sc2W46mSA1TTx+/JiD2u7Q9xzuDxz7I7FIsNlieFQcXxwSufXgpPElTiOmLeyiYzNF9qcjm/svuX8x8cm/87fxF5dkA9EWkbqLljTLkIFUBmyZlV73cdiuqOrkNQ4wGWesUkCk/B/aBotTkECQ+VIK9/cHbu8O3B8OnIaeOZUHZfNzlB8e+ty3c6zFPNUHN21YKzScBbVVQWgY2Gw2bLcb9vs919fX0nNQ30sgwAfooti46nme9bGUpQKk56lVipgL0zRDFo3umAqhCHKaStZRzSKx571njokvv/yaJ08/5/LyEafTkfs44GxSwK6CJIZcq9BV3WIB+FjuhTEOVyzFO9q2EZrZZsPjvhfK3jBwOh65u7tnmkbGecLEmc2c+H5fSM4xOwOp0PSZtsls+oTz0BxG2tNPefVNz/bf/tvY/U4oC0YGV1Q91lIMUxrRWVSkNNKf7j5oUx8MUEvO5Jjkxipc3zjRBBOJVI2W1R4sdTHk65QSQz/y+s1rhr5njrOWQVlv4hla+vbxjuHpX+I3V1FdltKK0WzeKP+tBqiBtm3OAuGasZ8XYWsJYUVi66dmCsM40Q8TwxgZhszpJF+TC/04s4mFORfl3omM0zhNXBgJbm9v7/jyq5f86Ee/zXZ3QT/cM8UjxkS9HrtkHUsDwLIOdbhBOVsTKbsLYuzASlMYbEkpsd/vuLy8IKXMUVUS7N2Bi69f8xvHgd5m0kacZZgKfp74xGTymzum00gxHYfnL8i/9QPs08daigBjpUEqpgGImBJxThKY4uxH4SgByKJx6DSZCt6opFSt
GIvMRm3EWJIqCuM0cjz1HE8npnkW6aeimJEx70zyWPCf99ir4ewH2hHptEHqnMtWh1fkIvxlay1d17Hb7UQj74zHVxEnc+4FgTp2VBDamrmqELNyjVMy5GKZpkS2MLcZHwWFTDmJdFyBGGdC2GCtY54Tb97csN3u2O32xNgzqzzKij6p8HLWJFA8qJxzRcSMuHJnDc475dVKWXa33XDqe0mujkfu7+45DuIzYo5spsTnfeRH/cicCraRwCFHKwNFYiK4mcYcsV9OnF7e4H7nNwkUJlNIuhauFFKcGYYDOfdYO2NI4sc+FtstUZQkVIPR21Wj1GpJ3FlLSWfNn9rcNgwDJ21Wm1NUrp1ZE249qjLJu4epoCmV61wDhKrqIuNh183POkcuhVPfs9/t8Tr9rNtsFtrGYg5v51BLYlXWf1OkhIhdef0FhILoFtttQsb7ROOdJJDZKr0qivKFtUzTxNdff0MIW/b7C+b5xDCMGnhKUiWomgADnAuHmwp6yAlbfU5LEfk44Ztv2W239ENPiomh77m/u+fueM84z8Q0s50i3zlEfqvvGWKitJ7irNpuOrPdQvv1c6abA+73fptgYDIy0hcEUU4Zco6UMmGI+id9HLab5TmS6iZYkxdfK8qYRrrji+5daONZThxPJ06nnmGciDkvvEUwZ5Wj9xxLGFF94up3jUHl0xrW0bH5zB8JiCU+PrPdbrm4uODi4kJl/SoWu8YJ6yE2W4eHFPRZ0VeKH8/ElBnGxGEYGTRAbaeZ0LaqJ5xJWhaP8wgGvPMMp4E//Gf/nL/1e1vadkvX7ehPB40X1tK5EcOkcmbPQa01QNWzOksspVG4YbvZAIV5EmDg6vKgdKAJ7o9sv37N3+h7Bg+xkcb4bczYOeP7KLQpmwnPZ06v7rF/67fwpaoHaUU1L0Gb7G0kSklIX8v0QZP6MHElF9KcwEkm5FwgBLs4S+lwkylRtrDo5Rlt7Jjmifv7e16/fs2UHFllDoyxOo/Yrk7wvc7yzA7PyqeV5FvRqMX7VYeijqlKnXgfdNJPeftNoaxuehnDVbMRIzyhaZx5+eaWV29uuL8/MY4QZ+h77XS2E7t9pulgipmQMk32DMMkWo+l8Obmht//gz9mu3/C40dXbDaX9P0dhvlML01RVOcoZe2IlyB0fQDWMYVnhoh9QLL23rPdbjHGLNO/3Fcv2Pz0Fb+B4bYfMaXQbhouQsebV/c87R4xDJH72x5nIvf9G2x/VIOS0oMlgU0YE4FZsmRkCEN2lhJ+9VwogBAsbesE8QGCRxr4WJMpd7Y5OU0CyJnj8Z5TP4psR4wq9UElpi3BYj3e3ugfJlt58ZhGZT18EIFq+a06kUqehZQSs478vLi44PLyEu/9WdFKHHGd5718dq4VhodBcSowTjPSi2hIs2WMhWFMeFcIU5SpP8Fpk4wjZ8M0j+z3jpwzr1+/4fXrO37wwx9xcXnNOJ2Y4wnqJg+LiHPGUoo7O496bQ8d5erLDa7xdOGCy/0eYJGs6qdJE8Oezc0d3//yNX/TRO4H4W03XeBqf8GbV3fkMaqMUabkSJyOhHEgkJi1OSpnsDqRKsYe7Igjqlic+Whs15Lx3hC86IA6m/Uca8OdFhItWEXnTcnEOHMaTgyDVExAmpdiWS3t3HaXxOqtY7lPrPxQa5GRuzWxWvjs4s9TivR9L8oumw3X19dcX19L0kK1XQCZmnduu8WsZ1FbVWvgMU1RhokkwzSWh7Y7J5zPNI1MLgrIRj+OPW2zxWC4ubnjxz95zm//zd/l8uoR43Rimg8syJuxklzBasRnoIlca1lt+XwfQtDtxnVs2oZKSZs+meiniX6cGPoTmze3/GB4wW/7xO14IpqC3zS/kO0uE36KKmGUEWdnFT3/OGzXIPQp781bdivAgDcoMq7sTCE5M88T/XCiHwdpoNRQMOvvndVaNbF61+eu56B2TlWZgK7daAWnSJhspIG23q85ikrDbrtjs9kQQlD09gE7W99dm5L13Yra8nmFIAPjPDOOM8M4c38auD9OnMZZeLHHgbbb0qSCS1lR1EI/HsE0WOe4Pxz5wz/6fS6uPuXy4pKu29N2IgmHiWvQrGv5EHxjqbg9DE7P1kkTL2+FIrlpW/a7HY8fP2LO0rxivnqB//qeH5mR+2kEY2h94Ppiy/3rA3mOCt85ZhLj6UA3zjTAlFnWqfbWGFtl6hLoNCwfPhz3/QyrLqQ8K+JpOZ0m3G4rsH01sjzLIvnauFDVFSGqxIEPDY0zy7i6mKQho47k1CL6h0+FGpcK0imlf52WU85vjfxdm0uCD0t58Ty7WFyhOdO4U0cuAu0OcKRs+elPX/BP//hPuDsemVPBuQ5nNlgnY2D7CN+8usM0QXiEoRBawHlijiQy98d7/uAP/xm/9Tf/e1xdPSaEHW13SYqjBvmiFmDJxCIP0VLCQLLN5QrfMra6aRSVnpBShllKASGIsfj9hqsnl/x7/5v/FT/+r/5rXn/5FcFbfuvf+G3+s//zP6A1Fn99ydX3v8O/8Xf/Lv+Xv//3Fy3JQsG6wjQcoUzSBav6adYYESD2jtJ+BJk80DWeTdvgvZXMXrN70TiVhMo4o7Zc9e3EEl++fMn9kJiLY55HgW+iJBGVv3R+LDzQ9x7rkAODfJ7XCSk1ilub9oSvPQyDJoRuGT26fs7DTHhtJqqf7zDGY/CU4ri9O/BnP/mK46lnjkKo7/vI8TRii2GYCl980TJGMFPG+ozznlM/cf1InsqXr17x+//0n/PvmY0kV7tL+vGAcIlAEk1Fb23lbOs51wDnLRDknGduYGkqKaUswttXKIepQPP8JZ+OkX//f/k/409+/5/w4vk3+OD43d/9bf67/9d/wZw9drdl/8Uzfvff/7v8n/7+3yflmRxnTHGEjQObIc6UMhNCUekVva/efjS2u99tuNhthNOVkyBkJGzOCgSwUlOMWabLvXz5kpubE8dxZpojOUaV1VkizreOb7dbGXgCFZWCOghE1qjoRJ1Vz1Q47OM4PKwQvHOsn1mDrvPGFaMd+6nIJv+Tr7/h1I+MY+R4jGK7xwHLh213txdbfHN7wz/8z/9Lut1jPn36lKbd07R7UXeoQIvSU0wWYKIicqvtfjuCZ0DaIxZeo8M5mWamTdG021d8OiX+R//bv8ef/1f/Jc+//CnWml/MdpVF7ExZ1F8M0jltLB+F7X7IbiUXOKPhGaULes9Pf/pT7g8TcV6boUyuNvjta//u8e5rjVZr7Zm/NDoAxeogFrDEeZbpVt4/sNvV55azsr3a7Zkty/s4MpbDceRPf/xTXrx6zf2xJ1tPypZxTJgCsQy45oRpAsmA8ZnQWYY50TYF6y3Hw5F/+od/yPd+8Df5tR/+BiHsuLr+lDdvjsDM0kqzUA3q9b+LoL6zJktlwGrlRYJv5wyhCfgsz7bf77h89oT/yf/uf82f/f/+a17++Kc4Cj/6nd/kH/9H/4h2f4lpO7afPOV3/4O/x//xf/9/kLU1ZlE3ImeMcTodcZB+gYnl/Oy7XWsPjg8GqOMkhHBnobQOTKE3hdZ
7mQoCOOXwZCMdioUCTUuxRpgGRRyFMxrRW4t1hZjzmRQHD7XjzheT88CzLDegUgRKzNSRoNY5bYIyjOOE9xObbot1VSBXPuH8ntWiaDHmrE5rKcYxx8Kbm3v+m//2n3GIWcdyORIGvMHZBusC1lqGOdLPFl8soRhsKrhGuHYpRfqh58svf8pXX33F40dPpcOx2VM4YfOIlXdVtLR2KLJkcs76b1mdMyM0BpwTp4WRrkV0nryB2RRejz3l+5/xL/+TV8Q88PTxp5hf/y5D54jB8vhv/jqbX/sOv//nf8i09Zz6EXd7T2oic0l04wE3JumDq8iMU+kmA2P6MGT/yzru7+8opbDdtLSNo+QZR1EkWFEo67XUoLw1hCeUk84qLqjd5kUUvvKY5fXaQWrOXeM5deQskNTDINSZktcM3bravFeYJpk0s99ul03v/Jdrh/vyacoXrVlaTa5KsfTDzD/9p3/M169umFJCJHZajGkopSFT6OfMi9cHnjy9oliDbwxtcarKkChkDqcDf/wv/pjf+b2/w+XVI0LYsdlcMM+TBqlGgn+KxpnLbCx9VovqRb57LM7Uy2jTumKSDIgGsaFQvOGuJMrf+hF/8V//F/R55snVFf7XvstNGrl8dMXT3/ttdr/5A/6bP/tDptZxGmbafqJsA2aepYFqHiFOy8ZXUZZc8kdju8fTAR88267BO0OKk2hHqj4nxuKtB1NVFBw5ZU4nES/PKctAEeuWTnNjLda4tVpVBAFailL62edb80LFMjqGsv5I4jnxS66WTmX8ZoyJbtedgQKsdlt9r5ZjS0X9Fel1VjijuTimIfMHf/gv+fHXzxnnpFhawFBtl2+1XeMa0d0ticPxwL/6kz/l629ecn31GB92bLbXDEOPZaY2dViyzCM3nqpdDTwYz/rggs5WzRiR6avX58SBU2Xesi28HI6U7z3lj//Bc6ap55PPPv2FbFcSTq1Y1vWXVfxobPdDdguQi8U6yEhzUNGZ8H0/EKdIUUmjamnOroi/WYCsdc3lKGf/F89REfvVXjNGqzpis9J4N8+RaZo1QKu9AU4TsTWpMxoXSMG1SvOtPtdXVZFimafCP/7Hv8/ruwP9FJlzIVuLDzuM6zDIKNibu56w6UimxXWFNoNxQVVPEtM4cXd7x5/+2Z9zdf0JT588pmkvCM2FcI7V3wrNwDy4/ro+1r4NZp2vpf6OtYsyjEGoCY2X6uJcEq+HI+W7n/DH//Fz4tzz6WefsvntHzH9g3+EaRyf/u5vcP03fo3/7s/+kHnjGaNUUlLO5HnCb1ppZM8TpHm5hUWff+YPA5MfDFArulgKOiFA9A7nso4NDFazaeos3QqwC/W96MlQZGqTr4FtyRRjiUnfF3Q+7jtnweoQzlCXasgLxL6S96tyQIxRxcHX5qL1Rq7vX6oDtUoDMMJlOvUDX371nH7IJCNuTKSZBJVIxWJdQ2iFYB02HX7jKB7GlLC+IRUJesZxZJpH3rx5w/3xyGbTAg5n2zU4rUiUzqkVWZxKW3g32Hn473qNVadlDeudlZnEKWfiocfkwunNgTJHivXkrmM0hrkU3K7Fbhv+5J/8AS5l8hSZh4mcHalEbIwanMpaLKU+Ne5p+tU7SoC+H5Qj1pI2HmcyjbM6LQsN+mszj1m67jOrzJgFGdmYtWMSo93IsIy3W4LRBRd6cB41SJWGv8oXLJJeKh2mZvJJJzIZYzD7/Sq79MAZ17JoBRuMOMdKJZBCMMMw89XXr/nmxRtOYxSmuCkYK8T22qRVDBxOkd0kqP+UC1MuuMYzzZPouU4jz1++4OXLVzx9+qkgZ67D5k42S+WdGT0HY0TSSqEo5de+vdG/G7DWtV1+akUZwhsZ/xdvxXbvX92RxxljPaXr6CnsnSVcbLHbhj/97/4pNmbynJiGkWwMcZYRt2UaIcZlRGXRoDrrRKGP4Rj6AWNuSXPLpvNQojxqGjAWbUYrSRr3jMlYm5WvKUFRHUiRav6uDZU1INS488xa305+1+Ro8SSidr/86BzhT6p7m1JaBM8fbI6mPBgUUkBkeTTZykXPzzimMfHi5S0//fIF9/1AyiAT0azYrvFiK++1XXDBMceZOUbGceT29g3ffPMNzz77nKZxYALWVttNujVXGR05lxoAmg/Y7vlQkvMErNquVbAhpky6GzCpcHp9T5l/cduVfSJT0oTVMdYKNgqt4COw3W+3W90fDBSnep+Ys4l54gOdBm/OqrddVBbOvetqu/Cuz11DLQlVqzTdeXJUPy/GtNjsAilU8Gv5jIdPSNHEuRQrqJqByrAdp8jz53e8fHVHP0diQdjtxWB8wTvhwla/O0VHkw2pOIrxFOtIOTGMQtFpu4avvv6Kv9H39OOI96I8kPGI7JhEWeWcN13Te6MhgDkPUt8DEpwFpwaEOqa0ETsn8v2ASdC/vodpBucxXcepZBpnaC62mG3Dv/pvfx+fRDEnTpHZiE+NJRJ8g80RM83Ys9I/RSmkHzh+ZoAqU2e0nLNwILOCltK1losYXUkZ0addN2WRisjkzCJH4uzqjEQqQZxvec/6nS/7+XktQeoyXmZ1GnWiT5XsqOZqzl53/q7yt3BrKUUksIrl1E98/c1LCk6CGMxijNkYYjE4PL7Z8uTT77DZtrTbjOHEPN4RnKeQmePEPI8UMje3txxPJwoFayKbEBbZFkOhjgKkrGVRw1pGe3isqPDyOrHIFRHRkoZ10iDiThOMmXKYkKHzDnxDcy0oRKnwyDRjUibGxDzNyMwPmQZEPltTSdOWBCCnyMdwpJQYhkE3IE/XGGyR2eY1qMt1NKyxCypZyOvAA2QiTsCCzRoMWJ2KBOf8ZXh7w6/fXJGjxflR06SyOMuFD6Uj+1ieofW9zz9o2RuXzVMQ2IIEh8fTyFdfv+B4mmWSklH9AtVvtcZjfUtwnpQmYvFE45mxjKngQiAmEX2e54nj8ciLly/4/O67bLctmAZr1fGQsBrYW2REq4wrRYMl+/Cxo55vPWrQZN+5TueMTAHLmXgYYIjMd71uZRZ8g7++Au90ohGYGKUZKkkDRDIwu4gJEZciJkoF4PxmCZrzcdjuPM+UY4EcoXiCK1LSpzZBWIrzQBauioadS1KqSUEwqC6idhcbu06E491U/d1jRVwWMKD6U7Xbtzd6aUxdwYIVGNC3W56Zsx20rNI8ORuO6ndv70/MZwQZOWmDsZ7gq+bq/JbtZqz3xJykG3kamOaR58+/4fb+nov9TsJR0y6bvPjXFRiw1G7sikS93+8KoiZBwvttVyfYlUI8jjAk8v2o1IJfzHYbJ5PFSpwgZmozl4YoH4XtfpvdOpXVw8qelrKcf9FmGhllLtWqYsXfylCNvDTI5ZweoP3nIdeZK3z43bPkvioCrCV7AQTq5L/avH1+LLZbP3f5YBF0KmonBUvKZvG5p0FmPObqGY2oZzgs2AYfGrp2Q+haQufBe6YE2VgKiX6QJqXNtuPl65ec+hPHvid4gzNepnhSB49UelU90SVoWLjda4D60OeWUmUB6/OtAb2W+F3OlOMIYyLfqd0WS3
EOf3mB857KjTPzjNOhNTElplkCXZMi2SV8yfhZ0P+0JKqiy/2h42cGqF3X6ci9QtuADwa3zFY2Szf/uXOsDzbakVxSJCdLyQXrMgYHrooknCNE559NTfSXxV+4E1aEzp3OzC3WkEqWDEyF6qvRWXvWSFVv4ZJV6TvruDpbs3kcMRv6MfLq5haZ1l5BdelaxTgiBpcNxTQ8/uR7fPLZJxRz5HD/NTenkwSezsg0nThiTOHlm5fcH6RTzprI559sqALCRmMlZ+uUhhWdU4yM8xV5sDp1/ZfKw1oHMsZgvcUbg5mhjIY2GoozuGIgNHz/3/odpj97yXgc6BL8D/6n/2P+4//bf0hfuWxmoDWOE4lZO7crYi5cNJltHD5kUL/Ew3uvtiCzs7MrzEm4vg7lGFsLxi2j2wT5y/r9NTgMweGaKnZfmGIiTWlBXs881wNbXrdmfYitXbpDq+OrZfxqrymlZc5yrsPOa0Z8dn2mpsj6+bWonrGMU+L27siXX7/C2CBBa1a9DavCy9mxbfdcXlwKKtU4TGNJrjDGhHMNqUTZ4KcRTOGrr7/k137jt7De4S14GpybFwdnTFk1PdT26kg8OeflyqnO8gEKpQnWWciFt9KAVwCGmTIUwlR0xrODpuX7f/tvMf3ZC6bjyCYb/t3/xd/jH/xf/0OmUvApU6bE7AsuRTaATVkDVKkAYMSXfSy2a60jZ2kydWPEtgaShE4OS7GOxjXLMi92pgCAyBFrL4DTIrZRKnUs5LnyzlZUoKJG8vX5Rr7arn5DNU9X264bvVQA6ujC8/tu3rJdSdmsNqGK6BKAZ5wyNzdHfvKT55QSkCRN0hGKEclda9k2Oy6urnDevGu7tiET6ceeYTzhguWrb77icOrxocHbQrDSm2B0WpNUMpJoki4htH3LZlm+jzm3XfNz2G6k9JlmBuOFiy62K373Z9puNBSbKGkkzxGroIlV+tzHYLvfZrfFOBkAE8R+Ia25SQHjPNYmkQS0ntA6YklgIeasvPwoANE7CMBqu2fh2ZK4rxz+h+lYFeafpumB/SYdY+vP4pmHv25Uq1ynkAHgmKbCzd2Jn3z5nIxT1RfxuTEZbDaMsWC9o9lc8MX3fsBmt2G3N6R44O7uBa11eAfDPDFOI03refH6htv7W5rNhk3XsG3AG481SfeEijTDugJyznmpqtUleehzZWgE8lwtr5E9zTv92fjQbm0xzFh++G/9LYY/fc58GiEb/t3/udotBVJizAlbZOxtSRmTMy4m9Qp2Oc8PgZLwMwJUa6Ftg8jCLAGqJZgq1QPBgNEN0laeaXVqTkoyqUSVAJEN2DgHzlLMrB3m5cFGVZ2b0exzXWBDCIH9ruXZs2cYY2Qagh61oy6pvJRzTgR6raXOG1/fjAfvq0UI/ZHl+csXfPnVlzIWkFbKP1pSKTnTeMdpnjgO99hTg3GSBYVmS9M9ptufGA9/yi4EnLeEYNluWl6+es7LN6/w1uNt5tOnX2Dwqm8m5Y7Iqikr62BxaAZW1u+vF6FTNIpkTaWsAzQFE5Smg2QNE4nX/+//FNuPmH3L9PKWv/iP/r+UANYZXv3BH/Hyn/8LfvP3/k22k8HYhhJajPfElCjZkouMzcyl4DAYb8nOkM66XX/Vh7WezaaRgRK+iO4hEKzD6xAJ0Hu/aHmKw5nniWmeiRrAONuy9DQZCeqtRQPI8mCjYvlq3fhFqFzQpnbTEboW692CmtbxqDUw9d4riV+cxVn+957r1Ay4yPtYPPd3t7y+uWWYpoptkk1Nrqxq08FcCqVp+f4Pfp3HT59g/cDx+JKblz+mc2CbhjlF5nnA2sI333zDzc0bxuGENYnvfLY/C2RqrCf/thq0S3JVqMMC1nVaN4CKQpkahFAtVyYEFQslGKLJvP5P/iF+mHH7jvnlHX/+//mHlADGwYs/+ENe/NEf86Pf+z263pDwDJgF9Z8ziLSNlB4dFlfQZ8t8NLbrvQz2CN4ICkWkAB5JAmtSIz5CNygL8zwyTaN00BqL861IqDm5vlQgRvHraUEu3nfVeg9NLXtLJU18tkwIKymLEsVZtQpYqlY1ALZvr2vd+Kl+V3Z5HQjK6zev+frlS+6HnkmrHVl/z3onlauUGXPBNN07tvvm5Y9prME3wqFOKeK94eWr57x585LT6Yg1me9/95oG4fEaI0Md8pmTXxLCGuyfURbS2TjWXIOWD9hu9oaxRL76D/9TOI34y43a7n9GaQzWG17+wR992HbLhCVBijJyGNGKdaXgsR+F7ToXaFsRpa92CzXQF3qUAW1MlcC6OMMwDIzDTLEe44OUu43wRp2B4qU6JPl4bRZe7Qg0UdP741S5xVmRRsvq1x1qc1b0yqu/rX643uMqgP+QovLwWivyKHbruL17w4tXbzgMI/NZcFqH9SUF62xKJON48ul3efzkKcaOHA8vGYaJmAa8dTLuuvVcXV/yZz9+yYuXLzHWs9103LmR7z5rNSUqb+0LD0J0WeezRHIJXI02hJeH+3UFFYuTwDG7h3YbLjZML+/4F//xP8T5gHVity//6F/yo9/9N+iG2nMExnptkoJsRFYq17hM6ZlSvzzjqr/n+BkBqnYSO+GCNY3D+Ro+Vk6HjJ4pWBmpuWTaADoGCx3JhSBUJWXRxrJWNMDOFsi8BzWtHNIQAo+ur/jud79gt9tyOp3o+15eWR52W9ZOUqvC0msQochBvSFqZuoxJfPCcXN74PXNPRjpzC9FbqwIlBuun1wyvjwwTDP92PPq5g3d/hIXAiHs2F884njzr2iClO9zjhhTGIaeFy+eY61n23lSekb2EWukIQVUTNtAKXY5LXRtqGXi9zwwgEwDsvWaDFkn6oAaS8z8wT/7Z5ymmUexxb458Sc//QMw8Pj6mnwa6G8P/PTLf0SImd1ssBF6W2SeeRapjiVio6iKgCMWx5u5+6DB/bKOEAIhNDTe0LhC42X+tlNnvtAnjM5BLmdk/iyzzUu2JBTN1rFExUCc0lsJVX0e3t0mrNUmNWfouob9xVaQdSNNhFZt/Bzxt4qC1ZJqKdLg9G2VBk1jMMaRiuPrFy/46dffKPIvpXNgqRoEZ5lS5P54xDX3+M0lhC02BPw8YcOeKQ5srQjiO29pu8DhdM+b2xvGaUvjDcPU4huPs1kz+gIkGTdaEwDWQOccVVsyaHRazLk4ev275PV9sqHEzB/8t7/PNEceZ4u7OfGnX/4+xVieXF/DaeB0e+DHz080MbEbCzYa+mDIyZEpRD0Pa4I2Tsh5RD4e2xV95yDjTV2h8Q5nzoT6NXIy1iy0qYwEhzGNoudpPKmM4Bwla6CXIae3bTdTi3znhzS2KQXFscwpB3T2PEuwWiesWQUCFkpH5cQa817brZ8klASR1vvy+Qt+8tXXJIMkvNYsHnvx1jlz6k+8ubvnR2/ZrgsXxDjhtfEwBMdut6UfZ17f3LLfZbrWc+oH2q3oN1v168sZ1XM1tUr0cHVqclunFr3fdjWIldwXYuaf/9EfMcWZi7zD3vT8q5/8ATjDk6tr8rHndHfgJ18fafL7bLe++VoZq
x8VsR+F7TrnaJpGpp5ZAXGcNdK0q9SonLPYrLPCgTCGYToxp0xJSWa5G0e21T+YRV0Hzu4NReXK3rYrIeJ5J1q12+0WU7Q5tiLiBuZxIs0y9jf48OB9v+14WB1T8ME4SvE8f/mar5+/IBl12+ecZFNoG880ZeZx5P5w4P7Qs78uOBOwYc92/4TxcMS5AYPFe8d+t8Faw4tXz7E+cLHfU+KBz599V6qh5szegDp1soJ70tj7IAQFVLbLrMCKKgWu/66C/8Vi5swf/9EfMc8zl2WHux348ss/wlrLk+srynHgdHvPT7460uTMbjKYaBhCgWylSKUSlfNCs2CJYYz9SwSoUorRcWxGAtaqKynyLDqFQz9wcZjqStu2Ybffsjn0xNNMzso7KOvUkLd76pcctKw8U+cMm82Gy4s9Tx4/5tGja6wxjP2gskpyQ6qDrE5xs9nQNu3ZeFP5jFIeGvrqkDQgRkoVKVUBXl3UIjc9BMtm22LtPSCjE5+/+IZPP/sOTQw0oWGzuSDGuAQfy9zxkjn1R4JvacJmCYrqSmhoqTGzOu8zRNkUwzses/6j6ElWszWiTiC6iRCaFvvoETEbnL9g9/gxm6bhdRwpqWCv9hRb6E9HXvYHytUl2Ss/s2QoRXEuXSfRGhNE3Aqq+jMoJb+0I4QG74OO5S0EJxIttRBnKELBNZXDWXeSgjGqNZgjubgHASpGxJ0f3IDzh+49h/eS/T558pimCcLpPeu8hzPnoJSUh01962tWU30ru6+jTZPhcOo5nHrlUqvDVLNxztK2nvEk3Oh+7BmmyDZbHA0u7Oi21xxvXgjHyFQHGxjmmTe3bxjGkU0bGKcd21ZH3J6h55Lb66WdO3Xz7hrlt76zPqly3TpT6IHthutL9k+esu1aXucREovtDqcjL4cj8eKS5Kr7ll2jsLbE1Aa0NXjmo7Fd0W0OMhjFF4LP6wQ0bc6zOoHOOrN0OguqPwudyhSReCmFYu2y0a/NfXrUDe09G721lq5t2G47Hj96zDAOS/m6eqtKpwJJCs+7oB8ytN8XZNSPF0RtnAuHU899P5BNAFWHWT7RQtN4+rEwxYnj8D7bveJ481LVBcpiu6chcXN7wxwTu03Lo+vA9a5Vpy4h8NKEBu8+yabGhWZZs3W/kuNtoacHtvv4ETlbGn/F7pOnbLuO12WmxIy9uhC/2x952Z+IVxfvsd36rAiR0xhN9/RcPgbblcl3DY23BJsJLi7DP22pko5rcMICDkh5PZdM0dGuWTvjK9Xt7QRHMBv1uvr/2nwTgme33XCx33FxcUF/Oi3Jfj1kDLo0A/kHsmjvPgcsn6VVoSXHFrtN2XDsZepgUa4txix2a63h6mLH65uROc6Mo4wZffTJMzoTsHbLZnNN7p9jiJrgReI8A6LLfXd/BxRKPMp716Sq7vcV9Tfr81X36fqv86qqEGv0MpagQ9ZIxsJaQtOwe/yIXCyNE7vdbDpe8z67PRKvLklWel1MEYqBjCs+W3ujQOLie78tcZXjwzqoRtCblDVbqOjOsslL6bluTiLSr3PGMbRdx9VV4dhPDNMtOaocR5HAd7nParAVQWTJjKxqym14/OiaJ48fcXV1Rde1xKlKhLCgUCLNJFmas47dbkfbtYtIrxjxgz3zfRcNGHwI+OBXVBPxCFadZNNYpJ09UcrE8xfPmWNknrNwj2wgRglKc07Cxc0yo3YYekoLObdLE9diUsZqxod23q6d3EaqYO9sJott1XLfch1iDJWP5nZbmh98hy6DzZHu8RMZe3bpKNHAs6fEuw1TY8hTZt7uGDeeYLOW4QRnqE0YWKOBm1OnWfBl5mM4nPPaTexkipTLrNORyzK9ZmnoqEEqwm/EVP1XEQkvxUKWQDwXbS7T413e6erEQgjstluur6549uwzkeBIMj0FWO5vTazq5BOZHnUWrpVa4nrorBdnZMV2cjLMc2KKkUKruZ5eqxVEqds13A8nYswM08DN3S3768di837LdnfN7cuM807oIjkRnKMfE3d3t4zjRNxuGOcJTEMxedlsTK6rUP2CWe13XZ2Fe1Rx1jqv2VbUWM5Y7LcY3G730HafPKXdtGwvPTkBnzxR24U4wthu6DtPu+gXJopO6rLq062RzUPzrI/Kds2ig2sJXtjvDkkUS9U6rkNRrAAGzqOJVRKVh+oZsl2bWc+S12XDVX9bzaoUCYC7ruPqcs+jR1d8+smn/Plf/PkDOkuhkLVBCiQRq0Hq2072PLmqx2K7AveQkoyMnlMC22CtkBsKcq+ct2y2DcM8EdPM+C22e/cq45w0sVAyzhoomfvDrVDK4oZh2K22q8zpuu98q+3q/6vtlho8vmO7sq5Ofbbf7dj/8Hur7T59Srvp2D5qyFOGJ4+Jdx1zY8jT+21XWwIXgEIaVqpv/zj8rvdBg1RH48AbmUonDT1Fz1153xooFaS/peSoAWqtqBSMFVpQFqIqcG63ehSpHBkj4JSzhf12x+PH11xfXdB1HcPppL0CGs/lQpznxZd672nbdqFVPXj7d0CBs0RL1Vcyhhgjc4wUGtkLWfMY7wzX1xfcHUbmmJjjyIsXz/ner/2m0nkamnbP6FrM3OOCpZTM4XBHKYlh6DkeD+oLx8U+tSUW8V7nlDXz4OvF51KXfaXNQE28VBLNVKlCg99t2andumq3259htxundlvrHnbNC+qeq1rHSxzxIZv60A8r+geqLmKMok6ysVoLjXEU1cATSoAI4GadMtO2DZ99+gmHPsJxhjKTkzI3nBMeSjGLUdZMAATt6bqW73zxBY8fXbPf72QCRBaDrht6jddWbpYY3W63o2srgrpm4u/Jj8+OQsqRzaZjv9vwdXklULd2ZDoHbeehTJQinfk5e4bhqN36gTjNzMMrQRRTQfpiEqnMlDjx5tULtrsLthtPLnVsmQWr3aOVV6gBa+WR1ErtOd5RN4wq5SUvWGVIMFUn09A+vuTy3/k9fIbQGo6mcCzQuC9ouj2HmBjHkTZFLi63/Pmf/Bndoy04g1HdWgmmxNHUVKUiY9ZAa371ciewSq9422Ctcj61M5SiAaoBnFuTKi1dtl0g9I4pCnNJOp+TBgKcBbMa2GqSIbfRqsOU0cBPnjzh6ZNHPHp0zW634XQ8Lq9X6uDynFXJld1ux3a71bLqcqd/9kVrxcHoOMqSJQBTn4y1sOk8u13Lq5sDxMQUe/7sL/6UTz77Dk3wdE3LdnvFNEYoMoZ0mifmeQY88zTgrCflhnGcwbTLckj14TxYt+onzpznW+lV1oThrQtRU5bhE9lC++hisV3fGo4GjkD47Wc07QWHmBjGgZAjF/stN3/y54yPdhgHtiQJgHLCGd3w6n00gkA6az4a260NHMGCbT3GaVdzSYAVP6BVC/0FDYxkoAZaFZAJTBHUHgVJXT4FWDdRibrsAkIEb3j26ac8/eQJl5d7CYrrZ51tKpXHV33xbrcjNM2iRy1JVV6QPniY0NV/FyPlwFIHlJCXhq/6XO67wNX1nrvDS6acmdPwju3uttekWIMciCkyDCdysczTSPANMQWGYYRy
dUab4q/Udos+08YYmqsLdv/938Fk2HSWgzMcLHj3GU2z536KDOMopf2rHTd/8mfv2G6uPQZkbC4i7eisSBGa8lHY7mK3zmCCx7ogdpuKlnAqTar+keRqs2059pOIuyvKWnJSpFARY135WgE4vw8VnLLWcrnf8t3Pn/Ho0TVd13J/uNeTk/PLWWiHFUGtUmmPHz9ms9mooo78wnnJvL7H+qHrZ3vvcT7IfVJN9op5WGvYdA37baAJMM+JnAeev/iKKY6EKOOkpWHQC5e7C8Q48fLVc2KamaaR4+EApbDtKiVGkxPjKKr5WcGWuh71/N6220LVI1q+sViyM2sV2l2/ZbfecP+L2i1Z7nPR5EoblA1R/cBfosQPwmsKzhFckICk2GXxDeLsp1nI0NaKfmjbeDHGnDHF0jYt3//ud/iLnz6nHHpsEnJ903bELGO+coEUK0opKNLl5QU//N73efL4yYLm5BIXYr5k7rLTWCsc1RQTKYrhzVNknmbhwaggrVtkFaqBnRm7Ot9SMt2moe0COc04L41iIF2Zm9aR5oHGFaJNZEbm+cT9/WvNFCPT6cDh/sj9neNy38iUKJOIZQImKjG/0TJqFiVY3MNH7537sWhpljU4rcifoZBNWh8kU18nsh6z99w+ajgVhDPoAGtIVgxlLoHds0+5vrzAB0M5DcztRiSJVM5GhiRH1YIFoqrDGnC20DQ/RyD1SzhSSux2OzabjiZ4DJEmeBEHUVQwpSSdz1riAUuxnu9//7v48JJvXrxhnGEY60yYZYvl/N6YZYOXn4n9Br7zxRc8ffqE3aajbQPe6bjfvMpOQQ365f29D1xfX7PdbrUD9T3rWWPj92SfoRHUzRhIOWJcEC4h0ATDZhMwZYYyApmcLG9uXjLHiWH0xCmTJnHq0gQmFJZTf0+xV7x585LNdsQ5oQjIBWhXs4GqYWQ16K8OvlIpFldZ6nqa95i5BFxOg5uUYLCWm8stJyuVC6OgQXHi9Obi2T77hE8uLwjOQj8S2w3ReFxFyBQBsUX4VU4LkGgi9zHZ7mazoe06QghYk4QnVyIlK4fUyGQ+Yz3GOKw1/Pqv/5Bcfsz9aWScC2VUepYmwNaYOrCLigTUxAqKqHA0Mp/713/tB+x2W7quIQTHPE767K/Wb1iboqy1tG3LJ598wqbrFIn6+dfTGNjvd4QggvQ5zvjQ4p2cXlD01JqZXEZ5TXbv2G6e7oVGJmU/Upo4ng6UvOXm5hXTPGNMYk7X6wdbRfDjX43trjqP8tLROV7vN7zJWUYuB4MNFhkHZIlNx+7Zpzy6upTvn4b32q6MmpZAzyt2SknYj8R2cymEptFGKYcpE2Hxq7Vs4bDe4rwMZSjW8KMf/QbZ/AX3R7HbNK2+Vp9YeURrAvzArmTIw6btuL664jd/8zfYdi3OGXKKD3paKpvDKh+2NlLXKpdfAIFKA9RzqO/xLbhWKgUXxO+eDgO+7eS9DXgvI7dLicrVj8LbzgPH+wMhbCnJcpzvSVPEgtp2JKUJysw0njhZBybRNTtJ+ngroaoOkZXyUMv9S5n/Qcxwdhg4158oiBsfnefN3nGTM96DbQzmF7TbvGg1Iaori79hoZ996PhggFozjqCyIrVhQSLuQk6yCY6TTJOpm7wPULJoZi7oXdvwyZPHbLqefhiZ54RxXjUJlaNpLNZZHinP9PryksdXl7SNl89T0kRKkgHFqDwSa3AunP0sknPh9es3zPPEpmvx3nF1eYnfdMKZBC3zGmpJEWO0qUtIyt57afhIM5eX1xoQZ6zJpHnAG5m6EMnkNDH1Rw7JkOaR4fiKkgrDMLDrnKCLjSMO0hBlkDm2FeGq1q8Kee8cqkt9BuFVPtl5kKrHCq+KkRY0eIXJGLIK9+PkyUsINSNmeHV7y+vbW3YXO05zYZMMIWsDXFFErsxCOC8WYxIUHWtnEtaPP8PkfjmH07JNCEFkcRbuXS3hiX6cZOuCLjnvCcawM5anTzPGBl69PhBTDzlJhcB70fCtlIciNJj68LVNw36355MnT/nii2f4JkgXf3V2pjxwHLUR0SxlL/PAiYizyFjr1uBguacsm6h4loRxWvJFnt060g/AW2gbC3kiuEK0hUwkziP96UDwnlQy00nmuU/TltaDNRljM7nMpDSScwsm0zQNVqdt10Ou72EzlDGo/zhzqMUswdOD/H55FvXfWQMNYxlNoTjRUcbkBXGrtvv67pbXd3dcXl7SR+iSJSgto5axRLsw443DFIfJBSGufDy2a52j22xomgbntMkECZwK6MStWr+wCx1gu93xne98zqs397y5PVLKTBon3Ygt1gUh2horlZtS9Bk2OO+EinJ5xaeffsr1oyuc05nlBSA/KB9aY2l84FT/rXYsQxmUhmIkybFKSzJnrmoFCNT/lYR1RihlaSYVgw8SNBcK3kHTWEqe8C7jU4Yyv8d2jxyPR8bRyj02GedgijM5T+Q8YUxmu+n0s+3Ci/yrst0HqBvSHjzJDk2yRobV1EpL9bt3t7y+v+Py0fW32m4FHKxxkJx8jjYmfgy2G9qG0DQ6uXFtADXOiU56gZpZ1olMBkHdv3j2jOcv3/D65p4Ykw5uWLnT1vkH8aFMP7Jst1uuLq54dHXN08dPuNhtpbBQZMZ9tcVVWUJoV+c+NufM8XAkeKfKP5LMBWd0UI68boElzlCtUhLFZHlOnSEXrSaYoj07omgQpx7KDEQomVJmDocbdvtLjPfMw0AaR0wa2bYNwXuuH13x1YtbYuxJqcWYTNc1aou6jrDEBOvzaR7Y7DkYck7RWYtzK1tcQ1veZ7f2F7VbaoBaPz+J7cJqzn+ZJimAtm3p2oaua5eHsZbiC0WE3GeVD8giDG3cLIT9tE5x6trAk8dXbLdbjqeR+4N27hm5aTkJb/Ti6oJPP3nKk8eP2W46uhAWSZ+VQXUuxC9l2DpaL+eVtH86HYHMNAzSHBICu7aVzjIqU4g1OK3lcg16jTWkHMmzYbvdMKkmZM4TKUrpvsHhcRgLw+GW6GfiPDEe79g0lmkSoXNKous8/TBLZ3mwBFcDFzV9zShqV6gEMvWPOcviKirCUt5bMve349QlE1SzM8IzSVbKQxhk0gWSnc7zyDROlFLwvqEgslJVYqtqwQotQ7T+zGLkGeM+ArY+4ELAeLeU2txyo2tgqbGPKSQr6+b0PgTv2e/2pGzph0g/RcwkiJUPgSnJJDW1FqW3OPb7PRf7C64vLnn6+DEXux0JabgSp/SuIPQibVLExnPKHO7u6ZqWpgkE7wjeE5w7r6yeoQhKmNfU15jKIZJM3BoIXrjUVbM451l0TF0hkol5ZBpPDKeGkmb6+1vGcWQYRsLWCzruIM0RUyIUEecPQfWF9eMls39YTpIgXDfttwCnB7SU5ahJVzl/IdVh5hqVW+FbFHNuu5PYbgZjA6jt1sSu2qkxmWLlZ059iinl47HdRmwXK/Pt3XkMpBs2pZCyJK5S0nd457m6vCIXJ7O/p1vmKJuJdwEbArnMkpyptJMxhuADl5cXPLq85tHVNY8fP8K3fkmAyxn9pPr
zCkgsPlPBjPu7e6wRPqp3Dts0S9Wq3n/zgEddHthP9WMgm75zwsdzFrwppCTUh+QKqSTSO7Z7R5xlct8mtGr/lmFMq+2aTKO6xktSiJzgX8Z2xULfsl3OqGVGSthJNRoXTiuoJqfem2+1XfFXxcjMd1fWnoOPwXad9xKMGpV1KuJnxa/KfRS1lIKxqjtepIv++vqacUoM48w096RsaIJMYpxj1tJ7oY6wtMbSdR1Pnz7h8dUjri4uudzv8c5RSloaAmssAPV+ShNqDVgrLeHu7g5jpMEqeA/bLaFrH4A9+ib69VkToMnLM6FPKN43GKOC9xbmqYcckTl/AgycjjccD9dMLjD197Q5kueJGCUxvLzcYUjkNGNIEuyGWlWr3RQsfr9+f0H814haL6Ha5sJER6z2Xbs1b9ltOgtGfm67pVCDaLEAK1SzUt9fezs+cPxMDup+v2e32dAFT47jEl1XgnjSaUPGSANUTmKcjTc1jMJZy3bTYF3HRTachgnz/DXHflQpBNmcQ2h49tmnfPrpJ1xeXGApuBrdC9S1cJIEQa2ae1IarZs8oFNNZvpTYbQWZwzXV1fLz98PddcbmbXyk0lphiwz3SmRaTwR5wEMIhLtHcY0+Bw43L3G+ZEUI3E6cbltmGPPMAzEOLHpAremp20cm8bTBdF5M5mlE3c1s3pbWVQKqlGdB+qlEpIrr3L52Vv+U4PcmlgUldyoY3wNaEejp7hEGmf22x3O+iUGLiA6Z2rLOesUkPrQlgJn1/GrPFwbZApLFiXQ4GSzLwZVVYgiu4N2jhqLcV4eKQtt23BRLIfLgTFm+tMkXCEn/CAsddIdqLzKs2fPeHJ9zeV+z6btVFVCR1AiY4JrUiUlJLM4y5wSKSasMbx69UpLpg1tE9huNuy6bnVEZxlvLfdKbUjvoxWJHYx0qTaNJ+UoNI0cKTnjLTQeTJYpNONwpCSYx5HT3SvyGOlPPdtmi7OWNjjinISqouqqjZdNo55C5Y1mNYOlvFRp4mdohjz0arfV4kthmTRRy6jnUXnV71P8sNa6Ftu1jmwt46ln23XCz1LXmElrMxBGm4bkoTJFJ7h9JLbrW9FuXG2XxVal4VK7841qShewupZt23J1Kdvg/bEnZwlenW+w3jNrGdso19M5x+XlJV98/ownjx5xsd1J5YhELIaS5FkRDjL6uyvyf86dnqaJFy9ekNJM07R0XYuzVoYKwHJPCw8RHW2DAsqCWIJsxoKqChhgSJSUZaPORtYgP7Td/v4VLhWGfiB2InO06QLH47DQrJwpNP6MKsXqW/9Ststb48nkLdfAtr5njRyq7aK2axLD4fSttisAg9VmP0Nt5KoNq7/ywwmPWIZCSAKVdcoWuaj2rgxDQBsrnaLW2+2Gq6tLpjkyxkKcDe1mQ5yzoObek0te1tsH4Y1+5/PPudzv6YIkQkUTXAlO02K358FptduqPz2OIzc3N0Jn9I62FV79rmuXOKHiG2uT6trIKbup/DGmYEpm07XkNFMpGHEaMSXhbe1XKBwPt9y1r7F4pv6Ozy6lMjzPFm8z++0GU7L8niu0wRK8aqSoL6uxQKV/LGjvWYAqvrlwrljBt9ntubtdvlCb/dew2+UoGrqnov0sEq/k/Nbz8tbxwQB1s9sR2hZjLTGLDARAMAanHdIy2m4EHQUaU6JMFlMS1svknBACVjtSvfWENlCwXPQDc0r0w8RpGHj22ec8enxNG5TzqenBMie9SDY/TZPMtp9mYpRMO/h2oROIBqpdzq9ME9ZI+XUpKdXMXQ3OaOZRsKqXKc+8t5LN5xSRJoXEOCdicTi/ITQtPrS4ueF0vMG5iMHK7PfQUvLA8XCiPx2hSOOMKZnWO7Ztw9K2kBHjsVkfBPn8Oq+6ZG2AMWj2UrOeNe8v+kZr6ekB9LIYFgXRrpWeC6rosLfgNw20jRDWq9QXrA8+tbwln5OVy1lF633zQXv7pR3rjG/RX/NFy4dxJqVZR+cJ8j7PCecTrahu47zD+cCmbfnuF59zcfGYm5t7+mFmniMxFeFOq8JDCIFnn3/Or/3g+7Qh4HW/SNoAV9GlnLOO1jsfB6mNJvrzeY44Z3n9+rVqEDu2mw2fPnl6VqaqFnyGV+rGmdIk8ljKVQyNIzgjCHhOzENPKtDYjG89yQRC9Ny+/oYmXDCPE6fDG653Tp8xaYZ88ugRp9MNjXdsu5bdRnhmIvMGLPb2LbOVzToycnH6BQ3W8xIklLOfL78KUAwmWw0UzrNygyminBHawDYE8tKl6uRJKXFZolybjUqikJgVmSDw0dhuPartBnSgQ5713macJkrSiAmFKDQglQK8utzz6z/8Ad+8vGEaI1IJMYRJOfw54UPg4uKCX/+NX+P66orOe+H9Ijx2WzKpVLucVct5DbLqmMjaJDWOowYiSex2u8GUwvaTT9Z7aiogociWQTYrCjnNlBRBn83dpoOSmOcMOTHNA7VTe9N6rG0Y0kPb7Q9v+OSRYxxnhqEHCo+vH/Hy5U8J3rJtW3abjsYH7RyUk1hBgfccH7DdrPJ7HzrWJEv3zWIwWYZmmFLwxhC6hl3bftB2KYVEAiK5JGYi3oDxH4ftLv6pyNL2fS8UQa/NZsXoLPZEjBkfMj54bABrAlcXF2w2ez759Atu7gaOpxOn06BKFTIoJpFpW0FOv/e973F1cYFXZSex67z4P5lsJv7QWo+zHmcDKcp+GqOgk7VZ6s2bNwBsupYmeD55/Jjqv88ycPm3rd8ylKjPZUmi5eoLjy529P2JoT9SYiKSaRwE6zGuIbkdcTgwng7kbOgPb3i66yDD6dDjnFLKjMU7R9c0bDcdu67F5PSWrb6bnEgS+DDBP/evcp8+PB733G691alvWekDpYjPPbNbiZkFtFjsdgHM5L5Q5B5KgpKJ+Vv2Cz0+XOIPjmQKQ5oxKeGRKTd1e7RWmjKmyYuEkoEHnWTKjaraeM5LuckZw+NHF8TLPaUYYpYNf7fbS2NIlkC0IiRVz8+oAc7zxDTNIhyvi2itCFE759hsNoTO462TBq+gKNR+h/EPwGtZQJ0A4ioAaHSOsjc0reOTTz7HWNXkM1ZKnVZKFwucbrOiZDMGi3XVgAzDOHE89kzTvLw+BNGjVNVVgeO0w7xGkVkDSZuQkqSt3LOyoBYA2WRWfcP3OUtVSsgrn8oYJ9ebzSKGvWaFBawVVObB8fBBEJ8zk0tL0de68GFn/cs/pHw+GxEkTxlSEqTdubLoOIoPinjvFuHiEBzbi0suLgyXl4+4uz9yf3+k3Uzcnw5QDPv9Jc+efc7jx9fKydSiRtbSflb92JwZhmHJ6NcJH5YU4zLQAsA6yzRPlCEvnKiafDw4Sl5K3EuAmAX1T2nW4C+RckQ6vCW5o1hc8HRNwPqOZvJMhzeMdhbaQ8lsNjtS6rm/PzKPPV3XaLmzLDQCb42KnBsWikc9tSLcXNnQvd6HmnQK/Fw3s9p8U/vMzNnf9TvCYaz8tfV3BCJYG0mM1SekgCVJ4qyJLrqGhSQNCESh+ziL9e6jtF0xJblvcZbmIJHTmxWhNzgvFS
RrCsE6mrZlE1qurgJX1095+eqWUz8yzTNYw/39kaurKx4/fsSzZ8+4vr7GaxJjdPNKeX5QKRnHkZR007QegyMnMDqHaymZAkdVqpimif1uz7o1nm2ORpK72qRVtHzfBEfXePoxMZyOtI0XDdgM9PL+++DwocWGjuP4tu3qHlUmTqeeeTwBBecsXisL3js2XSNSTdTAY733v4jtmlxqxbfeMc5tt2qdoGCD0Q2cGh+YtNA0MEV4xu+x3eXsSiZmuY8ZqvjCR2O7FQAqOnTk1J+YJrvI4DljFq4/SaI8Zyw2FNquYx9ajG/4LFlub+85HGQOfT/O3N7ec3FxwZPHT/j8s88E6bRFeOQ1SchxteGUFv9uTNLGIwlavZeO+YLEDMbJvi4BdR2N/tBuQZI3Y9QX1QoMRe024K3sz/d3t5SSJJ6YC8yWzjtC1+HbDcPs6PuJ4+ENKUGaehKBVGCaZ8owcHt7h7WOrmtog2PbBr7znc9knXlXQUIk5qSHxhUvNos5M+0amCqK+QG7PQdQijUUnQJGrrzyJL5IYz1nvTZgikb44nOXqFheL8uaz6QJP3z8DB3UKuZagWwpaVOEi5hSFlTQerKWQJyTclLbdFjvFFYPWCM33SyvE56FtV6y+yzNAdZUdFAyVLKWrs+MLkb5k7OgKdY4cha6gbWOi4sL9hc7Nl1D4wMheJoQaELQB2gNx2omb7VULmUU8RfOiIbZF59/AqFlnCdSTswpY1LCJpn1bmwiCzmOrLOdc8nMs8EbGKfIMEhA7ZyjbQKX+x1Xl3vsosOnTnJx3iywekXgpJxzRmhW45BBCkaDXDUMHhL7q2HWbFE2E/MgQC8L/L8aV1Fneo7UFV2jYgTIj3EmuoZSiujDfgTHgnRI9qBJh5VGt5wg2/VGwzIRx1lRoBANVU/wgRAcxXhtvOpIpdAPI8Y6Npstl5fXbDq5/sqr0j3pgd3Wuc9GJdqcc2y6DSkmcdqlSBOWkvSNg6ZpuL6+kk2RxV0+uNYqGWKMISOlfO8NwRq2bavE/Qw5E6eIMYGuMQSdtDbZwtD34KSy4ShqI4ZpnOhPA/0wLhrIjRdnHOoAjOr07LmL0wa0upGfl+oL2jRxtrFUmz8Ptuv11RLTglQZ1sXIqkNpFhk2TFLayfqOy8x5RaKKyRSbyaqPap35aGz3vIFuKc/ZWkQEkCY9KZ/K9B1rPbbIRDfvAiFoZSe0FAy7YWLWsdK3hyMX+wsuLy+5vLzUudvlgQ96ewSvBKiZqjlZtSOrz7fWqHavWZql2rZlt90sZfEHtrs8I0Xlcgw5R4IztMGRYmLbNtrMJecyjzPGe9rGE7zDOod7j+3WwH213UE788V2N2/ZrqGcUajk5M5t9yxO+YVt9+wtl6mGS1BcwQhTnx1DeY/tPqyHFTCFbIWDaxG//THYbuXTC/IXRAppGnXvKggAYzBZR59nSylVMUE41CE0uNAQVH6o7Rr24445Fq6urtjvduz3ezabTjmfGuiUgpb9yCWTUxJt0nnWgMoI6OIsTdvQdp1OQATfCEXLhCD66dsN+/3+AZB1/rdd7oeMd48lE7SvpPGO66srvLf6jEKck9DAnNhtcJaU4ZAGyngkF4cpVarTkTKM/cTNmztKEbAsBEvXenaaWKEVy7eVXGoC+A53/R27LT+f3Z7d1wefo/GIdABD5eEW/bDKUV0b/PT+KM2ggid/qQA1awljOX3duKWskzVABIx0qRtr8XXEpFIDjNIBahm58iQtRctRMjGiJkGmaOm4lrBLkbucCyUnclrLl4ucB4Z5jhKgKp/lyaPHbLpA8F6CYetUKmq9YSvakxdOT0IzXQTlcgbhUrVb3rg3xBSJsWB9xqVMiqLXFlOhlNpYIKjDqR9o/MQ4TozzTC6Frm25vNhydbVnv9tgEPH+2txkLBgHMgtYA5UHbkopCWcZjpRPigSz5xe4eNf6LQ0kamakP183EHXIRfnDZwHBeRXrDGtAZICqWHfR7PNXf9TEo5ZnpFM54JKglaIRapX7W1QwXzrCrW7C9XvWWZoGMB0hNBhrmaOoUATf0DTSjJHTKvFV11roGWuZVJIogT2sdTrxytO1HTk0hOBpu0aCR93kr68vVaxa37u+v96vKuljkGfEO2i8JYbCfrthihPDmJhjJieZmb3Yguy40tDIjDEerGpJFMM4z5xOPX0/UBsBgnO0weMtYr+lNhrVv+Qs6yZ/Xvqra/OgjF+bQOrFfctx/qP6LNQk3SwvEAdoSp2iVX/L6mvX5LRYFl+D4aOy3RqkOuuxgCML0p4qugFmUfQwD/yzUf1O76WZZr/f0ratajQ6tvs92+2OrutomiCSTPksQVafn4tQeGKMTNO0xmPWLaL8ojTg8N6J3YYgQw+cY7vdstluHiAl3267RmzXG5rgiJEHthunBDniVMnAaHBLKX/ttks+s6Nf1HarDZrzBqw1wFmSO91/4D22+04jSRFzTlnj2I/D7y6ggPZ8eOcITUuMM6UkiRWWrviVi1vtVfyvgFemGNrW45z4wFIcu4s9bdPSKAXQAiXBkrqfgTBJA1Shocj6W+twXhpdN9sNbRaN9KYRO26cJF6brmO73XDuPcrZ9S2gXan6JRnvHE3wBG958vgKY+F06jmdelLMy7NaQQtDIceRbCcKAWclpvLGkWKhHybu76USYa2RALixwp2utBLDsu/L8XCvf+Bn/wrs9qGShcZ+1XbPn4+agNXfPOe91jgD2YN/hgzqhwPUGngYo6MyQ8AYJxqSMVJiLfmbJYMOvsH7BudXtNIoErCUVChQkgqk6yapSCA1yq/dvLUhQKcxpRQBuWkGFmM8nU4YYxbjevL4WknLLOe/rP0SvLEgFFLWUuUBLYMLEgCnYeTpxTUGZEwlIsCfUmaOkZQLc8nkbKlyLLkkbu5u6cJImk/MccY4eHR9yWefPeHxk0t2uwZskgaAIg+2tagM1totV6+zKhVUY7HKR7AIuV9mY0d1bubM8M4bWc6yLjXCh/8+C1TXBeMhjLCGvqVASlGbjgo+fARkKFh0cq3eQ+e8TjlpFxTNO3MWLHrRXQzixITnWEt9cl+91yBXUX9qAIuRBKp+uAalJWdKWrP5dZOvyYdSX5zn4uJSx5BKR3UbvJTRtQFrkRerTjJnCqvcUG1EyFmI/pu2YY4jF5d7Usm8ep04nQaQfIoYEy5GCo6UjPSW5ig2YjOlyICCoR85HE6cjiMgSV4THE3jdJqNIFxy3bw3Ca8cxXcRfRbk6DwoeMfWamDEii7Wd6qTSOrvmeVXV/bT+xzweaCUk0jifUy2u3TLW1EfsdZQYiLbWSZfqXSYdQFng9q3w6pMkvg0qc60raNppcO2FMNmL82P1thVgg5kUXKhJPG5KSeiVqymaV7WuNosGJl1bkRGcLvbsOk6Wi+0qhA8bdssifC/lu2SefUq0fcjMTk6zxJ8WKzQdf6abLciyG/z+JbE6OexXc7iXh7arzl77RJnfMB2y3mCVc7/bz4K262ouzGCFrsCXbthNIZcouz1BbzxOnUqLBWjVcKyBi/CF
XdOAl1jGzZ0CH6p6CUCosngnpXnX7n+9Q9I17+zDu8DxlihE+ogoP1uu2i3egWzrHcPTaJwNiq4SIyAwRhBh5vg6UIgeHjy9JrNZsNf/ORL3tzcQrRKJyvEmMFEYsqkHIFZg0BpMnRBKq6nfhB/DXhnCY2jCRbvEY52Dcg5h/7PmqRgmfCm+cCSxH+73eoL+Ha71U+p+OhS5a2/W5Z7+BC+rfVadbxLYvWzjp8vQLVWujGbBmsM/TQxjQMkJcQrZyNnkZ3CJhoAZ1Y4XLMlSdDLMqBLYdPlM3NJ0hB1nr3klTsBLKLQMWaGftSOfgkM99stn3/2Gd5ZLYjVyH591s/yZM0SEN3WKJO6cXZBUTebLaZkpnlmnmdKyjRtC0W6smUqViKagLMtxug0FyIxThzGAyUfIUf2uy0/+tGv8cMffo+L3QVtIyPhmhDwXkrQtZtf7rtuNlk6EtfSjwbXQClGlDdcke7e7EQHDuGcvs1BtmYdL3b+NzVwOnssz+x+WTNrhMdZ6nQlU8hJVRXyjG/aD5nUL+04D4qslh2D95QUyUo7qVNE1uC1WWe0W6fJWKaYqNmy2It3DUYlMySDjNrjlqhNPzknpaZoF/Q00vf9WlFApo7c3x8opbDf7bjY77m82NF2nlAngXCeIetRHlqwSGalpdiw2264vNwzxcgnnz7GOsvd/S19P9D6rTjxZBmHiWwiQ/QUNmTV5s0p8/LVHZtmZhpHxlm5WtZwsd/w9Mk1Tx9fUcqsCU5teHl4nuIQRTA2p5W2cv4yQcI00bJGUM0stvv2UVGw2q16jkmta8OS9S9JqVm+jS1nGGoRPyMJVv5obHfhl5tV1sk7T/KBvNizjPKVEZ8B7wIYdMMXnmOKUVZWG6mtszi8pP4VBNA1sGiz2oJ0F3KMzNPENI260Qv/lCIVqxgTIbQ8efyIi4sdXRtomyAjPuuG+Je0XR88x+OB59+8pLFWgo7Zk/JEKtNfme2+U8Ks51gVN+prfmHbVRTKFOFanvnWpZv/3Ad/i+2C9kcUUZ9YGi/1px+D7Va6WU6ZhKH1nqYRilEqdmn66UJL0AC12qpRHelc8hJU1jWyTqYUFqCkNQkQICU/+FOKUM6maWQcR2k6tQ0ojVAaq2VTvLy45PJyz3YjSKpbruPhdRUe3v+lAKAJTCaz2264uNhivsqamLWkFDmeTjRug5kzblaq1ThxmiEXh8ieWVIuvHj5Dds2U+KBcR4xXiq4267lk8eP+PSTJ0rxEymrKoO22KSpccPDYFN+pHZnDVarScJ5/vnttjaSy3PtHry03i97toD10xc7z3LfxL8V3lrm9x4/l1B/KtKcJHJJM3c3b5iHCWct19fXlCK6pGJsFpszxmrnF5BT1HKqPHY14BRBXXUCcDblZDkBuaAakNWygQ/SIYqgUNM0keLEfrdjt21pmlWHqzJOi2ZoZQl818C1GluN9HNMpCKds00TyPPM0A/kXGiawGazYZyF51H5HtEkXAsYKdlDUmHzCfJM11g+ffqUH3z/e1xf7gje443Ma3bWLOLAFg04MYtRoeNOKwNtkTrRtVyNQK5Y1rT+6urhrGExttrQ8BB5qmWAt9AuwGiZLqsUkmSYHpPkFVV/1tlffampHtJMkpnmhMuZOE7kGDEUvHcEdZTWeuGz6Szmh2iflhCX9bTIaAMh+NcRvbnEJa2qwVCMM9PZQAmvZTgpO0kQPQwjm7aje/yY/X7HZtPK1I5qnmfPRKZQjPxZ5ELJlFyH4Ig+6267Y7/b8/zlG3IW5Yl5GBn7ifZiRymZFDMpG1IpDJM0KBgrzrKUmWN/ZBx6cpQRvj44Hl094vvf+5zPP3/C1dUG50RBwxgJAISL6M7OuyyISj0qKrgcykXKObOAAry7SRhFDStfUb65IlrSi6YVk7eAhWq/aDBUS3IlBGxWeSPMR2O7VQqnlMI4TVgdECKIkpX53b5Rvn8QEXNjKLVZjFreS5ofrN5k2WAKSzIlXLD1v1zSAxm/SheYxroHiN0aY7jY72l1+lSnTZ/vY0P+69puSoVpGBj7iWa3U06sgZhJxfyV2q7TaSjntqtlJ+AXt9366zVoqD6++tzqJ0pZK1zfarug42Y1YfEeZzyWRMofh+02jVA8cs700wjOU0oCUxZ+aeMaWi+0JrlfteqncUCWKX9Z7dY6oQsY1n6YGphK3JCXfohcwYGl8VXWLefM0PcSrA5C99hutrqOQk+xZk0KzqvmSffDYs7UGkqWiqVSHIsR7ewmNALxlEJKE9MwMA0TzXYLFOY5KeJfOA6FprsgmwxmBpuZpp4cJ0giZelsYbtr+c53PufzZ59yfbXH2rxwvKvUm9Akz++EWfaxqq5zziMtOS0V6Z/Xbt253SskW2OPUpQdv1S2JIRet1BtAK/n4T2U+Sx8/vbjg1Z9d3fH1dUVxjsihX4snI4HDvcHSkp0TbtoPcpiAEW5SSFIp2FS2SOzLppBkbx6rXWBaqB4HvlbaX6wWDyO4qT01ARPbpQ7lxOpwPXVJdeXlyIM/SGOhQTyFCSDSGdRQEVVJNiy7Pd7Tv1AcmrgznN9/Yi7w4lhzCRVahDHdY4G58UQ2uC5vt7y3e99h4uLnYy9NA6HSMJQCiVp175BKEfvbNBroHkePJlqLJiFH8yZcVhrZFKSOsHaEVzPuQZx5vxGvPPxa2NZNVoRwC840LLKzJymVT7mV3xIOV0C+ZwTw5TI84wFWh8IoQPEAckSamOUM6ScFrSOuu6lyHQMuyY/payb+nLUB9uuc82XxoizF+WcGIYIxSwahzLJROkoVd/2DEE5/0IkyOyCFJOlOx0rSHHTBAyVAiP3xDsvSFuRchNFxmQLUr86YglgJ2LpmYcDeR5p2sCnn37CZ5895epqT9sIj89ZFI0W2kINwh9s8m8d5+Ui4ZWXJayRZFuz/Hq1Z7YbY1wCBasq6vXn56+vaMf6pJzxvPW+5pxVfVMbKz4S25Vk31JSZJpHSDKpLxhpUrXOyRo5SaysjiqrPmKxXT1qwVjW/ME2T3lbGcJUn2uWZ/38D8A8i8yfwXJ1cSFBs9Npg0WT2fP8/4EJ/GK2myexD2ctTfj/t3duvW0kRxT++jYX3kT6Jq93vfuW5P//qTwk2RhBbIuc6a48VPdMk5KFGMjGeugDCAIocUj01FRXnao63ak9zwL2f2u71mhVBcNis7e/lxLnf2G7T12jVCQXn2Bu1+bbtptvDcXnC4WxhFtB+h+FwvwnSVphLe1V3mXfo0L+1juc8xhrs3RkmcRnIcUkgXFlYNRq4Ap5GHhhlQodAKwJxJLM2lWWEaN2+3B+wGAZ+0FL/uUzFsJqXd+Cxb/ne5LQYLrM00ieB3F5CC7OM5eL1aFCLN4FrYJGtDVQBETVGvSBUfY3xpk4P5Cmz8TLFyDy+vVb3r9/m+1WddOt0XZLHTK3BN+thJXI8qwua1m9BiWYF9I3fG7du7qwxFmVoQzKlsrCeu+l2hvLul3D5A8zOXaQHLA+h2cD1GmaWMRtJRFzZi1Gp427TjOhlLT8Y60jdJ5hHPAh
EIvjWQynPFllEq7qZrjZZMiBQ3GW5QxiLUZlphZlbJyzyJx4dTqx3++XcsBiyzerluPTlfam9KrmRc1satcFTndH/vq3T9iLHhnYDwO73Y4pCsjEZA0JR3I9wXcY63M2ZkjiEd+xGy2vTkfevX1D13eatem318GtzFKbpMeQPlm6ZGXV1iUqhlQShKcCAlP9nzDPKztYJMCgZqz0/bXhCCZPSpOPDI3LWfaGpMF1gilelvLMj4aITqJ7a5ij9qNO84XehbyZr/ZYnL2yqJY0FdmiOhlg3bwNWRaqIK1GVb3POYeXRBSP9zNdCFkE2mkf3TQhkiWbyrGS+u2hME5AVfADtCldqgQs5V6OJDngtVpp8M4vG9dm3PLqNDP2W87nizLgxqBHZQ74rkNcUOcuOqiRoiaHoXMcdns+/HTP69dHNuNAl48ctdkxW7MGNHBFPF3dk/JT7EtS6deV5TnVpVzfvJ6+tcqrFWepTGPFcOdEq3bIAMmIPj9ZSBtysJAbiuIcX4ztAosweZKU/ahR/1IqIOQ1MkY3hnxUmsSYS3FZAeAmeVojR7naiKAE7qw+f57xSfDZ19scvM3zjKQZa6Dv9bQza1c/qhcrFZoSaen3+17bTSkx9APHuyP77ZYvX77+Ibarm83T96Le7IHKdtM3bbcEBbXNp6TEQJFdrIPUtfXladst11YeMS6Dl6T4omzXe79ops8xqY5nZkvLeoAm8M55nUifdQ8zN+tfJ/mxDPFVgeSKTHpVPsE7p6dCBW29E2TR7bXoQGQIPitYsF63SgdEf63fp/xnzfaKgBUkH83ahaAJpnf0Xc/hcGC32XI+n7HOIXkwz4YeGwLiA9ZZLImEJ87k+MrSdyMffn7Pm9cndtsBn4e2na18Lixtgbd9/jXqwUutYD/vc2/tdlXreOxzQZNSe3VPDKkm0vJR6KqpFbXiiqohPYdnA9T7+3tOpxNDF3CSmM5f6Z1HkuCtYwgd3ltSKhOdPcMw0g091lliUhmBEBzO6+IUxkolHsxCiV9vZuXBlcVhAsukow8Ou/HECNOkD2fnPMfjkXEcKhZzHZp4xONcEVqyOqh8ipT3lu12gw89n/71leA8x8MdMRvm2PcM/Y6YHLM48D3ODcpqGHDMWBGC2zEEuNtv6HzITIPN+nia/WuPTmaZsbnPZu2jXFD18V1nOaVmln+K0ZlVv28dcHDL32pGSl9br3lb4pdyXKEIKX6G9BnHhDCTJlV0uMxnHs6X50zq/4bD4cB+v8cBk3dIjMTgOGy2KosWI951uOApfU4uOIYQiNNEDsspeqUx3faq5TJGuQf51Zpl8d4jWd7E5E1/3OxADPM0M10mnDEqn7Lb6OR+uVod8N7iib8tLGOcSVEDuHEcidPMuN3w8ddfuX+XuJwn/vH333EhgLUk4xA74nyP+E6naOVCvAhxdhhG+mA53Z347beP7HdbQi6162EdEKe4lMJcp1+sZO61/ZaAowSoxhg9ACNV9psZDxFT2XdOJ03J+FfbLJWZdSKYRxtdbb+aCD+AfMbKTIoXIHJ+QbZ7d3fH4XBgOj+QpgtWIiRR9tSq/FfKwzsudFivCU6aI1Nd0rPaHw5kJmndghZ3V32uMSZrIgveePquo5xKpr46cD6fSVETOGcMb9+8YjP2qv8o8vSFa3yn7XZ9xy+/fOT+nTBP8Q+zXUE4m0v1fa4368IiQdbNNEosmBu/W2y3VhFZr7VqhJf11vPN3RKgfst287tBLiT5jOVCjGdMmjlPlxdhu4fDgd1ux+XBIZXdqsoMOiBEUiWFfsR3AdUBh5lZD/bJov5zSrnXOgeoJs+HpOs1XWIIu+6LXQw6IyGJ4/GEd/2iP11UJg6HA+PQ5YpVVVUwa4CaX3yEZZ4m/1mrVNr/fTweVa7QOX768DOvTvdIgn/+/gnfdVrRNCBuANeRXJ9VDxJMPeevBmd6vBfGvucvf/4Tx7s9XT5Eo9henCIxB+ZzfMrWrvf4MmStAWo+UOA77HZNqMwjnwtwS4QKdtW1NoIejnLGpK/4dAbRYfNp/vezNmWeKsE1NDQ0NDQ0NDQ0/Cj8eHXfhoaGhoaGhoaGhgotQG1oaGhoaGhoaHhRaAFqQ0NDQ0NDQ0PDi0ILUBsaGhoaGhoaGl4UWoDa0NDQ0NDQ0NDwotAC1IaGhoaGhoaGhheF/wCOFTSJ5YxtrwAAAABJRU5ErkJggg==\n", + "text/plain": [ + "<Figure size 864x864 with 16 Axes>" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "import matplotlib.pylab as plt\n", + "%matplotlib inline\n", + "\n", + "plt.figure(figsize=(12, 12))\n", + "for i in range(16):\n", + " plt.subplot(4, 4, i + 1)\n", + " plt.imshow(b[\"video\"][0, i, ...].permute(1, 2, 0))\n", + " plt.axis(\"off\")" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [], + "source": [ + "## Cleanup\n", + "import os, shutil\n", + "os.remove(\"./WUzgd7C1pWA.mp4\")\n", + "shutil.rmtree(\"./dataset\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.5-final" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/examples/python/visualization_utils.ipynb 
b/pretrained_model/pytorch_vision_v0.10.0/examples/python/visualization_utils.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/README.rst b/pretrained_model/pytorch_vision_v0.10.0/gallery/README.rst new file mode 100644 index 0000000000000000000000000000000000000000..319052106b51ca2f637a56fdee89c54c2f428942 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/gallery/README.rst @@ -0,0 +1,4 @@ +Example gallery +=============== + +Below is a gallery of examples \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/astronaut.jpg b/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/astronaut.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/dog1.jpg b/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/dog1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/dog2.jpg b/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/dog2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/imagenet_class_index.json b/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/imagenet_class_index.json new file mode 100644 index 0000000000000000000000000000000000000000..5fe0dfefcd3dca3b1d169c7ab51b93de327e07e2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/gallery/assets/imagenet_class_index.json @@ -0,0 +1 @@ +{"0": ["n01440764", "tench"], "1": ["n01443537", "goldfish"], "2": ["n01484850", "great_white_shark"], "3": ["n01491361", "tiger_shark"], "4": ["n01494475", "hammerhead"], "5": ["n01496331", "electric_ray"], "6": ["n01498041", "stingray"], "7": ["n01514668", "cock"], "8": ["n01514859", "hen"], "9": ["n01518878", "ostrich"], "10": ["n01530575", "brambling"], "11": ["n01531178", "goldfinch"], "12": ["n01532829", "house_finch"], "13": ["n01534433", "junco"], "14": ["n01537544", "indigo_bunting"], "15": ["n01558993", "robin"], "16": ["n01560419", "bulbul"], "17": ["n01580077", "jay"], "18": ["n01582220", "magpie"], "19": ["n01592084", "chickadee"], "20": ["n01601694", "water_ouzel"], "21": ["n01608432", "kite"], "22": ["n01614925", "bald_eagle"], "23": ["n01616318", "vulture"], "24": ["n01622779", "great_grey_owl"], "25": ["n01629819", "European_fire_salamander"], "26": ["n01630670", "common_newt"], "27": ["n01631663", "eft"], "28": ["n01632458", "spotted_salamander"], "29": ["n01632777", "axolotl"], "30": ["n01641577", "bullfrog"], "31": ["n01644373", "tree_frog"], "32": ["n01644900", "tailed_frog"], "33": ["n01664065", "loggerhead"], "34": ["n01665541", "leatherback_turtle"], "35": ["n01667114", "mud_turtle"], "36": ["n01667778", "terrapin"], "37": ["n01669191", "box_turtle"], "38": ["n01675722", "banded_gecko"], "39": ["n01677366", "common_iguana"], "40": ["n01682714", "American_chameleon"], "41": ["n01685808", "whiptail"], "42": ["n01687978", "agama"], "43": ["n01688243", "frilled_lizard"], "44": ["n01689811", "alligator_lizard"], "45": ["n01692333", "Gila_monster"], "46": ["n01693334", "green_lizard"], "47": ["n01694178", "African_chameleon"], "48": ["n01695060", "Komodo_dragon"], "49": ["n01697457", "African_crocodile"], 
"50": ["n01698640", "American_alligator"], "51": ["n01704323", "triceratops"], "52": ["n01728572", "thunder_snake"], "53": ["n01728920", "ringneck_snake"], "54": ["n01729322", "hognose_snake"], "55": ["n01729977", "green_snake"], "56": ["n01734418", "king_snake"], "57": ["n01735189", "garter_snake"], "58": ["n01737021", "water_snake"], "59": ["n01739381", "vine_snake"], "60": ["n01740131", "night_snake"], "61": ["n01742172", "boa_constrictor"], "62": ["n01744401", "rock_python"], "63": ["n01748264", "Indian_cobra"], "64": ["n01749939", "green_mamba"], "65": ["n01751748", "sea_snake"], "66": ["n01753488", "horned_viper"], "67": ["n01755581", "diamondback"], "68": ["n01756291", "sidewinder"], "69": ["n01768244", "trilobite"], "70": ["n01770081", "harvestman"], "71": ["n01770393", "scorpion"], "72": ["n01773157", "black_and_gold_garden_spider"], "73": ["n01773549", "barn_spider"], "74": ["n01773797", "garden_spider"], "75": ["n01774384", "black_widow"], "76": ["n01774750", "tarantula"], "77": ["n01775062", "wolf_spider"], "78": ["n01776313", "tick"], "79": ["n01784675", "centipede"], "80": ["n01795545", "black_grouse"], "81": ["n01796340", "ptarmigan"], "82": ["n01797886", "ruffed_grouse"], "83": ["n01798484", "prairie_chicken"], "84": ["n01806143", "peacock"], "85": ["n01806567", "quail"], "86": ["n01807496", "partridge"], "87": ["n01817953", "African_grey"], "88": ["n01818515", "macaw"], "89": ["n01819313", "sulphur-crested_cockatoo"], "90": ["n01820546", "lorikeet"], "91": ["n01824575", "coucal"], "92": ["n01828970", "bee_eater"], "93": ["n01829413", "hornbill"], "94": ["n01833805", "hummingbird"], "95": ["n01843065", "jacamar"], "96": ["n01843383", "toucan"], "97": ["n01847000", "drake"], "98": ["n01855032", "red-breasted_merganser"], "99": ["n01855672", "goose"], "100": ["n01860187", "black_swan"], "101": ["n01871265", "tusker"], "102": ["n01872401", "echidna"], "103": ["n01873310", "platypus"], "104": ["n01877812", "wallaby"], "105": ["n01882714", "koala"], "106": ["n01883070", "wombat"], "107": ["n01910747", "jellyfish"], "108": ["n01914609", "sea_anemone"], "109": ["n01917289", "brain_coral"], "110": ["n01924916", "flatworm"], "111": ["n01930112", "nematode"], "112": ["n01943899", "conch"], "113": ["n01944390", "snail"], "114": ["n01945685", "slug"], "115": ["n01950731", "sea_slug"], "116": ["n01955084", "chiton"], "117": ["n01968897", "chambered_nautilus"], "118": ["n01978287", "Dungeness_crab"], "119": ["n01978455", "rock_crab"], "120": ["n01980166", "fiddler_crab"], "121": ["n01981276", "king_crab"], "122": ["n01983481", "American_lobster"], "123": ["n01984695", "spiny_lobster"], "124": ["n01985128", "crayfish"], "125": ["n01986214", "hermit_crab"], "126": ["n01990800", "isopod"], "127": ["n02002556", "white_stork"], "128": ["n02002724", "black_stork"], "129": ["n02006656", "spoonbill"], "130": ["n02007558", "flamingo"], "131": ["n02009229", "little_blue_heron"], "132": ["n02009912", "American_egret"], "133": ["n02011460", "bittern"], "134": ["n02012849", "crane"], "135": ["n02013706", "limpkin"], "136": ["n02017213", "European_gallinule"], "137": ["n02018207", "American_coot"], "138": ["n02018795", "bustard"], "139": ["n02025239", "ruddy_turnstone"], "140": ["n02027492", "red-backed_sandpiper"], "141": ["n02028035", "redshank"], "142": ["n02033041", "dowitcher"], "143": ["n02037110", "oystercatcher"], "144": ["n02051845", "pelican"], "145": ["n02056570", "king_penguin"], "146": ["n02058221", "albatross"], "147": ["n02066245", "grey_whale"], "148": ["n02071294", "killer_whale"], 
"149": ["n02074367", "dugong"], "150": ["n02077923", "sea_lion"], "151": ["n02085620", "Chihuahua"], "152": ["n02085782", "Japanese_spaniel"], "153": ["n02085936", "Maltese_dog"], "154": ["n02086079", "Pekinese"], "155": ["n02086240", "Shih-Tzu"], "156": ["n02086646", "Blenheim_spaniel"], "157": ["n02086910", "papillon"], "158": ["n02087046", "toy_terrier"], "159": ["n02087394", "Rhodesian_ridgeback"], "160": ["n02088094", "Afghan_hound"], "161": ["n02088238", "basset"], "162": ["n02088364", "beagle"], "163": ["n02088466", "bloodhound"], "164": ["n02088632", "bluetick"], "165": ["n02089078", "black-and-tan_coonhound"], "166": ["n02089867", "Walker_hound"], "167": ["n02089973", "English_foxhound"], "168": ["n02090379", "redbone"], "169": ["n02090622", "borzoi"], "170": ["n02090721", "Irish_wolfhound"], "171": ["n02091032", "Italian_greyhound"], "172": ["n02091134", "whippet"], "173": ["n02091244", "Ibizan_hound"], "174": ["n02091467", "Norwegian_elkhound"], "175": ["n02091635", "otterhound"], "176": ["n02091831", "Saluki"], "177": ["n02092002", "Scottish_deerhound"], "178": ["n02092339", "Weimaraner"], "179": ["n02093256", "Staffordshire_bullterrier"], "180": ["n02093428", "American_Staffordshire_terrier"], "181": ["n02093647", "Bedlington_terrier"], "182": ["n02093754", "Border_terrier"], "183": ["n02093859", "Kerry_blue_terrier"], "184": ["n02093991", "Irish_terrier"], "185": ["n02094114", "Norfolk_terrier"], "186": ["n02094258", "Norwich_terrier"], "187": ["n02094433", "Yorkshire_terrier"], "188": ["n02095314", "wire-haired_fox_terrier"], "189": ["n02095570", "Lakeland_terrier"], "190": ["n02095889", "Sealyham_terrier"], "191": ["n02096051", "Airedale"], "192": ["n02096177", "cairn"], "193": ["n02096294", "Australian_terrier"], "194": ["n02096437", "Dandie_Dinmont"], "195": ["n02096585", "Boston_bull"], "196": ["n02097047", "miniature_schnauzer"], "197": ["n02097130", "giant_schnauzer"], "198": ["n02097209", "standard_schnauzer"], "199": ["n02097298", "Scotch_terrier"], "200": ["n02097474", "Tibetan_terrier"], "201": ["n02097658", "silky_terrier"], "202": ["n02098105", "soft-coated_wheaten_terrier"], "203": ["n02098286", "West_Highland_white_terrier"], "204": ["n02098413", "Lhasa"], "205": ["n02099267", "flat-coated_retriever"], "206": ["n02099429", "curly-coated_retriever"], "207": ["n02099601", "golden_retriever"], "208": ["n02099712", "Labrador_retriever"], "209": ["n02099849", "Chesapeake_Bay_retriever"], "210": ["n02100236", "German_short-haired_pointer"], "211": ["n02100583", "vizsla"], "212": ["n02100735", "English_setter"], "213": ["n02100877", "Irish_setter"], "214": ["n02101006", "Gordon_setter"], "215": ["n02101388", "Brittany_spaniel"], "216": ["n02101556", "clumber"], "217": ["n02102040", "English_springer"], "218": ["n02102177", "Welsh_springer_spaniel"], "219": ["n02102318", "cocker_spaniel"], "220": ["n02102480", "Sussex_spaniel"], "221": ["n02102973", "Irish_water_spaniel"], "222": ["n02104029", "kuvasz"], "223": ["n02104365", "schipperke"], "224": ["n02105056", "groenendael"], "225": ["n02105162", "malinois"], "226": ["n02105251", "briard"], "227": ["n02105412", "kelpie"], "228": ["n02105505", "komondor"], "229": ["n02105641", "Old_English_sheepdog"], "230": ["n02105855", "Shetland_sheepdog"], "231": ["n02106030", "collie"], "232": ["n02106166", "Border_collie"], "233": ["n02106382", "Bouvier_des_Flandres"], "234": ["n02106550", "Rottweiler"], "235": ["n02106662", "German_shepherd"], "236": ["n02107142", "Doberman"], "237": ["n02107312", "miniature_pinscher"], "238": 
["n02107574", "Greater_Swiss_Mountain_dog"], "239": ["n02107683", "Bernese_mountain_dog"], "240": ["n02107908", "Appenzeller"], "241": ["n02108000", "EntleBucher"], "242": ["n02108089", "boxer"], "243": ["n02108422", "bull_mastiff"], "244": ["n02108551", "Tibetan_mastiff"], "245": ["n02108915", "French_bulldog"], "246": ["n02109047", "Great_Dane"], "247": ["n02109525", "Saint_Bernard"], "248": ["n02109961", "Eskimo_dog"], "249": ["n02110063", "malamute"], "250": ["n02110185", "Siberian_husky"], "251": ["n02110341", "dalmatian"], "252": ["n02110627", "affenpinscher"], "253": ["n02110806", "basenji"], "254": ["n02110958", "pug"], "255": ["n02111129", "Leonberg"], "256": ["n02111277", "Newfoundland"], "257": ["n02111500", "Great_Pyrenees"], "258": ["n02111889", "Samoyed"], "259": ["n02112018", "Pomeranian"], "260": ["n02112137", "chow"], "261": ["n02112350", "keeshond"], "262": ["n02112706", "Brabancon_griffon"], "263": ["n02113023", "Pembroke"], "264": ["n02113186", "Cardigan"], "265": ["n02113624", "toy_poodle"], "266": ["n02113712", "miniature_poodle"], "267": ["n02113799", "standard_poodle"], "268": ["n02113978", "Mexican_hairless"], "269": ["n02114367", "timber_wolf"], "270": ["n02114548", "white_wolf"], "271": ["n02114712", "red_wolf"], "272": ["n02114855", "coyote"], "273": ["n02115641", "dingo"], "274": ["n02115913", "dhole"], "275": ["n02116738", "African_hunting_dog"], "276": ["n02117135", "hyena"], "277": ["n02119022", "red_fox"], "278": ["n02119789", "kit_fox"], "279": ["n02120079", "Arctic_fox"], "280": ["n02120505", "grey_fox"], "281": ["n02123045", "tabby"], "282": ["n02123159", "tiger_cat"], "283": ["n02123394", "Persian_cat"], "284": ["n02123597", "Siamese_cat"], "285": ["n02124075", "Egyptian_cat"], "286": ["n02125311", "cougar"], "287": ["n02127052", "lynx"], "288": ["n02128385", "leopard"], "289": ["n02128757", "snow_leopard"], "290": ["n02128925", "jaguar"], "291": ["n02129165", "lion"], "292": ["n02129604", "tiger"], "293": ["n02130308", "cheetah"], "294": ["n02132136", "brown_bear"], "295": ["n02133161", "American_black_bear"], "296": ["n02134084", "ice_bear"], "297": ["n02134418", "sloth_bear"], "298": ["n02137549", "mongoose"], "299": ["n02138441", "meerkat"], "300": ["n02165105", "tiger_beetle"], "301": ["n02165456", "ladybug"], "302": ["n02167151", "ground_beetle"], "303": ["n02168699", "long-horned_beetle"], "304": ["n02169497", "leaf_beetle"], "305": ["n02172182", "dung_beetle"], "306": ["n02174001", "rhinoceros_beetle"], "307": ["n02177972", "weevil"], "308": ["n02190166", "fly"], "309": ["n02206856", "bee"], "310": ["n02219486", "ant"], "311": ["n02226429", "grasshopper"], "312": ["n02229544", "cricket"], "313": ["n02231487", "walking_stick"], "314": ["n02233338", "cockroach"], "315": ["n02236044", "mantis"], "316": ["n02256656", "cicada"], "317": ["n02259212", "leafhopper"], "318": ["n02264363", "lacewing"], "319": ["n02268443", "dragonfly"], "320": ["n02268853", "damselfly"], "321": ["n02276258", "admiral"], "322": ["n02277742", "ringlet"], "323": ["n02279972", "monarch"], "324": ["n02280649", "cabbage_butterfly"], "325": ["n02281406", "sulphur_butterfly"], "326": ["n02281787", "lycaenid"], "327": ["n02317335", "starfish"], "328": ["n02319095", "sea_urchin"], "329": ["n02321529", "sea_cucumber"], "330": ["n02325366", "wood_rabbit"], "331": ["n02326432", "hare"], "332": ["n02328150", "Angora"], "333": ["n02342885", "hamster"], "334": ["n02346627", "porcupine"], "335": ["n02356798", "fox_squirrel"], "336": ["n02361337", "marmot"], "337": ["n02363005", "beaver"], 
"338": ["n02364673", "guinea_pig"], "339": ["n02389026", "sorrel"], "340": ["n02391049", "zebra"], "341": ["n02395406", "hog"], "342": ["n02396427", "wild_boar"], "343": ["n02397096", "warthog"], "344": ["n02398521", "hippopotamus"], "345": ["n02403003", "ox"], "346": ["n02408429", "water_buffalo"], "347": ["n02410509", "bison"], "348": ["n02412080", "ram"], "349": ["n02415577", "bighorn"], "350": ["n02417914", "ibex"], "351": ["n02422106", "hartebeest"], "352": ["n02422699", "impala"], "353": ["n02423022", "gazelle"], "354": ["n02437312", "Arabian_camel"], "355": ["n02437616", "llama"], "356": ["n02441942", "weasel"], "357": ["n02442845", "mink"], "358": ["n02443114", "polecat"], "359": ["n02443484", "black-footed_ferret"], "360": ["n02444819", "otter"], "361": ["n02445715", "skunk"], "362": ["n02447366", "badger"], "363": ["n02454379", "armadillo"], "364": ["n02457408", "three-toed_sloth"], "365": ["n02480495", "orangutan"], "366": ["n02480855", "gorilla"], "367": ["n02481823", "chimpanzee"], "368": ["n02483362", "gibbon"], "369": ["n02483708", "siamang"], "370": ["n02484975", "guenon"], "371": ["n02486261", "patas"], "372": ["n02486410", "baboon"], "373": ["n02487347", "macaque"], "374": ["n02488291", "langur"], "375": ["n02488702", "colobus"], "376": ["n02489166", "proboscis_monkey"], "377": ["n02490219", "marmoset"], "378": ["n02492035", "capuchin"], "379": ["n02492660", "howler_monkey"], "380": ["n02493509", "titi"], "381": ["n02493793", "spider_monkey"], "382": ["n02494079", "squirrel_monkey"], "383": ["n02497673", "Madagascar_cat"], "384": ["n02500267", "indri"], "385": ["n02504013", "Indian_elephant"], "386": ["n02504458", "African_elephant"], "387": ["n02509815", "lesser_panda"], "388": ["n02510455", "giant_panda"], "389": ["n02514041", "barracouta"], "390": ["n02526121", "eel"], "391": ["n02536864", "coho"], "392": ["n02606052", "rock_beauty"], "393": ["n02607072", "anemone_fish"], "394": ["n02640242", "sturgeon"], "395": ["n02641379", "gar"], "396": ["n02643566", "lionfish"], "397": ["n02655020", "puffer"], "398": ["n02666196", "abacus"], "399": ["n02667093", "abaya"], "400": ["n02669723", "academic_gown"], "401": ["n02672831", "accordion"], "402": ["n02676566", "acoustic_guitar"], "403": ["n02687172", "aircraft_carrier"], "404": ["n02690373", "airliner"], "405": ["n02692877", "airship"], "406": ["n02699494", "altar"], "407": ["n02701002", "ambulance"], "408": ["n02704792", "amphibian"], "409": ["n02708093", "analog_clock"], "410": ["n02727426", "apiary"], "411": ["n02730930", "apron"], "412": ["n02747177", "ashcan"], "413": ["n02749479", "assault_rifle"], "414": ["n02769748", "backpack"], "415": ["n02776631", "bakery"], "416": ["n02777292", "balance_beam"], "417": ["n02782093", "balloon"], "418": ["n02783161", "ballpoint"], "419": ["n02786058", "Band_Aid"], "420": ["n02787622", "banjo"], "421": ["n02788148", "bannister"], "422": ["n02790996", "barbell"], "423": ["n02791124", "barber_chair"], "424": ["n02791270", "barbershop"], "425": ["n02793495", "barn"], "426": ["n02794156", "barometer"], "427": ["n02795169", "barrel"], "428": ["n02797295", "barrow"], "429": ["n02799071", "baseball"], "430": ["n02802426", "basketball"], "431": ["n02804414", "bassinet"], "432": ["n02804610", "bassoon"], "433": ["n02807133", "bathing_cap"], "434": ["n02808304", "bath_towel"], "435": ["n02808440", "bathtub"], "436": ["n02814533", "beach_wagon"], "437": ["n02814860", "beacon"], "438": ["n02815834", "beaker"], "439": ["n02817516", "bearskin"], "440": ["n02823428", "beer_bottle"], "441": 
["n02823750", "beer_glass"], "442": ["n02825657", "bell_cote"], "443": ["n02834397", "bib"], "444": ["n02835271", "bicycle-built-for-two"], "445": ["n02837789", "bikini"], "446": ["n02840245", "binder"], "447": ["n02841315", "binoculars"], "448": ["n02843684", "birdhouse"], "449": ["n02859443", "boathouse"], "450": ["n02860847", "bobsled"], "451": ["n02865351", "bolo_tie"], "452": ["n02869837", "bonnet"], "453": ["n02870880", "bookcase"], "454": ["n02871525", "bookshop"], "455": ["n02877765", "bottlecap"], "456": ["n02879718", "bow"], "457": ["n02883205", "bow_tie"], "458": ["n02892201", "brass"], "459": ["n02892767", "brassiere"], "460": ["n02894605", "breakwater"], "461": ["n02895154", "breastplate"], "462": ["n02906734", "broom"], "463": ["n02909870", "bucket"], "464": ["n02910353", "buckle"], "465": ["n02916936", "bulletproof_vest"], "466": ["n02917067", "bullet_train"], "467": ["n02927161", "butcher_shop"], "468": ["n02930766", "cab"], "469": ["n02939185", "caldron"], "470": ["n02948072", "candle"], "471": ["n02950826", "cannon"], "472": ["n02951358", "canoe"], "473": ["n02951585", "can_opener"], "474": ["n02963159", "cardigan"], "475": ["n02965783", "car_mirror"], "476": ["n02966193", "carousel"], "477": ["n02966687", "carpenter's_kit"], "478": ["n02971356", "carton"], "479": ["n02974003", "car_wheel"], "480": ["n02977058", "cash_machine"], "481": ["n02978881", "cassette"], "482": ["n02979186", "cassette_player"], "483": ["n02980441", "castle"], "484": ["n02981792", "catamaran"], "485": ["n02988304", "CD_player"], "486": ["n02992211", "cello"], "487": ["n02992529", "cellular_telephone"], "488": ["n02999410", "chain"], "489": ["n03000134", "chainlink_fence"], "490": ["n03000247", "chain_mail"], "491": ["n03000684", "chain_saw"], "492": ["n03014705", "chest"], "493": ["n03016953", "chiffonier"], "494": ["n03017168", "chime"], "495": ["n03018349", "china_cabinet"], "496": ["n03026506", "Christmas_stocking"], "497": ["n03028079", "church"], "498": ["n03032252", "cinema"], "499": ["n03041632", "cleaver"], "500": ["n03042490", "cliff_dwelling"], "501": ["n03045698", "cloak"], "502": ["n03047690", "clog"], "503": ["n03062245", "cocktail_shaker"], "504": ["n03063599", "coffee_mug"], "505": ["n03063689", "coffeepot"], "506": ["n03065424", "coil"], "507": ["n03075370", "combination_lock"], "508": ["n03085013", "computer_keyboard"], "509": ["n03089624", "confectionery"], "510": ["n03095699", "container_ship"], "511": ["n03100240", "convertible"], "512": ["n03109150", "corkscrew"], "513": ["n03110669", "cornet"], "514": ["n03124043", "cowboy_boot"], "515": ["n03124170", "cowboy_hat"], "516": ["n03125729", "cradle"], "517": ["n03126707", "crane"], "518": ["n03127747", "crash_helmet"], "519": ["n03127925", "crate"], "520": ["n03131574", "crib"], "521": ["n03133878", "Crock_Pot"], "522": ["n03134739", "croquet_ball"], "523": ["n03141823", "crutch"], "524": ["n03146219", "cuirass"], "525": ["n03160309", "dam"], "526": ["n03179701", "desk"], "527": ["n03180011", "desktop_computer"], "528": ["n03187595", "dial_telephone"], "529": ["n03188531", "diaper"], "530": ["n03196217", "digital_clock"], "531": ["n03197337", "digital_watch"], "532": ["n03201208", "dining_table"], "533": ["n03207743", "dishrag"], "534": ["n03207941", "dishwasher"], "535": ["n03208938", "disk_brake"], "536": ["n03216828", "dock"], "537": ["n03218198", "dogsled"], "538": ["n03220513", "dome"], "539": ["n03223299", "doormat"], "540": ["n03240683", "drilling_platform"], "541": ["n03249569", "drum"], "542": ["n03250847", "drumstick"], 
"543": ["n03255030", "dumbbell"], "544": ["n03259280", "Dutch_oven"], "545": ["n03271574", "electric_fan"], "546": ["n03272010", "electric_guitar"], "547": ["n03272562", "electric_locomotive"], "548": ["n03290653", "entertainment_center"], "549": ["n03291819", "envelope"], "550": ["n03297495", "espresso_maker"], "551": ["n03314780", "face_powder"], "552": ["n03325584", "feather_boa"], "553": ["n03337140", "file"], "554": ["n03344393", "fireboat"], "555": ["n03345487", "fire_engine"], "556": ["n03347037", "fire_screen"], "557": ["n03355925", "flagpole"], "558": ["n03372029", "flute"], "559": ["n03376595", "folding_chair"], "560": ["n03379051", "football_helmet"], "561": ["n03384352", "forklift"], "562": ["n03388043", "fountain"], "563": ["n03388183", "fountain_pen"], "564": ["n03388549", "four-poster"], "565": ["n03393912", "freight_car"], "566": ["n03394916", "French_horn"], "567": ["n03400231", "frying_pan"], "568": ["n03404251", "fur_coat"], "569": ["n03417042", "garbage_truck"], "570": ["n03424325", "gasmask"], "571": ["n03425413", "gas_pump"], "572": ["n03443371", "goblet"], "573": ["n03444034", "go-kart"], "574": ["n03445777", "golf_ball"], "575": ["n03445924", "golfcart"], "576": ["n03447447", "gondola"], "577": ["n03447721", "gong"], "578": ["n03450230", "gown"], "579": ["n03452741", "grand_piano"], "580": ["n03457902", "greenhouse"], "581": ["n03459775", "grille"], "582": ["n03461385", "grocery_store"], "583": ["n03467068", "guillotine"], "584": ["n03476684", "hair_slide"], "585": ["n03476991", "hair_spray"], "586": ["n03478589", "half_track"], "587": ["n03481172", "hammer"], "588": ["n03482405", "hamper"], "589": ["n03483316", "hand_blower"], "590": ["n03485407", "hand-held_computer"], "591": ["n03485794", "handkerchief"], "592": ["n03492542", "hard_disc"], "593": ["n03494278", "harmonica"], "594": ["n03495258", "harp"], "595": ["n03496892", "harvester"], "596": ["n03498962", "hatchet"], "597": ["n03527444", "holster"], "598": ["n03529860", "home_theater"], "599": ["n03530642", "honeycomb"], "600": ["n03532672", "hook"], "601": ["n03534580", "hoopskirt"], "602": ["n03535780", "horizontal_bar"], "603": ["n03538406", "horse_cart"], "604": ["n03544143", "hourglass"], "605": ["n03584254", "iPod"], "606": ["n03584829", "iron"], "607": ["n03590841", "jack-o'-lantern"], "608": ["n03594734", "jean"], "609": ["n03594945", "jeep"], "610": ["n03595614", "jersey"], "611": ["n03598930", "jigsaw_puzzle"], "612": ["n03599486", "jinrikisha"], "613": ["n03602883", "joystick"], "614": ["n03617480", "kimono"], "615": ["n03623198", "knee_pad"], "616": ["n03627232", "knot"], "617": ["n03630383", "lab_coat"], "618": ["n03633091", "ladle"], "619": ["n03637318", "lampshade"], "620": ["n03642806", "laptop"], "621": ["n03649909", "lawn_mower"], "622": ["n03657121", "lens_cap"], "623": ["n03658185", "letter_opener"], "624": ["n03661043", "library"], "625": ["n03662601", "lifeboat"], "626": ["n03666591", "lighter"], "627": ["n03670208", "limousine"], "628": ["n03673027", "liner"], "629": ["n03676483", "lipstick"], "630": ["n03680355", "Loafer"], "631": ["n03690938", "lotion"], "632": ["n03691459", "loudspeaker"], "633": ["n03692522", "loupe"], "634": ["n03697007", "lumbermill"], "635": ["n03706229", "magnetic_compass"], "636": ["n03709823", "mailbag"], "637": ["n03710193", "mailbox"], "638": ["n03710637", "maillot"], "639": ["n03710721", "maillot"], "640": ["n03717622", "manhole_cover"], "641": ["n03720891", "maraca"], "642": ["n03721384", "marimba"], "643": ["n03724870", "mask"], "644": ["n03729826", 
"matchstick"], "645": ["n03733131", "maypole"], "646": ["n03733281", "maze"], "647": ["n03733805", "measuring_cup"], "648": ["n03742115", "medicine_chest"], "649": ["n03743016", "megalith"], "650": ["n03759954", "microphone"], "651": ["n03761084", "microwave"], "652": ["n03763968", "military_uniform"], "653": ["n03764736", "milk_can"], "654": ["n03769881", "minibus"], "655": ["n03770439", "miniskirt"], "656": ["n03770679", "minivan"], "657": ["n03773504", "missile"], "658": ["n03775071", "mitten"], "659": ["n03775546", "mixing_bowl"], "660": ["n03776460", "mobile_home"], "661": ["n03777568", "Model_T"], "662": ["n03777754", "modem"], "663": ["n03781244", "monastery"], "664": ["n03782006", "monitor"], "665": ["n03785016", "moped"], "666": ["n03786901", "mortar"], "667": ["n03787032", "mortarboard"], "668": ["n03788195", "mosque"], "669": ["n03788365", "mosquito_net"], "670": ["n03791053", "motor_scooter"], "671": ["n03792782", "mountain_bike"], "672": ["n03792972", "mountain_tent"], "673": ["n03793489", "mouse"], "674": ["n03794056", "mousetrap"], "675": ["n03796401", "moving_van"], "676": ["n03803284", "muzzle"], "677": ["n03804744", "nail"], "678": ["n03814639", "neck_brace"], "679": ["n03814906", "necklace"], "680": ["n03825788", "nipple"], "681": ["n03832673", "notebook"], "682": ["n03837869", "obelisk"], "683": ["n03838899", "oboe"], "684": ["n03840681", "ocarina"], "685": ["n03841143", "odometer"], "686": ["n03843555", "oil_filter"], "687": ["n03854065", "organ"], "688": ["n03857828", "oscilloscope"], "689": ["n03866082", "overskirt"], "690": ["n03868242", "oxcart"], "691": ["n03868863", "oxygen_mask"], "692": ["n03871628", "packet"], "693": ["n03873416", "paddle"], "694": ["n03874293", "paddlewheel"], "695": ["n03874599", "padlock"], "696": ["n03876231", "paintbrush"], "697": ["n03877472", "pajama"], "698": ["n03877845", "palace"], "699": ["n03884397", "panpipe"], "700": ["n03887697", "paper_towel"], "701": ["n03888257", "parachute"], "702": ["n03888605", "parallel_bars"], "703": ["n03891251", "park_bench"], "704": ["n03891332", "parking_meter"], "705": ["n03895866", "passenger_car"], "706": ["n03899768", "patio"], "707": ["n03902125", "pay-phone"], "708": ["n03903868", "pedestal"], "709": ["n03908618", "pencil_box"], "710": ["n03908714", "pencil_sharpener"], "711": ["n03916031", "perfume"], "712": ["n03920288", "Petri_dish"], "713": ["n03924679", "photocopier"], "714": ["n03929660", "pick"], "715": ["n03929855", "pickelhaube"], "716": ["n03930313", "picket_fence"], "717": ["n03930630", "pickup"], "718": ["n03933933", "pier"], "719": ["n03935335", "piggy_bank"], "720": ["n03937543", "pill_bottle"], "721": ["n03938244", "pillow"], "722": ["n03942813", "ping-pong_ball"], "723": ["n03944341", "pinwheel"], "724": ["n03947888", "pirate"], "725": ["n03950228", "pitcher"], "726": ["n03954731", "plane"], "727": ["n03956157", "planetarium"], "728": ["n03958227", "plastic_bag"], "729": ["n03961711", "plate_rack"], "730": ["n03967562", "plow"], "731": ["n03970156", "plunger"], "732": ["n03976467", "Polaroid_camera"], "733": ["n03976657", "pole"], "734": ["n03977966", "police_van"], "735": ["n03980874", "poncho"], "736": ["n03982430", "pool_table"], "737": ["n03983396", "pop_bottle"], "738": ["n03991062", "pot"], "739": ["n03992509", "potter's_wheel"], "740": ["n03995372", "power_drill"], "741": ["n03998194", "prayer_rug"], "742": ["n04004767", "printer"], "743": ["n04005630", "prison"], "744": ["n04008634", "projectile"], "745": ["n04009552", "projector"], "746": ["n04019541", "puck"], "747": 
["n04023962", "punching_bag"], "748": ["n04026417", "purse"], "749": ["n04033901", "quill"], "750": ["n04033995", "quilt"], "751": ["n04037443", "racer"], "752": ["n04039381", "racket"], "753": ["n04040759", "radiator"], "754": ["n04041544", "radio"], "755": ["n04044716", "radio_telescope"], "756": ["n04049303", "rain_barrel"], "757": ["n04065272", "recreational_vehicle"], "758": ["n04067472", "reel"], "759": ["n04069434", "reflex_camera"], "760": ["n04070727", "refrigerator"], "761": ["n04074963", "remote_control"], "762": ["n04081281", "restaurant"], "763": ["n04086273", "revolver"], "764": ["n04090263", "rifle"], "765": ["n04099969", "rocking_chair"], "766": ["n04111531", "rotisserie"], "767": ["n04116512", "rubber_eraser"], "768": ["n04118538", "rugby_ball"], "769": ["n04118776", "rule"], "770": ["n04120489", "running_shoe"], "771": ["n04125021", "safe"], "772": ["n04127249", "safety_pin"], "773": ["n04131690", "saltshaker"], "774": ["n04133789", "sandal"], "775": ["n04136333", "sarong"], "776": ["n04141076", "sax"], "777": ["n04141327", "scabbard"], "778": ["n04141975", "scale"], "779": ["n04146614", "school_bus"], "780": ["n04147183", "schooner"], "781": ["n04149813", "scoreboard"], "782": ["n04152593", "screen"], "783": ["n04153751", "screw"], "784": ["n04154565", "screwdriver"], "785": ["n04162706", "seat_belt"], "786": ["n04179913", "sewing_machine"], "787": ["n04192698", "shield"], "788": ["n04200800", "shoe_shop"], "789": ["n04201297", "shoji"], "790": ["n04204238", "shopping_basket"], "791": ["n04204347", "shopping_cart"], "792": ["n04208210", "shovel"], "793": ["n04209133", "shower_cap"], "794": ["n04209239", "shower_curtain"], "795": ["n04228054", "ski"], "796": ["n04229816", "ski_mask"], "797": ["n04235860", "sleeping_bag"], "798": ["n04238763", "slide_rule"], "799": ["n04239074", "sliding_door"], "800": ["n04243546", "slot"], "801": ["n04251144", "snorkel"], "802": ["n04252077", "snowmobile"], "803": ["n04252225", "snowplow"], "804": ["n04254120", "soap_dispenser"], "805": ["n04254680", "soccer_ball"], "806": ["n04254777", "sock"], "807": ["n04258138", "solar_dish"], "808": ["n04259630", "sombrero"], "809": ["n04263257", "soup_bowl"], "810": ["n04264628", "space_bar"], "811": ["n04265275", "space_heater"], "812": ["n04266014", "space_shuttle"], "813": ["n04270147", "spatula"], "814": ["n04273569", "speedboat"], "815": ["n04275548", "spider_web"], "816": ["n04277352", "spindle"], "817": ["n04285008", "sports_car"], "818": ["n04286575", "spotlight"], "819": ["n04296562", "stage"], "820": ["n04310018", "steam_locomotive"], "821": ["n04311004", "steel_arch_bridge"], "822": ["n04311174", "steel_drum"], "823": ["n04317175", "stethoscope"], "824": ["n04325704", "stole"], "825": ["n04326547", "stone_wall"], "826": ["n04328186", "stopwatch"], "827": ["n04330267", "stove"], "828": ["n04332243", "strainer"], "829": ["n04335435", "streetcar"], "830": ["n04336792", "stretcher"], "831": ["n04344873", "studio_couch"], "832": ["n04346328", "stupa"], "833": ["n04347754", "submarine"], "834": ["n04350905", "suit"], "835": ["n04355338", "sundial"], "836": ["n04355933", "sunglass"], "837": ["n04356056", "sunglasses"], "838": ["n04357314", "sunscreen"], "839": ["n04366367", "suspension_bridge"], "840": ["n04367480", "swab"], "841": ["n04370456", "sweatshirt"], "842": ["n04371430", "swimming_trunks"], "843": ["n04371774", "swing"], "844": ["n04372370", "switch"], "845": ["n04376876", "syringe"], "846": ["n04380533", "table_lamp"], "847": ["n04389033", "tank"], "848": ["n04392985", 
"tape_player"], "849": ["n04398044", "teapot"], "850": ["n04399382", "teddy"], "851": ["n04404412", "television"], "852": ["n04409515", "tennis_ball"], "853": ["n04417672", "thatch"], "854": ["n04418357", "theater_curtain"], "855": ["n04423845", "thimble"], "856": ["n04428191", "thresher"], "857": ["n04429376", "throne"], "858": ["n04435653", "tile_roof"], "859": ["n04442312", "toaster"], "860": ["n04443257", "tobacco_shop"], "861": ["n04447861", "toilet_seat"], "862": ["n04456115", "torch"], "863": ["n04458633", "totem_pole"], "864": ["n04461696", "tow_truck"], "865": ["n04462240", "toyshop"], "866": ["n04465501", "tractor"], "867": ["n04467665", "trailer_truck"], "868": ["n04476259", "tray"], "869": ["n04479046", "trench_coat"], "870": ["n04482393", "tricycle"], "871": ["n04483307", "trimaran"], "872": ["n04485082", "tripod"], "873": ["n04486054", "triumphal_arch"], "874": ["n04487081", "trolleybus"], "875": ["n04487394", "trombone"], "876": ["n04493381", "tub"], "877": ["n04501370", "turnstile"], "878": ["n04505470", "typewriter_keyboard"], "879": ["n04507155", "umbrella"], "880": ["n04509417", "unicycle"], "881": ["n04515003", "upright"], "882": ["n04517823", "vacuum"], "883": ["n04522168", "vase"], "884": ["n04523525", "vault"], "885": ["n04525038", "velvet"], "886": ["n04525305", "vending_machine"], "887": ["n04532106", "vestment"], "888": ["n04532670", "viaduct"], "889": ["n04536866", "violin"], "890": ["n04540053", "volleyball"], "891": ["n04542943", "waffle_iron"], "892": ["n04548280", "wall_clock"], "893": ["n04548362", "wallet"], "894": ["n04550184", "wardrobe"], "895": ["n04552348", "warplane"], "896": ["n04553703", "washbasin"], "897": ["n04554684", "washer"], "898": ["n04557648", "water_bottle"], "899": ["n04560804", "water_jug"], "900": ["n04562935", "water_tower"], "901": ["n04579145", "whiskey_jug"], "902": ["n04579432", "whistle"], "903": ["n04584207", "wig"], "904": ["n04589890", "window_screen"], "905": ["n04590129", "window_shade"], "906": ["n04591157", "Windsor_tie"], "907": ["n04591713", "wine_bottle"], "908": ["n04592741", "wing"], "909": ["n04596742", "wok"], "910": ["n04597913", "wooden_spoon"], "911": ["n04599235", "wool"], "912": ["n04604644", "worm_fence"], "913": ["n04606251", "wreck"], "914": ["n04612504", "yawl"], "915": ["n04613696", "yurt"], "916": ["n06359193", "web_site"], "917": ["n06596364", "comic_book"], "918": ["n06785654", "crossword_puzzle"], "919": ["n06794110", "street_sign"], "920": ["n06874185", "traffic_light"], "921": ["n07248320", "book_jacket"], "922": ["n07565083", "menu"], "923": ["n07579787", "plate"], "924": ["n07583066", "guacamole"], "925": ["n07584110", "consomme"], "926": ["n07590611", "hot_pot"], "927": ["n07613480", "trifle"], "928": ["n07614500", "ice_cream"], "929": ["n07615774", "ice_lolly"], "930": ["n07684084", "French_loaf"], "931": ["n07693725", "bagel"], "932": ["n07695742", "pretzel"], "933": ["n07697313", "cheeseburger"], "934": ["n07697537", "hotdog"], "935": ["n07711569", "mashed_potato"], "936": ["n07714571", "head_cabbage"], "937": ["n07714990", "broccoli"], "938": ["n07715103", "cauliflower"], "939": ["n07716358", "zucchini"], "940": ["n07716906", "spaghetti_squash"], "941": ["n07717410", "acorn_squash"], "942": ["n07717556", "butternut_squash"], "943": ["n07718472", "cucumber"], "944": ["n07718747", "artichoke"], "945": ["n07720875", "bell_pepper"], "946": ["n07730033", "cardoon"], "947": ["n07734744", "mushroom"], "948": ["n07742313", "Granny_Smith"], "949": ["n07745940", "strawberry"], "950": ["n07747607", 
"orange"], "951": ["n07749582", "lemon"], "952": ["n07753113", "fig"], "953": ["n07753275", "pineapple"], "954": ["n07753592", "banana"], "955": ["n07754684", "jackfruit"], "956": ["n07760859", "custard_apple"], "957": ["n07768694", "pomegranate"], "958": ["n07802026", "hay"], "959": ["n07831146", "carbonara"], "960": ["n07836838", "chocolate_sauce"], "961": ["n07860988", "dough"], "962": ["n07871810", "meat_loaf"], "963": ["n07873807", "pizza"], "964": ["n07875152", "potpie"], "965": ["n07880968", "burrito"], "966": ["n07892512", "red_wine"], "967": ["n07920052", "espresso"], "968": ["n07930864", "cup"], "969": ["n07932039", "eggnog"], "970": ["n09193705", "alp"], "971": ["n09229709", "bubble"], "972": ["n09246464", "cliff"], "973": ["n09256479", "coral_reef"], "974": ["n09288635", "geyser"], "975": ["n09332890", "lakeside"], "976": ["n09399592", "promontory"], "977": ["n09421951", "sandbar"], "978": ["n09428293", "seashore"], "979": ["n09468604", "valley"], "980": ["n09472597", "volcano"], "981": ["n09835506", "ballplayer"], "982": ["n10148035", "groom"], "983": ["n10565667", "scuba_diver"], "984": ["n11879895", "rapeseed"], "985": ["n11939491", "daisy"], "986": ["n12057211", "yellow_lady's_slipper"], "987": ["n12144580", "corn"], "988": ["n12267677", "acorn"], "989": ["n12620546", "hip"], "990": ["n12768682", "buckeye"], "991": ["n12985857", "coral_fungus"], "992": ["n12998815", "agaric"], "993": ["n13037406", "gyromitra"], "994": ["n13040303", "stinkhorn"], "995": ["n13044778", "earthstar"], "996": ["n13052670", "hen-of-the-woods"], "997": ["n13054560", "bolete"], "998": ["n13133613", "ear"], "999": ["n15075141", "toilet_tissue"]} \ No newline at end of file diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_scripted_tensor_transforms.py b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_scripted_tensor_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..4eeeeb311b93b1bc8d3bdff56f90583e75d548af --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_scripted_tensor_transforms.py @@ -0,0 +1,145 @@ +""" +========================= +Tensor transforms and JIT +========================= + +This example illustrates various features that are now supported by the +:ref:`image transformations <transforms>` on Tensor images. In particular, we +show how image transforms can be performed on GPU, and how one can also script +them using JIT compilation. + +Prior to v0.8.0, transforms in torchvision have traditionally been PIL-centric +and presented multiple limitations due to that. Now, since v0.8.0, transforms +implementations are Tensor and PIL compatible and we can achieve the following +new features: + +- transform multi-band torch tensor images (with more than 3-4 channels) +- torchscript transforms together with your model for deployment +- support for GPU acceleration +- batched transformation such as for videos +- read and decode data directly as torch tensor with torchscript support (for PNG and JPEG image formats) + +.. note:: + These features are only possible with **Tensor** images. 
+""" + +from pathlib import Path + +import matplotlib.pyplot as plt +import numpy as np + +import torch +import torchvision.transforms as T +from torchvision.io import read_image + + +plt.rcParams["savefig.bbox"] = 'tight' +torch.manual_seed(1) + + +def show(imgs): + fix, axs = plt.subplots(ncols=len(imgs), squeeze=False) + for i, img in enumerate(imgs): + img = T.ToPILImage()(img.to('cpu')) + axs[0, i].imshow(np.asarray(img)) + axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[]) + + +#################################### +# The :func:`~torchvision.io.read_image` function allows to read an image and +# directly load it as a tensor + +dog1 = read_image(str(Path('assets') / 'dog1.jpg')) +dog2 = read_image(str(Path('assets') / 'dog2.jpg')) +show([dog1, dog2]) + +#################################### +# Transforming images on GPU +# -------------------------- +# Most transforms natively support tensors on top of PIL images (to visualize +# the effect of the transforms, you may refer to see +# :ref:`sphx_glr_auto_examples_plot_transforms.py`). +# Using tensor images, we can run the transforms on GPUs if cuda is available! + +import torch.nn as nn + +transforms = torch.nn.Sequential( + T.RandomCrop(224), + T.RandomHorizontalFlip(p=0.3), +) + +device = 'cuda' if torch.cuda.is_available() else 'cpu' +dog1 = dog1.to(device) +dog2 = dog2.to(device) + +transformed_dog1 = transforms(dog1) +transformed_dog2 = transforms(dog2) +show([transformed_dog1, transformed_dog2]) + +#################################### +# Scriptable transforms for easier deployment via torchscript +# ----------------------------------------------------------- +# We now show how to combine image transformations and a model forward pass, +# while using ``torch.jit.script`` to obtain a single scripted module. +# +# Let's define a ``Predictor`` module that transforms the input tensor and then +# applies an ImageNet model on it. 
+ +from torchvision.models import resnet18 + + +class Predictor(nn.Module): + + def __init__(self): + super().__init__() + self.resnet18 = resnet18(pretrained=True, progress=False).eval() + self.transforms = nn.Sequential( + T.Resize([256, ]), # We use single int value inside a list due to torchscript type restrictions + T.CenterCrop(224), + T.ConvertImageDtype(torch.float), + T.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]) + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + with torch.no_grad(): + x = self.transforms(x) + y_pred = self.resnet18(x) + return y_pred.argmax(dim=1) + + +#################################### +# Now, let's define scripted and non-scripted instances of ``Predictor`` and +# apply it on multiple tensor images of the same size + +predictor = Predictor().to(device) +scripted_predictor = torch.jit.script(predictor).to(device) + +batch = torch.stack([dog1, dog2]).to(device) + +res = predictor(batch) +res_scripted = scripted_predictor(batch) + +#################################### +# We can verify that the prediction of the scripted and non-scripted models are +# the same: + +import json + +with open(Path('assets') / 'imagenet_class_index.json', 'r') as labels_file: + labels = json.load(labels_file) + +for i, (pred, pred_scripted) in enumerate(zip(res, res_scripted)): + assert pred == pred_scripted + print(f"Prediction for Dog {i + 1}: {labels[str(pred.item())]}") + +#################################### +# Since the model is scripted, it can be easily dumped on disk an re-used + +import tempfile + +with tempfile.NamedTemporaryFile() as f: + scripted_predictor.save(f.name) + + dumped_scripted_predictor = torch.jit.load(f.name) + res_scripted_dumped = dumped_scripted_predictor(batch) +assert (res_scripted_dumped == res_scripted).all() diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_transforms.py b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..032dd584c26ea395043052559bde511dcc1c2afa --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_transforms.py @@ -0,0 +1,282 @@ +""" +========================== +Illustration of transforms +========================== + +This example illustrates the various transforms available in :ref:`the +torchvision.transforms module <transforms>`. +""" + +from PIL import Image +from pathlib import Path +import matplotlib.pyplot as plt +import numpy as np + +import torch +import torchvision.transforms as T + + +plt.rcParams["savefig.bbox"] = 'tight' +orig_img = Image.open(Path('assets') / 'astronaut.jpg') +# if you change the seed, make sure that the randomly-applied transforms +# properly show that the image can be both transformed and *not* transformed! 
+torch.manual_seed(0) + + +def plot(imgs, with_orig=True, row_title=None, **imshow_kwargs): + if not isinstance(imgs[0], list): + # Make a 2d grid even if there's just 1 row + imgs = [imgs] + + num_rows = len(imgs) + num_cols = len(imgs[0]) + with_orig + fig, axs = plt.subplots(nrows=num_rows, ncols=num_cols, squeeze=False) + for row_idx, row in enumerate(imgs): + row = [orig_img] + row if with_orig else row + for col_idx, img in enumerate(row): + ax = axs[row_idx, col_idx] + ax.imshow(np.asarray(img), **imshow_kwargs) + ax.set(xticklabels=[], yticklabels=[], xticks=[], yticks=[]) + + if with_orig: + axs[0, 0].set(title='Original image') + axs[0, 0].title.set_size(8) + if row_title is not None: + for row_idx in range(num_rows): + axs[row_idx, 0].set(ylabel=row_title[row_idx]) + + plt.tight_layout() + + +#################################### +# Pad +# --- +# The :class:`~torchvision.transforms.Pad` transform +# (see also :func:`~torchvision.transforms.functional.pad`) +# fills image borders with some pixel values. +padded_imgs = [T.Pad(padding=padding)(orig_img) for padding in (3, 10, 30, 50)] +plot(padded_imgs) + +#################################### +# Resize +# ------ +# The :class:`~torchvision.transforms.Resize` transform +# (see also :func:`~torchvision.transforms.functional.resize`) +# resizes an image. +resized_imgs = [T.Resize(size=size)(orig_img) for size in (30, 50, 100, orig_img.size)] +plot(resized_imgs) + +#################################### +# CenterCrop +# ---------- +# The :class:`~torchvision.transforms.CenterCrop` transform +# (see also :func:`~torchvision.transforms.functional.center_crop`) +# crops the given image at the center. +center_crops = [T.CenterCrop(size=size)(orig_img) for size in (30, 50, 100, orig_img.size)] +plot(center_crops) + +#################################### +# FiveCrop +# -------- +# The :class:`~torchvision.transforms.FiveCrop` transform +# (see also :func:`~torchvision.transforms.functional.five_crop`) +# crops the given image into four corners and the central crop. +(top_left, top_right, bottom_left, bottom_right, center) = T.FiveCrop(size=(100, 100))(orig_img) +plot([top_left, top_right, bottom_left, bottom_right, center]) + +#################################### +# Grayscale +# --------- +# The :class:`~torchvision.transforms.Grayscale` transform +# (see also :func:`~torchvision.transforms.functional.to_grayscale`) +# converts an image to grayscale +gray_img = T.Grayscale()(orig_img) +plot([gray_img], cmap='gray') + +#################################### +# Random transforms +# ----------------- +# The following transforms are random, which means that the same transfomer +# instance will produce different result each time it transforms a given image. +# +# ColorJitter +# ~~~~~~~~~~~ +# The :class:`~torchvision.transforms.ColorJitter` transform +# randomly changes the brightness, saturation, and other properties of an image. +jitter = T.ColorJitter(brightness=.5, hue=.3) +jitted_imgs = [jitter(orig_img) for _ in range(4)] +plot(jitted_imgs) + +#################################### +# GaussianBlur +# ~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.GaussianBlur` transform +# (see also :func:`~torchvision.transforms.functional.gaussian_blur`) +# performs gaussian blur transform on an image. 
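# Note: kernel_size=(5, 9) below is a (kx, ky) kernel-size pair, and
# sigma=(0.1, 5) is a range from which a standard deviation is drawn anew on
# every call, so each of the four blurred outputs uses a different amount of
# blur.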
+blurrer = T.GaussianBlur(kernel_size=(5, 9), sigma=(0.1, 5)) +blurred_imgs = [blurrer(orig_img) for _ in range(4)] +plot(blurred_imgs) + +#################################### +# RandomPerspective +# ~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomPerspective` transform +# (see also :func:`~torchvision.transforms.functional.perspective`) +# performs random perspective transform on an image. +perspective_transformer = T.RandomPerspective(distortion_scale=0.6, p=1.0) +perspective_imgs = [perspective_transformer(orig_img) for _ in range(4)] +plot(perspective_imgs) + +#################################### +# RandomRotation +# ~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomRotation` transform +# (see also :func:`~torchvision.transforms.functional.rotate`) +# rotates an image with random angle. +rotater = T.RandomRotation(degrees=(0, 180)) +rotated_imgs = [rotater(orig_img) for _ in range(4)] +plot(rotated_imgs) + +#################################### +# RandomAffine +# ~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomAffine` transform +# (see also :func:`~torchvision.transforms.functional.affine`) +# performs random affine transform on an image. +affine_transfomer = T.RandomAffine(degrees=(30, 70), translate=(0.1, 0.3), scale=(0.5, 0.75)) +affine_imgs = [affine_transfomer(orig_img) for _ in range(4)] +plot(affine_imgs) + +#################################### +# RandomCrop +# ~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomCrop` transform +# (see also :func:`~torchvision.transforms.functional.crop`) +# crops an image at a random location. +cropper = T.RandomCrop(size=(128, 128)) +crops = [cropper(orig_img) for _ in range(4)] +plot(crops) + +#################################### +# RandomResizedCrop +# ~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomResizedCrop` transform +# (see also :func:`~torchvision.transforms.functional.resized_crop`) +# crops an image at a random location, and then resizes the crop to a given +# size. +resize_cropper = T.RandomResizedCrop(size=(32, 32)) +resized_crops = [resize_cropper(orig_img) for _ in range(4)] +plot(resized_crops) + +#################################### +# RandomInvert +# ~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomInvert` transform +# (see also :func:`~torchvision.transforms.functional.invert`) +# randomly inverts the colors of the given image. +inverter = T.RandomInvert() +invertered_imgs = [inverter(orig_img) for _ in range(4)] +plot(invertered_imgs) + +#################################### +# RandomPosterize +# ~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomPosterize` transform +# (see also :func:`~torchvision.transforms.functional.posterize`) +# randomly posterizes the image by reducing the number of bits +# of each color channel. +posterizer = T.RandomPosterize(bits=2) +posterized_imgs = [posterizer(orig_img) for _ in range(4)] +plot(posterized_imgs) + +#################################### +# RandomSolarize +# ~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomSolarize` transform +# (see also :func:`~torchvision.transforms.functional.solarize`) +# randomly solarizes the image by inverting all pixel values above +# the threshold. 
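# Note: threshold=192.0 below is on the 0-255 scale of the PIL image, so only
# fairly bright pixels get inverted; RandomSolarize also takes a probability
# ``p`` (0.5 by default), so some of the four outputs may come back unchanged.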
+solarizer = T.RandomSolarize(threshold=192.0) +solarized_imgs = [solarizer(orig_img) for _ in range(4)] +plot(solarized_imgs) + +#################################### +# RandomAdjustSharpness +# ~~~~~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomAdjustSharpness` transform +# (see also :func:`~torchvision.transforms.functional.adjust_sharpness`) +# randomly adjusts the sharpness of the given image. +sharpness_adjuster = T.RandomAdjustSharpness(sharpness_factor=2) +sharpened_imgs = [sharpness_adjuster(orig_img) for _ in range(4)] +plot(sharpened_imgs) + +#################################### +# RandomAutocontrast +# ~~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomAutocontrast` transform +# (see also :func:`~torchvision.transforms.functional.autocontrast`) +# randomly applies autocontrast to the given image. +autocontraster = T.RandomAutocontrast() +autocontrasted_imgs = [autocontraster(orig_img) for _ in range(4)] +plot(autocontrasted_imgs) + +#################################### +# RandomEqualize +# ~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomEqualize` transform +# (see also :func:`~torchvision.transforms.functional.equalize`) +# randomly equalizes the histogram of the given image. +equalizer = T.RandomEqualize() +equalized_imgs = [equalizer(orig_img) for _ in range(4)] +plot(equalized_imgs) + +#################################### +# AutoAugment +# ~~~~~~~~~~~ +# The :class:`~torchvision.transforms.AutoAugment` transform +# automatically augments data based on a given auto-augmentation policy. +# See :class:`~torchvision.transforms.AutoAugmentPolicy` for the available policies. +policies = [T.AutoAugmentPolicy.CIFAR10, T.AutoAugmentPolicy.IMAGENET, T.AutoAugmentPolicy.SVHN] +augmenters = [T.AutoAugment(policy) for policy in policies] +imgs = [ + [augmenter(orig_img) for _ in range(4)] + for augmenter in augmenters +] +row_title = [str(policy).split('.')[-1] for policy in policies] +plot(imgs, row_title=row_title) + +#################################### +# Randomly-applied transforms +# --------------------------- +# +# Some transforms are randomly-applied given a probability ``p``. That is, the +# transformed image may actually be the same as the original one, even when +# called with the same transformer instance! +# +# RandomHorizontalFlip +# ~~~~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomHorizontalFlip` transform +# (see also :func:`~torchvision.transforms.functional.hflip`) +# performs horizontal flip of an image, with a given probability. +hflipper = T.RandomHorizontalFlip(p=0.5) +transformed_imgs = [hflipper(orig_img) for _ in range(4)] +plot(transformed_imgs) + +#################################### +# RandomVerticalFlip +# ~~~~~~~~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomVerticalFlip` transform +# (see also :func:`~torchvision.transforms.functional.vflip`) +# performs vertical flip of an image, with a given probability. +vflipper = T.RandomVerticalFlip(p=0.5) +transformed_imgs = [vflipper(orig_img) for _ in range(4)] +plot(transformed_imgs) + +#################################### +# RandomApply +# ~~~~~~~~~~~ +# The :class:`~torchvision.transforms.RandomApply` transform +# randomly applies a list of transforms, with a given probability. 
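+
+####################################
+# Editorial aside (not part of the upstream gallery script): when the same
+# random parameters have to be applied to several inputs -- for example an
+# image and its segmentation mask -- a common pattern is to sample the
+# parameters once with the transform's ``get_params`` static method and then
+# call the functional API. A minimal sketch, reusing ``orig_img`` twice for
+# brevity; the names ``crop_a`` and ``crop_b`` are illustrative only.
+import torchvision.transforms.functional as TF
+
+i, j, h, w = T.RandomCrop.get_params(orig_img, output_size=(128, 128))
+crop_a = TF.crop(orig_img, i, j, h, w)
+crop_b = TF.crop(orig_img, i, j, h, w)  # same crop window as crop_a
+plot([crop_a, crop_b])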
+
+####################################
+# RandomApply
+# ~~~~~~~~~~~
+# The :class:`~torchvision.transforms.RandomApply` transform
+# randomly applies a list of transforms, with a given probability.
+applier = T.RandomApply(transforms=[T.RandomCrop(size=(64, 64))], p=0.5)
+transformed_imgs = [applier(orig_img) for _ in range(4)]
+plot(transformed_imgs)
diff --git a/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_visualization_utils.py b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_visualization_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..feedee4e3cfe7ca3e37440fdb5397c3534132c46
--- /dev/null
+++ b/pretrained_model/pytorch_vision_v0.10.0/gallery/plot_visualization_utils.py
@@ -0,0 +1,367 @@
+"""
+=======================
+Visualization utilities
+=======================
+
+This example illustrates some of the utilities that torchvision offers for
+visualizing images, bounding boxes, and segmentation masks.
+"""
+
+
+import torch
+import numpy as np
+import matplotlib.pyplot as plt
+
+import torchvision.transforms.functional as F
+
+
+plt.rcParams["savefig.bbox"] = 'tight'
+
+
+def show(imgs):
+    if not isinstance(imgs, list):
+        imgs = [imgs]
+    fig, axs = plt.subplots(ncols=len(imgs), squeeze=False)
+    for i, img in enumerate(imgs):
+        img = img.detach()
+        img = F.to_pil_image(img)
+        axs[0, i].imshow(np.asarray(img))
+        axs[0, i].set(xticklabels=[], yticklabels=[], xticks=[], yticks=[])
+
+
+####################################
+# Visualizing a grid of images
+# ----------------------------
+# The :func:`~torchvision.utils.make_grid` function can be used to create a
+# tensor that represents multiple images in a grid. This util requires images
+# of dtype ``uint8`` as input.
+
+from torchvision.utils import make_grid
+from torchvision.io import read_image
+from pathlib import Path
+
+dog1_int = read_image(str(Path('assets') / 'dog1.jpg'))
+dog2_int = read_image(str(Path('assets') / 'dog2.jpg'))
+
+grid = make_grid([dog1_int, dog2_int, dog1_int, dog2_int])
+show(grid)
+
+####################################
+# Visualizing bounding boxes
+# --------------------------
+# We can use :func:`~torchvision.utils.draw_bounding_boxes` to draw boxes on an
+# image. We can set the colors, labels, and width, as well as the font and font
+# size. The boxes are in ``(xmin, ymin, xmax, ymax)`` format.
+
+from torchvision.utils import draw_bounding_boxes
+
+
+boxes = torch.tensor([[50, 50, 100, 200], [210, 150, 350, 430]], dtype=torch.float)
+colors = ["blue", "yellow"]
+result = draw_bounding_boxes(dog1_int, boxes, colors=colors, width=5)
+show(result)
+
+
+#####################################
+# Naturally, we can also plot bounding boxes produced by torchvision detection
+# models. Here is a demo with a Faster R-CNN model loaded from
+# :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn`.
+# You can also try using a RetinaNet with
+# :func:`~torchvision.models.detection.retinanet_resnet50_fpn`, an SSDlite with
+# :func:`~torchvision.models.detection.ssdlite320_mobilenet_v3_large` or an SSD with
+# :func:`~torchvision.models.detection.ssd300_vgg16`. For more details
+# on the output of such models, you may refer to :ref:`instance_seg_output`.
+
+from torchvision.models.detection import fasterrcnn_resnet50_fpn
+from torchvision.transforms.functional import convert_image_dtype
+
+
+batch_int = torch.stack([dog1_int, dog2_int])
+batch = convert_image_dtype(batch_int, dtype=torch.float)
+
+model = fasterrcnn_resnet50_fpn(pretrained=True, progress=False)
+model = model.eval()
+
+outputs = model(batch)
+print(outputs)
+
+#####################################
+# Let's plot the boxes detected by our model. We will only plot the boxes with a
+# score greater than a given threshold.
+
+score_threshold = .8
+dogs_with_boxes = [
+    draw_bounding_boxes(dog_int, boxes=output['boxes'][output['scores'] > score_threshold], width=4)
+    for dog_int, output in zip(batch_int, outputs)
+]
+show(dogs_with_boxes)
+
+#####################################
+# Visualizing segmentation masks
+# ------------------------------
+# The :func:`~torchvision.utils.draw_segmentation_masks` function can be used to
+# draw segmentation masks on images. Semantic segmentation and instance
+# segmentation models have different outputs, so we will treat each
+# independently.
+#
+# .. _semantic_seg_output:
+#
+# Semantic segmentation models
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# We will see how to use it with torchvision's FCN ResNet-50, loaded with
+# :func:`~torchvision.models.segmentation.fcn_resnet50`. You can also try using
+# DeepLabv3 (:func:`~torchvision.models.segmentation.deeplabv3_resnet50`) or
+# LR-ASPP MobileNetV3 models
+# (:func:`~torchvision.models.segmentation.lraspp_mobilenet_v3_large`).
+#
+# Let's start by looking at the output of the model. Remember that in general,
+# images must be normalized before they're passed to a semantic segmentation
+# model.
+
+from torchvision.models.segmentation import fcn_resnet50
+
+
+model = fcn_resnet50(pretrained=True, progress=False)
+model = model.eval()
+
+normalized_batch = F.normalize(batch, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225))
+output = model(normalized_batch)['out']
+print(output.shape, output.min().item(), output.max().item())
+
+#####################################
+# As we can see above, the output of the segmentation model is a tensor of shape
+# ``(batch_size, num_classes, H, W)``. Each value is a non-normalized score, and
+# we can normalize them into ``[0, 1]`` by using a softmax. After the softmax,
+# we can interpret each value as a probability indicating how likely a given
+# pixel is to belong to a given class.
+#
+# Let's plot the masks that have been detected for the dog class and for the
+# boat class:
+
+sem_classes = [
+    '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus',
+    'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
+    'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
+]
+sem_class_to_idx = {cls: idx for (idx, cls) in enumerate(sem_classes)}
+
+normalized_masks = torch.nn.functional.softmax(output, dim=1)
+
+dog_and_boat_masks = [
+    normalized_masks[img_idx, sem_class_to_idx[cls]]
+    for img_idx in range(batch.shape[0])
+    for cls in ('dog', 'boat')
+]
+
+show(dog_and_boat_masks)
+
+#####################################
+# As expected, the model is confident about the dog class, but not so much for
+# the boat class.
+#
+# The :func:`~torchvision.utils.draw_segmentation_masks` function can be used to
+# plot those masks on top of the original image. This function expects the
+# masks to be boolean masks, but our masks above contain probabilities in ``[0,
+# 1]``. To get boolean masks, we can do the following:
+
+class_dim = 1
+boolean_dog_masks = (normalized_masks.argmax(class_dim) == sem_class_to_idx['dog'])
+print(f"shape = {boolean_dog_masks.shape}, dtype = {boolean_dog_masks.dtype}")
+show([m.float() for m in boolean_dog_masks])
+
+
+#####################################
+# The line above where we define ``boolean_dog_masks`` is a bit cryptic, but you
+# can read it as the following query: "For which pixels is 'dog' the most likely
+# class?"
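+
+#####################################
+# Editorial aside (not part of the upstream gallery script): the same query,
+# spelled out step by step, may be easier to read. The intermediate name
+# ``most_likely_class`` is used here for illustration only.
+most_likely_class = normalized_masks.argmax(class_dim)  # shape (batch_size, H, W)
+print(torch.equal(boolean_dog_masks, most_likely_class == sem_class_to_idx['dog']))  # prints True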
+
+#####################################
+# .. note::
+#
+#   While we're using the ``normalized_masks`` here, we would have
+#   gotten the same result by using the non-normalized scores of the model
+#   directly (as the softmax operation preserves the order).
+#
+# Now that we have boolean masks, we can use them with
+# :func:`~torchvision.utils.draw_segmentation_masks` to plot them on top of the
+# original images:
+
+from torchvision.utils import draw_segmentation_masks
+
+dogs_with_masks = [
+    draw_segmentation_masks(img, masks=mask, alpha=0.7)
+    for img, mask in zip(batch_int, boolean_dog_masks)
+]
+show(dogs_with_masks)
+
+#####################################
+# We can plot more than one mask per image! Remember that the model returned as
+# many masks as there are classes. Let's ask the same query as above, but this
+# time for *all* classes, not just the dog class: "For each pixel and each class
+# C, is class C the most likely class?"
+#
+# This one is a bit more involved, so we'll first show how to do it with a
+# single image, and then we'll generalize to the batch.
+
+num_classes = normalized_masks.shape[1]
+dog1_masks = normalized_masks[0]
+class_dim = 0
+dog1_all_classes_masks = dog1_masks.argmax(class_dim) == torch.arange(num_classes)[:, None, None]
+
+print(f"dog1_masks shape = {dog1_masks.shape}, dtype = {dog1_masks.dtype}")
+print(f"dog1_all_classes_masks = {dog1_all_classes_masks.shape}, dtype = {dog1_all_classes_masks.dtype}")
+
+dog_with_all_masks = draw_segmentation_masks(dog1_int, masks=dog1_all_classes_masks, alpha=.6)
+show(dog_with_all_masks)
+
+#####################################
+# We can see in the image above that only 2 masks were drawn: the mask for the
+# background and the mask for the dog. This is because the model thinks that
+# only these 2 classes are the most likely ones across all the pixels. If the
+# model had detected another class as the most likely among other pixels, we
+# would have seen its mask above.
+#
+# Removing the background mask is as simple as passing
+# ``masks=dog1_all_classes_masks[1:]``, because the background class is the
+# class with index 0.
+#
+# Let's now do the same but for an entire batch of images. The code is similar
+# but involves a bit more juggling with the dimensions.
+
+class_dim = 1
+all_classes_masks = normalized_masks.argmax(class_dim) == torch.arange(num_classes)[:, None, None, None]
+print(f"shape = {all_classes_masks.shape}, dtype = {all_classes_masks.dtype}")
+# The first dimension is the classes now, so we need to swap it
+all_classes_masks = all_classes_masks.swapaxes(0, 1)
+
+dogs_with_masks = [
+    draw_segmentation_masks(img, masks=mask, alpha=.6)
+    for img, mask in zip(batch_int, all_classes_masks)
+]
+show(dogs_with_masks)
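+
+#####################################
+# Editorial aside (not part of the upstream gallery script): as mentioned above
+# for the single-image case, the background mask (class index 0) can be dropped
+# for the whole batch simply by slicing each per-image mask tensor. The name
+# ``dogs_without_bg`` is illustrative only.
+dogs_without_bg = [
+    draw_segmentation_masks(img, masks=mask[1:], alpha=.6)
+    for img, mask in zip(batch_int, all_classes_masks)
+]
+show(dogs_without_bg)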
+
+
+#####################################
+# .. _instance_seg_output:
+#
+# Instance segmentation models
+# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+#
+# Instance segmentation models have a significantly different output from the
+# semantic segmentation models. We will see here how to plot the masks for such
+# models. Let's start by analyzing the output of a Mask-RCNN model. Note that
+# these models don't require the images to be normalized, so we don't need to
+# use the normalized batch.
+#
+# .. note::
+#
+#   Here we will describe the output of a Mask-RCNN model. The models in
+#   :ref:`object_det_inst_seg_pers_keypoint_det` all have a similar output
+#   format, but some of them may have extra info like keypoints for
+#   :func:`~torchvision.models.detection.keypointrcnn_resnet50_fpn`, and some
+#   of them may not have masks, like
+#   :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn`.
+
+from torchvision.models.detection import maskrcnn_resnet50_fpn
+model = maskrcnn_resnet50_fpn(pretrained=True, progress=False)
+model = model.eval()
+
+output = model(batch)
+print(output)
+
+#####################################
+# Let's break this down. For each image in the batch, the model outputs some
+# detections (or instances). The number of detections varies for each input
+# image. Each instance is described by its bounding box, its label, its score
+# and its mask.
+#
+# The way the output is organized is as follows: the output is a list of length
+# ``batch_size``. Each entry in the list corresponds to an input image, and it
+# is a dict with keys 'boxes', 'labels', 'scores', and 'masks'. Each value
+# associated to those keys has ``num_instances`` elements in it. In our case
+# above there are 3 instances detected in the first image, and 2 instances in
+# the second one.
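+
+#####################################
+# Editorial aside (not part of the upstream gallery script): a quick way to
+# inspect that structure is to loop over the list and print a per-image
+# summary. The labels are still raw integer class indices at this point; they
+# are mapped to class names a bit further below.
+for img_idx, img_output in enumerate(output):
+    print(f"image {img_idx}: {len(img_output['labels'])} instances, "
+          f"labels = {img_output['labels'].tolist()}, "
+          f"scores = {[round(s, 3) for s in img_output['scores'].tolist()]}")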
+
+#####################################
+# The boxes can be plotted with :func:`~torchvision.utils.draw_bounding_boxes`
+# as above, but here we're more interested in the masks. These masks are quite
+# different from the masks that we saw above for the semantic segmentation
+# models.
+
+dog1_output = output[0]
+dog1_masks = dog1_output['masks']
+print(f"shape = {dog1_masks.shape}, dtype = {dog1_masks.dtype}, "
+      f"min = {dog1_masks.min()}, max = {dog1_masks.max()}")
+
+#####################################
+# Here the masks correspond to probabilities indicating, for each pixel, how
+# likely it is to belong to the predicted label of that instance. Those
+# predicted labels correspond to the 'labels' element in the same output dict.
+# Let's see which labels were predicted for the instances of the first image.
+
+inst_classes = [
+    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+    'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
+    'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
+    'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
+    'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
+    'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
+    'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+    'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
+    'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
+    'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
+    'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
+    'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
+]
+
+inst_class_to_idx = {cls: idx for (idx, cls) in enumerate(inst_classes)}
+
+print("For the first dog, the following instances were detected:")
+print([inst_classes[label] for label in dog1_output['labels']])
+
+#####################################
+# Interestingly, the model detects two persons in the image. Let's go ahead and
+# plot those masks. Since :func:`~torchvision.utils.draw_segmentation_masks`
+# expects boolean masks, we need to convert those probabilities into boolean
+# values. Remember that the semantics of those masks is "How likely is this
+# pixel to belong to the predicted class?". As a result, a natural way of
+# converting those masks into boolean values is to threshold them at a
+# probability of 0.5 (one could also choose a different threshold).
+
+proba_threshold = 0.5
+dog1_bool_masks = dog1_output['masks'] > proba_threshold
+print(f"shape = {dog1_bool_masks.shape}, dtype = {dog1_bool_masks.dtype}")
+
+# There's an extra dimension (1) to the masks. We need to remove it
+dog1_bool_masks = dog1_bool_masks.squeeze(1)
+
+show(draw_segmentation_masks(dog1_int, dog1_bool_masks, alpha=0.9))
+
+#####################################
+# The model seems to have properly detected the dog, but it also confused trees
+# with people. Looking more closely at the scores will help us plot more
+# relevant masks:
+
+print(dog1_output['scores'])
+
+#####################################
+# Clearly the model is less confident about the dog detection than it is about
+# the people detections. That's good news. When plotting the masks, we can ask
+# for only those that have a good score. Let's use a score threshold of .75
+# here, and also plot the masks of the second dog.
+
+score_threshold = .75
+
+boolean_masks = [
+    out['masks'][out['scores'] > score_threshold] > proba_threshold
+    for out in output
+]
+
+dogs_with_masks = [
+    draw_segmentation_masks(img, mask.squeeze(1))
+    for img, mask in zip(batch_int, boolean_masks)
+]
+show(dogs_with_masks)
+
+#####################################
+# The two 'people' masks in the first image were not selected because they have
+# a lower score than the score threshold. Similarly, in the second image, the
+# instance with class 15 (which corresponds to 'bench') was not selected.
diff --git a/pretrained_model/pytorch_vision_v0.10.0/hubconf.py b/pretrained_model/pytorch_vision_v0.10.0/hubconf.py
new file mode 100644
index 0000000000000000000000000000000000000000..097759bdd8935db41e0e98d7f91290474569b9eb
--- /dev/null
+++ b/pretrained_model/pytorch_vision_v0.10.0/hubconf.py
@@ -0,0 +1,21 @@
+# Optional list of dependencies required by the package
+dependencies = ['torch']
+
+# classification
+from torchvision.models.alexnet import alexnet
+from torchvision.models.densenet import densenet121, densenet169, densenet201, densenet161
+from torchvision.models.inception import inception_v3
+from torchvision.models.resnet import resnet18, resnet34, resnet50, resnet101, resnet152,\
+    resnext50_32x4d, resnext101_32x8d, wide_resnet50_2, wide_resnet101_2
+from torchvision.models.squeezenet import squeezenet1_0, squeezenet1_1
+from torchvision.models.vgg import vgg11, vgg13, vgg16, vgg19, vgg11_bn, vgg13_bn, vgg16_bn, vgg19_bn
+from torchvision.models.googlenet import googlenet
+from torchvision.models.shufflenetv2 import shufflenet_v2_x0_5, shufflenet_v2_x1_0
+from torchvision.models.mobilenetv2 import mobilenet_v2
+from torchvision.models.mobilenetv3 import mobilenet_v3_large, mobilenet_v3_small
+from torchvision.models.mnasnet import mnasnet0_5, mnasnet0_75, mnasnet1_0, \
+    mnasnet1_3
+
+# segmentation
+from torchvision.models.segmentation import fcn_resnet50, fcn_resnet101, \
+    deeplabv3_resnet50, deeplabv3_resnet101, deeplabv3_mobilenet_v3_large, lraspp_mobilenet_v3_large
diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/CMakeLists.txt b/pretrained_model/pytorch_vision_v0.10.0/ios/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..2ac46c15018a3b5a72b034168266d08c2cdc6cd9 --- /dev/null +++ 
b/pretrained_model/pytorch_vision_v0.10.0/ios/CMakeLists.txt @@ -0,0 +1,30 @@ +cmake_minimum_required(VERSION 3.4.1) +set(TARGET torchvision_ops) +project(${TARGET} CXX) +set(CMAKE_CXX_STANDARD 14) +set(LIBTORCH_HEADER_ROOT ${LIBTORCH_HEADER_ROOT}) +set(LIBRARY_OUTPUT_PATH ../lib) + +file(GLOB VISION_SRCS + ../torchvision/csrc/ops/cpu/*.h + ../torchvision/csrc/ops/cpu/*.cpp + ../torchvision/csrc/ops/*.h + ../torchvision/csrc/ops/*.cpp) + +# Remove interpolate_aa sources as they are temporary code +# see https://github.com/pytorch/vision/pull/3761 +# and using TensorIterator unavailable with iOS +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp") +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.cpp") +list(REMOVE_ITEM VISION_SRCS "${CMAKE_CURRENT_LIST_DIR}/../torchvision/csrc/ops/interpolate_aa.h") + +add_library(${TARGET} STATIC + ${VISION_SRCS} +) + +file(GLOB PYTORCH_HEADERS "${LIBTORCH_HEADER_ROOT}") +file(GLOB PYTORCH_HEADERS_CSRC "${LIBTORCH_HEADER_ROOT}/torch/csrc/api/include") +target_include_directories(${TARGET} PRIVATE + ${PYTORCH_HEADERS} + ${PYTORCH_HEADERS_CSRC} +) diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.pbxproj b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.pbxproj new file mode 100644 index 0000000000000000000000000000000000000000..5e71c77e6f85c92b45e9ba62e3d48aa35b2ce14c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.pbxproj @@ -0,0 +1,5933 @@ +// !$*UTF8*$! +{ + archiveVersion = 1; + classes = { + }; + objectVersion = 50; + objects = { + +/* Begin PBXBuildFile section */ + 0C12EF3D2616383D00B66C86 /* avx.py in Resources */ = {isa = PBXBuildFile; fileRef = 0C12EEF32616383C00B66C86 /* avx.py */; }; + 0C12EF3E2616383D00B66C86 /* __init__.py in Resources */ = {isa = PBXBuildFile; fileRef = 0C12EEF42616383C00B66C86 /* __init__.py */; }; + 0C12EF3F2616383D00B66C86 /* avx2.py in Resources */ = {isa = PBXBuildFile; fileRef = 0C12EEF62616383C00B66C86 /* avx2.py */; }; + 0C12EF402616383D00B66C86 /* THTensor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF192616383C00B66C86 /* THTensor.cpp */; }; + 0C12EF412616383D00B66C86 /* THTensorMath.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF1A2616383C00B66C86 /* THTensorMath.cpp */; }; + 0C12EF422616383D00B66C86 /* THStorageCopy.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF1C2616383C00B66C86 /* THStorageCopy.cpp */; }; + 0C12EF432616383D00B66C86 /* THLapack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF212616383C00B66C86 /* THLapack.cpp */; }; + 0C12EF442616383D00B66C86 /* THStorage.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF242616383C00B66C86 /* THStorage.cpp */; }; + 0C12EF452616383D00B66C86 /* THBlas.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF262616383C00B66C86 /* THBlas.cpp */; }; + 0C12EF462616383D00B66C86 /* THTensorLapack.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0C12EF272616383C00B66C86 /* THTensorLapack.cpp */; }; + 0C12EF472616383D00B66C86 /* libtorch_cpu.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF332616383C00B66C86 /* libtorch_cpu.a */; }; + 0C12EF482616383D00B66C86 /* libtorch.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF342616383C00B66C86 /* libtorch.a */; }; + 0C12EF492616383D00B66C86 /* libcpuinfo.a in Frameworks */ = {isa = PBXBuildFile; 
fileRef = 0C12EF352616383C00B66C86 /* libcpuinfo.a */; }; + 0C12EF4A2616383D00B66C86 /* libXNNPACK.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF362616383C00B66C86 /* libXNNPACK.a */; }; + 0C12EF4C2616383D00B66C86 /* libpthreadpool.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF382616383C00B66C86 /* libpthreadpool.a */; }; + 0C12EF4D2616383D00B66C86 /* libc10.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF392616383C00B66C86 /* libc10.a */; }; + 0C12EF4E2616383D00B66C86 /* libeigen_blas.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF3A2616383C00B66C86 /* libeigen_blas.a */; }; + 0C12EF4F2616383D00B66C86 /* libclog.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF3B2616383C00B66C86 /* libclog.a */; }; + 0C12EF502616383D00B66C86 /* libpytorch_qnnpack.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF3C2616383C00B66C86 /* libpytorch_qnnpack.a */; }; + 0C12EF7626163B7600B66C86 /* frcnn_mnetv3.pt in Resources */ = {isa = PBXBuildFile; fileRef = 0C12EF7526163B7600B66C86 /* frcnn_mnetv3.pt */; }; + 0C12EF7A26163C7C00B66C86 /* libtorchvision_ops.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 0C12EF372616383C00B66C86 /* libtorchvision_ops.a */; }; + 0CEB0AC026151A8800F1F7D5 /* AppDelegate.m in Sources */ = {isa = PBXBuildFile; fileRef = 0CEB0ABF26151A8800F1F7D5 /* AppDelegate.m */; }; + 0CEB0AC626151A8800F1F7D5 /* ViewController.mm in Sources */ = {isa = PBXBuildFile; fileRef = 0CEB0AC526151A8800F1F7D5 /* ViewController.mm */; }; + 0CEB0AC926151A8800F1F7D5 /* Main.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 0CEB0AC726151A8800F1F7D5 /* Main.storyboard */; }; + 0CEB0ACB26151A8900F1F7D5 /* Assets.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = 0CEB0ACA26151A8900F1F7D5 /* Assets.xcassets */; }; + 0CEB0ACE26151A8900F1F7D5 /* LaunchScreen.storyboard in Resources */ = {isa = PBXBuildFile; fileRef = 0CEB0ACC26151A8900F1F7D5 /* LaunchScreen.storyboard */; }; + 0CEB0AD126151A8900F1F7D5 /* main.m in Sources */ = {isa = PBXBuildFile; fileRef = 0CEB0AD026151A8900F1F7D5 /* main.m */; }; + 0CEB0B3A26152ED900F1F7D5 /* ModelRunner.mm in Sources */ = {isa = PBXBuildFile; fileRef = 0CEB0B3926152ED900F1F7D5 /* ModelRunner.mm */; }; +/* End PBXBuildFile section */ + +/* Begin PBXFileReference section */ + 0C12E78A2616383A00B66C86 /* attr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attr.h; sourceTree = "<group>"; }; + 0C12E78B2616383A00B66C86 /* embed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embed.h; sourceTree = "<group>"; }; + 0C12E78C2616383A00B66C86 /* numpy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = numpy.h; sourceTree = "<group>"; }; + 0C12E78D2616383A00B66C86 /* pybind11.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind11.h; sourceTree = "<group>"; }; + 0C12E78E2616383A00B66C86 /* operators.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operators.h; sourceTree = "<group>"; }; + 0C12E78F2616383A00B66C86 /* iostream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = iostream.h; sourceTree = "<group>"; }; + 0C12E7902616383A00B66C86 /* chrono.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = chrono.h; sourceTree = "<group>"; }; + 0C12E7912616383A00B66C86 /* stl_bind.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stl_bind.h; sourceTree = "<group>"; }; + 0C12E7922616383A00B66C86 /* buffer_info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = buffer_info.h; sourceTree = "<group>"; }; + 0C12E7932616383A00B66C86 /* options.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = options.h; sourceTree = "<group>"; }; + 0C12E7942616383A00B66C86 /* functional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = functional.h; sourceTree = "<group>"; }; + 0C12E7952616383A00B66C86 /* stl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stl.h; sourceTree = "<group>"; }; + 0C12E7972616383A00B66C86 /* typeid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typeid.h; sourceTree = "<group>"; }; + 0C12E7982616383A00B66C86 /* descr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = descr.h; sourceTree = "<group>"; }; + 0C12E7992616383A00B66C86 /* internals.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = internals.h; sourceTree = "<group>"; }; + 0C12E79A2616383A00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E79B2616383A00B66C86 /* class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = class.h; sourceTree = "<group>"; }; + 0C12E79C2616383A00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12E79D2616383A00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E79E2616383A00B66C86 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = "<group>"; }; + 0C12E79F2616383A00B66C86 /* cast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cast.h; sourceTree = "<group>"; }; + 0C12E7A02616383A00B66C86 /* eigen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eigen.h; sourceTree = "<group>"; }; + 0C12E7A12616383A00B66C86 /* pytypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pytypes.h; sourceTree = "<group>"; }; + 0C12E7A22616383A00B66C86 /* complex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex.h; sourceTree = "<group>"; }; + 0C12E7A52616383A00B66C86 /* optical_flow.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optical_flow.h; sourceTree = "<group>"; }; + 0C12E7A62616383A00B66C86 /* video_decoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = video_decoder.h; sourceTree = "<group>"; }; + 0C12E7A72616383A00B66C86 /* video_input_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = video_input_op.h; sourceTree = "<group>"; }; + 0C12E7A82616383A00B66C86 /* video_io.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = video_io.h; sourceTree = "<group>"; }; + 0C12E7AB2616383A00B66C86 /* conv_transpose_unpool_base_op.h 
*/ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_unpool_base_op.h; sourceTree = "<group>"; }; + 0C12E7AD2616383A00B66C86 /* operator_fallback_ideep.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_fallback_ideep.h; sourceTree = "<group>"; }; + 0C12E7AE2616383A00B66C86 /* conv_pool_base_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_pool_base_op.h; sourceTree = "<group>"; }; + 0C12E7B02616383A00B66C86 /* ideep_context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ideep_context.h; sourceTree = "<group>"; }; + 0C12E7B12616383A00B66C86 /* ideep_operator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ideep_operator.h; sourceTree = "<group>"; }; + 0C12E7B22616383A00B66C86 /* ideep_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ideep_utils.h; sourceTree = "<group>"; }; + 0C12E7B42616383A00B66C86 /* net_async_task_graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_async_task_graph.h; sourceTree = "<group>"; }; + 0C12E7B52616383A00B66C86 /* net_simple_refcount.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_simple_refcount.h; sourceTree = "<group>"; }; + 0C12E7B62616383A00B66C86 /* tensor_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_impl.h; sourceTree = "<group>"; }; + 0C12E7B72616383A00B66C86 /* plan_executor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = plan_executor.h; sourceTree = "<group>"; }; + 0C12E7B82616383A00B66C86 /* qtensor_serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qtensor_serialization.h; sourceTree = "<group>"; }; + 0C12E7B92616383A00B66C86 /* context_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context_gpu.h; sourceTree = "<group>"; }; + 0C12E7BA2616383A00B66C86 /* observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = observer.h; sourceTree = "<group>"; }; + 0C12E7BB2616383A00B66C86 /* blob_serializer_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blob_serializer_base.h; sourceTree = "<group>"; }; + 0C12E7BC2616383A00B66C86 /* memonger.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = memonger.h; sourceTree = "<group>"; }; + 0C12E7BD2616383A00B66C86 /* tensor_int8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_int8.h; sourceTree = "<group>"; }; + 0C12E7BE2616383A00B66C86 /* static_tracepoint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = static_tracepoint.h; sourceTree = "<group>"; }; + 0C12E7BF2616383A00B66C86 /* net.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net.h; sourceTree = "<group>"; }; + 0C12E7C02616383A00B66C86 /* numa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = numa.h; sourceTree = "<group>"; }; + 0C12E7C12616383A00B66C86 /* scope_guard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = scope_guard.h; sourceTree = "<group>"; }; + 0C12E7C22616383A00B66C86 /* test_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_utils.h; sourceTree = "<group>"; }; + 0C12E7C32616383A00B66C86 /* event.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event.h; sourceTree = "<group>"; }; + 0C12E7C42616383A00B66C86 /* types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = types.h; sourceTree = "<group>"; }; + 0C12E7C52616383A00B66C86 /* context_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context_base.h; sourceTree = "<group>"; }; + 0C12E7C62616383A00B66C86 /* operator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator.h; sourceTree = "<group>"; }; + 0C12E7C72616383A00B66C86 /* db.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = db.h; sourceTree = "<group>"; }; + 0C12E7C82616383A00B66C86 /* blob.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blob.h; sourceTree = "<group>"; }; + 0C12E7C92616383A00B66C86 /* static_tracepoint_elfx86.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = static_tracepoint_elfx86.h; sourceTree = "<group>"; }; + 0C12E7CA2616383A00B66C86 /* net_async_tracing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_async_tracing.h; sourceTree = "<group>"; }; + 0C12E7CB2616383A00B66C86 /* flags.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = flags.h; sourceTree = "<group>"; }; + 0C12E7CC2616383A00B66C86 /* net_async_task_future.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_async_task_future.h; sourceTree = "<group>"; }; + 0C12E7CD2616383A00B66C86 /* operator_schema.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_schema.h; sourceTree = "<group>"; }; + 0C12E7CE2616383A00B66C86 /* context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context.h; sourceTree = "<group>"; }; + 0C12E7CF2616383A00B66C86 /* net_async_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_async_base.h; sourceTree = "<group>"; }; + 0C12E7D02616383A00B66C86 /* prof_dag_counters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prof_dag_counters.h; sourceTree = "<group>"; }; + 0C12E7D12616383A00B66C86 /* logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging.h; sourceTree = "<group>"; }; + 0C12E7D22616383A00B66C86 /* net_async_scheduling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_async_scheduling.h; sourceTree = "<group>"; }; + 0C12E7D32616383A00B66C86 /* graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph.h; sourceTree = "<group>"; }; + 0C12E7D42616383A00B66C86 /* common_cudnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_cudnn.h; sourceTree = "<group>"; }; + 0C12E7D52616383A00B66C86 /* net_async_task.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = net_async_task.h; sourceTree = "<group>"; }; + 0C12E7D62616383A00B66C86 /* export_caffe2_op_to_c10.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = export_caffe2_op_to_c10.h; sourceTree = "<group>"; }; + 0C12E7D72616383A00B66C86 /* net_simple.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_simple.h; sourceTree = "<group>"; }; + 0C12E7D82616383A00B66C86 /* workspace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = workspace.h; sourceTree = "<group>"; }; + 0C12E7D92616383A00B66C86 /* timer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = timer.h; sourceTree = "<group>"; }; + 0C12E7DA2616383A00B66C86 /* event_cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = event_cpu.h; sourceTree = "<group>"; }; + 0C12E7DB2616383A00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E7DC2616383A00B66C86 /* blob_stats.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blob_stats.h; sourceTree = "<group>"; }; + 0C12E7DD2616383A00B66C86 /* allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allocator.h; sourceTree = "<group>"; }; + 0C12E7DE2616383A00B66C86 /* macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = macros.h; sourceTree = "<group>"; }; + 0C12E7E02616383A00B66C86 /* miopen_wrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = miopen_wrapper.h; sourceTree = "<group>"; }; + 0C12E7E12616383A00B66C86 /* common_miopen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_miopen.h; sourceTree = "<group>"; }; + 0C12E7E22616383A00B66C86 /* storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = storage.h; sourceTree = "<group>"; }; + 0C12E7E32616383A00B66C86 /* transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transform.h; sourceTree = "<group>"; }; + 0C12E7E42616383A00B66C86 /* common_omp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_omp.h; sourceTree = "<group>"; }; + 0C12E7E52616383A00B66C86 /* export_c10_op_to_caffe2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = export_c10_op_to_caffe2.h; sourceTree = "<group>"; }; + 0C12E7EB2616383A00B66C86 /* OpClasses.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpClasses.h; sourceTree = "<group>"; }; + 0C12E7EC2616383A00B66C86 /* OpEnum.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpEnum.h; sourceTree = "<group>"; }; + 0C12E7ED2616383A00B66C86 /* OpNames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpNames.h; sourceTree = "<group>"; }; + 0C12E7EF2616383A00B66C86 /* Compiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Compiler.h; sourceTree = "<group>"; }; + 0C12E7F02616383A00B66C86 /* NeuralNet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NeuralNet.h; 
sourceTree = "<group>"; }; + 0C12E7F12616383A00B66C86 /* ControlFlow.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ControlFlow.h; sourceTree = "<group>"; }; + 0C12E7F32616383A00B66C86 /* SubgraphMatcher.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SubgraphMatcher.h; sourceTree = "<group>"; }; + 0C12E7F42616383A00B66C86 /* Match.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Match.h; sourceTree = "<group>"; }; + 0C12E7F62616383A00B66C86 /* Algorithms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Algorithms.h; sourceTree = "<group>"; }; + 0C12E7F72616383A00B66C86 /* TopoSort.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TopoSort.h; sourceTree = "<group>"; }; + 0C12E7F82616383A00B66C86 /* Graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Graph.h; sourceTree = "<group>"; }; + 0C12E7F92616383A00B66C86 /* TarjansImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TarjansImpl.h; sourceTree = "<group>"; }; + 0C12E7FA2616383A00B66C86 /* BinaryMatchImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BinaryMatchImpl.h; sourceTree = "<group>"; }; + 0C12E7FC2616383A00B66C86 /* Dot.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dot.h; sourceTree = "<group>"; }; + 0C12E7FE2616383A00B66C86 /* Casting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Casting.h; sourceTree = "<group>"; }; + 0C12E7FF2616383A00B66C86 /* Common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Common.h; sourceTree = "<group>"; }; + 0C12E8012616383A00B66C86 /* test_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_util.h; sourceTree = "<group>"; }; + 0C12E8022616383A00B66C86 /* module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = module.h; sourceTree = "<group>"; }; + 0C12E8032616383A00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12E8042616383A00B66C86 /* net_dag_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_dag_utils.h; sourceTree = "<group>"; }; + 0C12E8052616383A00B66C86 /* stats.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stats.h; sourceTree = "<group>"; }; + 0C12E8062616383A00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12E8072616383A00B66C86 /* common_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_gpu.h; sourceTree = "<group>"; }; + 0C12E8082616383A00B66C86 /* qtensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qtensor.h; sourceTree = "<group>"; }; + 0C12E8092616383A00B66C86 /* net_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_parallel.h; sourceTree = "<group>"; }; + 0C12E80A2616383A00B66C86 /* operator_gradient.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_gradient.h; sourceTree = "<group>"; }; + 0C12E80B2616383A00B66C86 /* cudnn_wrappers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cudnn_wrappers.h; sourceTree = "<group>"; }; + 0C12E80C2616383A00B66C86 /* distributions_stubs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distributions_stubs.h; sourceTree = "<group>"; }; + 0C12E80D2616383A00B66C86 /* blob_serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blob_serialization.h; sourceTree = "<group>"; }; + 0C12E80F2616383A00B66C86 /* mpi_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpi_common.h; sourceTree = "<group>"; }; + 0C12E8102616383A00B66C86 /* mpi_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpi_ops.h; sourceTree = "<group>"; }; + 0C12E8122616383A00B66C86 /* caffe2_pb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = caffe2_pb.h; sourceTree = "<group>"; }; + 0C12E8132616383A00B66C86 /* torch_pb.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = torch_pb.h; sourceTree = "<group>"; }; + 0C12E8172616383A00B66C86 /* top_k.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = top_k.h; sourceTree = "<group>"; }; + 0C12E8182616383A00B66C86 /* channel_stats_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_stats_op.h; sourceTree = "<group>"; }; + 0C12E8192616383A00B66C86 /* gru_unit_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gru_unit_op.h; sourceTree = "<group>"; }; + 0C12E81A2616383A00B66C86 /* half_float_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = half_float_ops.h; sourceTree = "<group>"; }; + 0C12E81B2616383A00B66C86 /* sqr_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sqr_op.h; sourceTree = "<group>"; }; + 0C12E81C2616383A00B66C86 /* mean_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mean_op.h; sourceTree = "<group>"; }; + 0C12E81D2616383A00B66C86 /* thresholded_relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = thresholded_relu_op.h; sourceTree = "<group>"; }; + 0C12E81E2616383A00B66C86 /* ctc_greedy_decoder_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ctc_greedy_decoder_op.h; sourceTree = "<group>"; }; + 0C12E81F2616383A00B66C86 /* conv_op_cache_cudnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_op_cache_cudnn.h; sourceTree = "<group>"; }; + 0C12E8202616383A00B66C86 /* utility_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utility_ops.h; sourceTree = "<group>"; }; + 0C12E8212616383A00B66C86 /* selu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = selu_op.h; sourceTree = "<group>"; }; + 0C12E8222616383A00B66C86 /* map_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = map_ops.h; sourceTree = "<group>"; }; + 0C12E8232616383A00B66C86 /* 
roi_align_rotated_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = roi_align_rotated_op.h; sourceTree = "<group>"; }; + 0C12E8242616383A00B66C86 /* fused_rowwise_random_quantization_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_rowwise_random_quantization_ops.h; sourceTree = "<group>"; }; + 0C12E8252616383A00B66C86 /* stop_gradient.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stop_gradient.h; sourceTree = "<group>"; }; + 0C12E8262616383A00B66C86 /* batch_gather_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_gather_ops.h; sourceTree = "<group>"; }; + 0C12E8272616383A00B66C86 /* asin_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = asin_op.h; sourceTree = "<group>"; }; + 0C12E8282616383A00B66C86 /* cosh_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cosh_op.h; sourceTree = "<group>"; }; + 0C12E8292616383A00B66C86 /* atan_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = atan_op.h; sourceTree = "<group>"; }; + 0C12E82A2616383A00B66C86 /* reverse_packed_segs_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reverse_packed_segs_op.h; sourceTree = "<group>"; }; + 0C12E82B2616383A00B66C86 /* given_tensor_byte_string_to_uint8_fill_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = given_tensor_byte_string_to_uint8_fill_op.h; sourceTree = "<group>"; }; + 0C12E82C2616383A00B66C86 /* ensure_clipped_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ensure_clipped_op.h; sourceTree = "<group>"; }; + 0C12E82D2616383A00B66C86 /* conv_transpose_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_op.h; sourceTree = "<group>"; }; + 0C12E82E2616383A00B66C86 /* generate_proposals_op_util_nms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = generate_proposals_op_util_nms.h; sourceTree = "<group>"; }; + 0C12E82F2616383A00B66C86 /* enforce_finite_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = enforce_finite_op.h; sourceTree = "<group>"; }; + 0C12E8302616383A00B66C86 /* conv_transpose_unpool_op_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_unpool_op_base.h; sourceTree = "<group>"; }; + 0C12E8312616383A00B66C86 /* gather_fused_8bit_rowwise_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gather_fused_8bit_rowwise_op.h; sourceTree = "<group>"; }; + 0C12E8322616383A00B66C86 /* batch_matmul_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_matmul_op.h; sourceTree = "<group>"; }; + 0C12E8332616383A00B66C86 /* batch_bucketize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_bucketize_op.h; sourceTree = "<group>"; }; + 0C12E8342616383A00B66C86 /* softsign_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = softsign_op.h; sourceTree = "<group>"; }; + 0C12E8352616383A00B66C86 /* elementwise_logical_ops.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_logical_ops.h; sourceTree = "<group>"; }; + 0C12E8362616383A00B66C86 /* percentile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = percentile_op.h; sourceTree = "<group>"; }; + 0C12E8372616383A00B66C86 /* length_split_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = length_split_op.h; sourceTree = "<group>"; }; + 0C12E8382616383A00B66C86 /* locally_connected_op_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = locally_connected_op_impl.h; sourceTree = "<group>"; }; + 0C12E8392616383A00B66C86 /* rmac_regions_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rmac_regions_op.h; sourceTree = "<group>"; }; + 0C12E83A2616383A00B66C86 /* hard_sigmoid_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hard_sigmoid_op.h; sourceTree = "<group>"; }; + 0C12E83B2616383A00B66C86 /* ensure_cpu_output_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ensure_cpu_output_op.h; sourceTree = "<group>"; }; + 0C12E83C2616383A00B66C86 /* batch_box_cox_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_box_cox_op.h; sourceTree = "<group>"; }; + 0C12E83D2616383A00B66C86 /* ctc_beam_search_decoder_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ctc_beam_search_decoder_op.h; sourceTree = "<group>"; }; + 0C12E83E2616383A00B66C86 /* flexible_top_k.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = flexible_top_k.h; sourceTree = "<group>"; }; + 0C12E83F2616383A00B66C86 /* fully_connected_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_op.h; sourceTree = "<group>"; }; + 0C12E8402616383A00B66C86 /* key_split_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = key_split_ops.h; sourceTree = "<group>"; }; + 0C12E8412616383A00B66C86 /* reciprocal_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reciprocal_op.h; sourceTree = "<group>"; }; + 0C12E8422616383A00B66C86 /* roi_align_gradient_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = roi_align_gradient_op.h; sourceTree = "<group>"; }; + 0C12E8432616383A00B66C86 /* group_norm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = group_norm_op.h; sourceTree = "<group>"; }; + 0C12E8442616383A00B66C86 /* load_save_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = load_save_op.h; sourceTree = "<group>"; }; + 0C12E8452616383A00B66C86 /* cos_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cos_op.h; sourceTree = "<group>"; }; + 0C12E8462616383A00B66C86 /* expand_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = expand_op.h; sourceTree = "<group>"; }; + 0C12E8472616383A00B66C86 /* elementwise_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_ops.h; sourceTree = "<group>"; }; + 0C12E8482616383A00B66C86 /* im2col_op.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = im2col_op.h; sourceTree = "<group>"; }; + 0C12E8492616383A00B66C86 /* space_batch_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = space_batch_op.h; sourceTree = "<group>"; }; + 0C12E84A2616383A00B66C86 /* relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = relu_op.h; sourceTree = "<group>"; }; + 0C12E84B2616383A00B66C86 /* while_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = while_op.h; sourceTree = "<group>"; }; + 0C12E84C2616383A00B66C86 /* remove_data_blocks_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_data_blocks_op.h; sourceTree = "<group>"; }; + 0C12E84D2616383A00B66C86 /* elementwise_mul_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_mul_op.h; sourceTree = "<group>"; }; + 0C12E84E2616383A00B66C86 /* numpy_tile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = numpy_tile_op.h; sourceTree = "<group>"; }; + 0C12E84F2616383A00B66C86 /* rowmul_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rowmul_op.h; sourceTree = "<group>"; }; + 0C12E8502616383A00B66C86 /* accumulate_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = accumulate_op.h; sourceTree = "<group>"; }; + 0C12E8512616383A00B66C86 /* sparse_lp_regularizer_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_lp_regularizer_op.h; sourceTree = "<group>"; }; + 0C12E8522616383A00B66C86 /* bisect_percentile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bisect_percentile_op.h; sourceTree = "<group>"; }; + 0C12E8532616383A00B66C86 /* tile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tile_op.h; sourceTree = "<group>"; }; + 0C12E8542616383A00B66C86 /* gelu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gelu_op.h; sourceTree = "<group>"; }; + 0C12E8552616383A00B66C86 /* stats_put_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stats_put_ops.h; sourceTree = "<group>"; }; + 0C12E8562616383A00B66C86 /* given_tensor_fill_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = given_tensor_fill_op.h; sourceTree = "<group>"; }; + 0C12E8572616383A00B66C86 /* accuracy_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = accuracy_op.h; sourceTree = "<group>"; }; + 0C12E8582616383A00B66C86 /* bbox_transform_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bbox_transform_op.h; sourceTree = "<group>"; }; + 0C12E8592616383A00B66C86 /* boolean_unmask_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = boolean_unmask_ops.h; sourceTree = "<group>"; }; + 0C12E85A2616383A00B66C86 /* glu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = glu_op.h; sourceTree = "<group>"; }; + 0C12E85B2616383A00B66C86 /* resize_3d_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resize_3d_op.h; sourceTree = "<group>"; }; + 
0C12E85C2616383A00B66C86 /* unsafe_coalesce.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unsafe_coalesce.h; sourceTree = "<group>"; }; + 0C12E85D2616383A00B66C86 /* conv_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_op.h; sourceTree = "<group>"; }; + 0C12E85E2616383A00B66C86 /* conv_op_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_op_impl.h; sourceTree = "<group>"; }; + 0C12E85F2616383A00B66C86 /* erf_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = erf_op.h; sourceTree = "<group>"; }; + 0C12E8602616383A00B66C86 /* fused_rowwise_8bit_conversion_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_rowwise_8bit_conversion_ops.h; sourceTree = "<group>"; }; + 0C12E8612616383A00B66C86 /* locally_connected_op_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = locally_connected_op_util.h; sourceTree = "<group>"; }; + 0C12E8622616383A00B66C86 /* channel_backprop_stats_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_backprop_stats_op.h; sourceTree = "<group>"; }; + 0C12E8632616383A00B66C86 /* order_switch_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = order_switch_ops.h; sourceTree = "<group>"; }; + 0C12E8642616383A00B66C86 /* lengths_reducer_fused_nbit_rowwise_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_fused_nbit_rowwise_ops.h; sourceTree = "<group>"; }; + 0C12E8652616383A00B66C86 /* lengths_reducer_fused_8bit_rowwise_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_fused_8bit_rowwise_ops.h; sourceTree = "<group>"; }; + 0C12E8662616383A00B66C86 /* load_save_op_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = load_save_op_util.h; sourceTree = "<group>"; }; + 0C12E8672616383A00B66C86 /* conv_transpose_op_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_op_impl.h; sourceTree = "<group>"; }; + 0C12E8682616383A00B66C86 /* op_utils_cudnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = op_utils_cudnn.h; sourceTree = "<group>"; }; + 0C12E8692616383A00B66C86 /* prelu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prelu_op.h; sourceTree = "<group>"; }; + 0C12E86A2616383A00B66C86 /* box_with_nms_limit_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = box_with_nms_limit_op.h; sourceTree = "<group>"; }; + 0C12E86B2616383A00B66C86 /* fc_inference.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fc_inference.h; sourceTree = "<group>"; }; + 0C12E86C2616383A00B66C86 /* distance_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distance_op.h; sourceTree = "<group>"; }; + 0C12E86D2616383A00B66C86 /* data_couple.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_couple.h; sourceTree = "<group>"; }; + 0C12E86E2616383A00B66C86 /* dataset_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = dataset_ops.h; sourceTree = "<group>"; }; + 0C12E86F2616383A00B66C86 /* merge_id_lists_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = merge_id_lists_op.h; sourceTree = "<group>"; }; + 0C12E8702616383A00B66C86 /* generate_proposals_op_util_nms_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = generate_proposals_op_util_nms_gpu.h; sourceTree = "<group>"; }; + 0C12E8712616383A00B66C86 /* async_net_barrier_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = async_net_barrier_op.h; sourceTree = "<group>"; }; + 0C12E8722616383A00B66C86 /* deform_conv_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = deform_conv_op.h; sourceTree = "<group>"; }; + 0C12E8742616383A00B66C86 /* int8_relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_relu_op.h; sourceTree = "<group>"; }; + 0C12E8752616383A00B66C86 /* int8_channel_shuffle_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_channel_shuffle_op.h; sourceTree = "<group>"; }; + 0C12E8762616383A00B66C86 /* int8_concat_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_concat_op.h; sourceTree = "<group>"; }; + 0C12E8772616383A00B66C86 /* int8_dequantize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_dequantize_op.h; sourceTree = "<group>"; }; + 0C12E8782616383A00B66C86 /* int8_slice_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_slice_op.h; sourceTree = "<group>"; }; + 0C12E8792616383A00B66C86 /* int8_quantize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_quantize_op.h; sourceTree = "<group>"; }; + 0C12E87A2616383A00B66C86 /* int8_flatten_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_flatten_op.h; sourceTree = "<group>"; }; + 0C12E87B2616383A00B66C86 /* int8_max_pool_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_max_pool_op.h; sourceTree = "<group>"; }; + 0C12E87C2616383A00B66C86 /* int8_softmax_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_softmax_op.h; sourceTree = "<group>"; }; + 0C12E87D2616383A00B66C86 /* int8_average_pool_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_average_pool_op.h; sourceTree = "<group>"; }; + 0C12E87E2616383A00B66C86 /* int8_fc_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_fc_op.h; sourceTree = "<group>"; }; + 0C12E87F2616383A00B66C86 /* int8_conv_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_conv_op.h; sourceTree = "<group>"; }; + 0C12E8802616383A00B66C86 /* int8_test_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_test_utils.h; sourceTree = "<group>"; }; + 0C12E8812616383A00B66C86 /* int8_roi_align_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_roi_align_op.h; sourceTree = "<group>"; }; + 0C12E8822616383A00B66C86 /* int8_given_tensor_fill_op.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_given_tensor_fill_op.h; sourceTree = "<group>"; }; + 0C12E8832616383A00B66C86 /* int8_reshape_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_reshape_op.h; sourceTree = "<group>"; }; + 0C12E8842616383A00B66C86 /* int8_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_utils.h; sourceTree = "<group>"; }; + 0C12E8852616383A00B66C86 /* int8_resize_nearest_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_resize_nearest_op.h; sourceTree = "<group>"; }; + 0C12E8862616383A00B66C86 /* int8_sigmoid_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_sigmoid_op.h; sourceTree = "<group>"; }; + 0C12E8872616383A00B66C86 /* int8_simd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_simd.h; sourceTree = "<group>"; }; + 0C12E8882616383A00B66C86 /* int8_conv_transpose_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_conv_transpose_op.h; sourceTree = "<group>"; }; + 0C12E8892616383A00B66C86 /* int8_leaky_relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_leaky_relu_op.h; sourceTree = "<group>"; }; + 0C12E88A2616383A00B66C86 /* int8_add_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_add_op.h; sourceTree = "<group>"; }; + 0C12E88B2616383A00B66C86 /* int8_transpose_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_transpose_op.h; sourceTree = "<group>"; }; + 0C12E88C2616383A00B66C86 /* sqrt_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sqrt_op.h; sourceTree = "<group>"; }; + 0C12E88D2616383A00B66C86 /* elementwise_div_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_div_op.h; sourceTree = "<group>"; }; + 0C12E88E2616383A00B66C86 /* deform_conv_op_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = deform_conv_op_impl.h; sourceTree = "<group>"; }; + 0C12E88F2616383A00B66C86 /* feature_maps_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = feature_maps_ops.h; sourceTree = "<group>"; }; + 0C12E8902616383A00B66C86 /* text_file_reader_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = text_file_reader_utils.h; sourceTree = "<group>"; }; + 0C12E8912616383A00B66C86 /* scale_blobs_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scale_blobs_op.h; sourceTree = "<group>"; }; + 0C12E8922616383A00B66C86 /* pool_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pool_op.h; sourceTree = "<group>"; }; + 0C12E8932616383A00B66C86 /* conv_transpose_op_mobile_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_op_mobile_impl.h; sourceTree = "<group>"; }; + 0C12E8942616383A00B66C86 /* dense_vector_to_id_list_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dense_vector_to_id_list_op.h; sourceTree = "<group>"; }; + 0C12E8952616383A00B66C86 /* 
minmax_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = minmax_ops.h; sourceTree = "<group>"; }; + 0C12E8962616383A00B66C86 /* lengths_tile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_tile_op.h; sourceTree = "<group>"; }; + 0C12E8972616383A00B66C86 /* pool_op_util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pool_op_util.h; sourceTree = "<group>"; }; + 0C12E8982616383A00B66C86 /* no_default_engine_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = no_default_engine_op.h; sourceTree = "<group>"; }; + 0C12E8992616383A00B66C86 /* onnx_while_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx_while_op.h; sourceTree = "<group>"; }; + 0C12E89A2616383A00B66C86 /* reduce_front_back_sum_mean_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduce_front_back_sum_mean_ops.h; sourceTree = "<group>"; }; + 0C12E89B2616383A00B66C86 /* roi_pool_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = roi_pool_op.h; sourceTree = "<group>"; }; + 0C12E89C2616383A00B66C86 /* flatten_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = flatten_op.h; sourceTree = "<group>"; }; + 0C12E89D2616383A00B66C86 /* self_binning_histogram_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = self_binning_histogram_op.h; sourceTree = "<group>"; }; + 0C12E89E2616383A00B66C86 /* normalize_l1_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalize_l1_op.h; sourceTree = "<group>"; }; + 0C12E89F2616383A00B66C86 /* pow_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pow_op.h; sourceTree = "<group>"; }; + 0C12E8A02616383A00B66C86 /* exp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = exp_op.h; sourceTree = "<group>"; }; + 0C12E8A12616383A00B66C86 /* heatmap_max_keypoint_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = heatmap_max_keypoint_op.h; sourceTree = "<group>"; }; + 0C12E8A22616383A00B66C86 /* assert_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = assert_op.h; sourceTree = "<group>"; }; + 0C12E8A32616383A00B66C86 /* piecewise_linear_transform_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = piecewise_linear_transform_op.h; sourceTree = "<group>"; }; + 0C12E8A42616383A00B66C86 /* cbrt_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cbrt_op.h; sourceTree = "<group>"; }; + 0C12E8A52616383A00B66C86 /* weighted_sample_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = weighted_sample_op.h; sourceTree = "<group>"; }; + 0C12E8A62616383A00B66C86 /* tanh_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tanh_op.h; sourceTree = "<group>"; }; + 0C12E8A72616383A00B66C86 /* softmax_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = softmax_op.h; sourceTree = "<group>"; }; + 0C12E8A82616383A00B66C86 /* listwise_l2r_op.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = listwise_l2r_op.h; sourceTree = "<group>"; }; + 0C12E8A92616383A00B66C86 /* variable_length_sequence_padding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variable_length_sequence_padding.h; sourceTree = "<group>"; }; + 0C12E8AA2616383A00B66C86 /* elementwise_add_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_add_op.h; sourceTree = "<group>"; }; + 0C12E8AB2616383A00B66C86 /* leaky_relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = leaky_relu_op.h; sourceTree = "<group>"; }; + 0C12E8AC2616383A00B66C86 /* elementwise_linear_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_linear_op.h; sourceTree = "<group>"; }; + 0C12E8AD2616383A00B66C86 /* elu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elu_op.h; sourceTree = "<group>"; }; + 0C12E8AE2616383A00B66C86 /* jsd_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jsd_op.h; sourceTree = "<group>"; }; + 0C12E8AF2616383A00B66C86 /* collect_and_distribute_fpn_rpn_proposals_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = collect_and_distribute_fpn_rpn_proposals_op.h; sourceTree = "<group>"; }; + 0C12E8B02616383A00B66C86 /* reduce_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduce_ops.h; sourceTree = "<group>"; }; + 0C12E8B12616383A00B66C86 /* string_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = string_ops.h; sourceTree = "<group>"; }; + 0C12E8B22616383A00B66C86 /* boolean_mask_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = boolean_mask_ops.h; sourceTree = "<group>"; }; + 0C12E8B32616383A00B66C86 /* local_response_normalization_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = local_response_normalization_op.h; sourceTree = "<group>"; }; + 0C12E8B42616383A00B66C86 /* partition_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = partition_ops.h; sourceTree = "<group>"; }; + 0C12E8B52616383A00B66C86 /* sparse_dropout_with_replacement_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_dropout_with_replacement_op.h; sourceTree = "<group>"; }; + 0C12E8B62616383A00B66C86 /* loss_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loss_op.h; sourceTree = "<group>"; }; + 0C12E8B72616383A00B66C86 /* counter_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = counter_ops.h; sourceTree = "<group>"; }; + 0C12E8B82616383A00B66C86 /* h_softmax_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = h_softmax_op.h; sourceTree = "<group>"; }; + 0C12E8B92616383A00B66C86 /* lengths_reducer_rowwise_8bit_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_rowwise_8bit_ops.h; sourceTree = "<group>"; }; + 0C12E8BA2616383A00B66C86 /* copy_rows_to_tensor_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = copy_rows_to_tensor_op.h; sourceTree 
= "<group>"; }; + 0C12E8BB2616383A00B66C86 /* moments_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = moments_op.h; sourceTree = "<group>"; }; + 0C12E8BC2616383A00B66C86 /* logit_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logit_op.h; sourceTree = "<group>"; }; + 0C12E8BD2616383A00B66C86 /* perplexity_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = perplexity_op.h; sourceTree = "<group>"; }; + 0C12E8BE2616383A00B66C86 /* roi_align_rotated_gradient_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = roi_align_rotated_gradient_op.h; sourceTree = "<group>"; }; + 0C12E8BF2616383A00B66C86 /* ceil_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ceil_op.h; sourceTree = "<group>"; }; + 0C12E8C02616383A00B66C86 /* find_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = find_op.h; sourceTree = "<group>"; }; + 0C12E8C12616383A00B66C86 /* layer_norm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layer_norm_op.h; sourceTree = "<group>"; }; + 0C12E8C22616383A00B66C86 /* negate_gradient_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = negate_gradient_op.h; sourceTree = "<group>"; }; + 0C12E8C32616383A00B66C86 /* resize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resize_op.h; sourceTree = "<group>"; }; + 0C12E8C42616383A00B66C86 /* lengths_reducer_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_ops.h; sourceTree = "<group>"; }; + 0C12E8C52616383A00B66C86 /* batch_sparse_to_dense_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_sparse_to_dense_op.h; sourceTree = "<group>"; }; + 0C12E8C62616383A00B66C86 /* replace_nan_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = replace_nan_op.h; sourceTree = "<group>"; }; + 0C12E8C72616383A00B66C86 /* max_pool_with_index_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = max_pool_with_index_gpu.h; sourceTree = "<group>"; }; + 0C12E8C82616383A00B66C86 /* find_duplicate_elements_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = find_duplicate_elements_op.h; sourceTree = "<group>"; }; + 0C12E8C92616383A00B66C86 /* expand_squeeze_dims_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = expand_squeeze_dims_op.h; sourceTree = "<group>"; }; + 0C12E8CA2616383A00B66C86 /* sinusoid_position_encoding_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sinusoid_position_encoding_op.h; sourceTree = "<group>"; }; + 0C12E8CB2616383A00B66C86 /* pack_segments.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pack_segments.h; sourceTree = "<group>"; }; + 0C12E8CC2616383A00B66C86 /* softplus_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = softplus_op.h; sourceTree = "<group>"; }; + 0C12E8CD2616383A00B66C86 /* quantile_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quantile_op.h; 
sourceTree = "<group>"; }; + 0C12E8CE2616383A00B66C86 /* sinh_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sinh_op.h; sourceTree = "<group>"; }; + 0C12E8CF2616383A00B66C86 /* fused_rowwise_nbitfake_conversion_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_rowwise_nbitfake_conversion_ops.h; sourceTree = "<group>"; }; + 0C12E8D02616383A00B66C86 /* cross_entropy_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cross_entropy_op.h; sourceTree = "<group>"; }; + 0C12E8D12616383A00B66C86 /* feed_blob_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = feed_blob_op.h; sourceTree = "<group>"; }; + 0C12E8D22616383A00B66C86 /* slice_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = slice_op.h; sourceTree = "<group>"; }; + 0C12E8D32616383A00B66C86 /* rsqrt_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rsqrt_op.h; sourceTree = "<group>"; }; + 0C12E8D42616383A00B66C86 /* free_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = free_op.h; sourceTree = "<group>"; }; + 0C12E8D52616383A00B66C86 /* square_root_divide_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = square_root_divide_op.h; sourceTree = "<group>"; }; + 0C12E8D62616383A00B66C86 /* conv_op_shared.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_op_shared.h; sourceTree = "<group>"; }; + 0C12E8D72616383A00B66C86 /* apmeter_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = apmeter_op.h; sourceTree = "<group>"; }; + 0C12E8D82616383A00B66C86 /* lstm_unit_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lstm_unit_op.h; sourceTree = "<group>"; }; + 0C12E8D92616383A00B66C86 /* index_hash_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = index_hash_ops.h; sourceTree = "<group>"; }; + 0C12E8DA2616383A00B66C86 /* lengths_pad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_pad_op.h; sourceTree = "<group>"; }; + 0C12E8DB2616383A00B66C86 /* elementwise_ops_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_ops_utils.h; sourceTree = "<group>"; }; + 0C12E8DC2616383A00B66C86 /* sparse_normalize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_normalize_op.h; sourceTree = "<group>"; }; + 0C12E8DD2616383A00B66C86 /* multi_class_accuracy_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = multi_class_accuracy_op.h; sourceTree = "<group>"; }; + 0C12E8DE2616383A00B66C86 /* cast_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cast_op.h; sourceTree = "<group>"; }; + 0C12E8DF2616383A00B66C86 /* transpose_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transpose_op.h; sourceTree = "<group>"; }; + 0C12E8E02616383A00B66C86 /* create_scope_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = create_scope_op.h; sourceTree = "<group>"; }; + 0C12E8E12616383A00B66C86 
/* zero_gradient_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = zero_gradient_op.h; sourceTree = "<group>"; }; + 0C12E8E22616383A00B66C86 /* lstm_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lstm_utils.h; sourceTree = "<group>"; }; + 0C12E8E32616383A00B66C86 /* tt_linear_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tt_linear_op.h; sourceTree = "<group>"; }; + 0C12E8E42616383A00B66C86 /* relu_n_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = relu_n_op.h; sourceTree = "<group>"; }; + 0C12E8E52616383A00B66C86 /* generate_proposals_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = generate_proposals_op.h; sourceTree = "<group>"; }; + 0C12E8E72616383A00B66C86 /* activation_ops_miopen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation_ops_miopen.h; sourceTree = "<group>"; }; + 0C12E8E82616383A00B66C86 /* lpnorm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lpnorm_op.h; sourceTree = "<group>"; }; + 0C12E8E92616383A00B66C86 /* sequence_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sequence_ops.h; sourceTree = "<group>"; }; + 0C12E8EA2616383A00B66C86 /* abs_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = abs_op.h; sourceTree = "<group>"; }; + 0C12E8EB2616383A00B66C86 /* activation_ops_cudnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation_ops_cudnn.h; sourceTree = "<group>"; }; + 0C12E8EC2616383A00B66C86 /* elementwise_op_test.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_op_test.h; sourceTree = "<group>"; }; + 0C12E8ED2616383A00B66C86 /* inference_lstm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inference_lstm_op.h; sourceTree = "<group>"; }; + 0C12E8EE2616383A00B66C86 /* concat_split_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = concat_split_op.h; sourceTree = "<group>"; }; + 0C12E8EF2616383A00B66C86 /* reduction_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduction_ops.h; sourceTree = "<group>"; }; + 0C12E8F02616383A00B66C86 /* gather_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gather_op.h; sourceTree = "<group>"; }; + 0C12E8F12616383A00B66C86 /* log_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = log_op.h; sourceTree = "<group>"; }; + 0C12E8F22616383A00B66C86 /* conv_pool_op_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_pool_op_base.h; sourceTree = "<group>"; }; + 0C12E8F32616383A00B66C86 /* unique_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unique_ops.h; sourceTree = "<group>"; }; + 0C12E8F42616383A00B66C86 /* elementwise_sub_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_sub_op.h; sourceTree = "<group>"; }; + 0C12E8F52616383A00B66C86 /* segment_reduction_op.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = segment_reduction_op.h; sourceTree = "<group>"; }; + 0C12E8F62616383A00B66C86 /* fused_rowwise_nbit_conversion_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_rowwise_nbit_conversion_ops.h; sourceTree = "<group>"; }; + 0C12E8F72616383A00B66C86 /* stump_func_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stump_func_op.h; sourceTree = "<group>"; }; + 0C12E8F82616383A00B66C86 /* swish_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = swish_op.h; sourceTree = "<group>"; }; + 0C12E8F92616383A00B66C86 /* pack_rnn_sequence_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pack_rnn_sequence_op.h; sourceTree = "<group>"; }; + 0C12E8FA2616383A00B66C86 /* softmax_with_loss_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = softmax_with_loss_op.h; sourceTree = "<group>"; }; + 0C12E8FB2616383A00B66C86 /* integral_image_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = integral_image_op.h; sourceTree = "<group>"; }; + 0C12E8FC2616383A00B66C86 /* mish_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mish_op.h; sourceTree = "<group>"; }; + 0C12E8FD2616383A00B66C86 /* weighted_multi_sampling_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = weighted_multi_sampling_op.h; sourceTree = "<group>"; }; + 0C12E8FE2616383A00B66C86 /* bucketize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bucketize_op.h; sourceTree = "<group>"; }; + 0C12E8FF2616383A00B66C86 /* is_empty_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = is_empty_op.h; sourceTree = "<group>"; }; + 0C12E9002616383A00B66C86 /* mod_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mod_op.h; sourceTree = "<group>"; }; + 0C12E9012616383A00B66C86 /* clip_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clip_op.h; sourceTree = "<group>"; }; + 0C12E9022616383A00B66C86 /* prepend_dim_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prepend_dim_op.h; sourceTree = "<group>"; }; + 0C12E9032616383A00B66C86 /* copy_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = copy_op.h; sourceTree = "<group>"; }; + 0C12E9042616383A00B66C86 /* rank_loss_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rank_loss_op.h; sourceTree = "<group>"; }; + 0C12E9052616383A00B66C86 /* lengths_top_k_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_top_k_op.h; sourceTree = "<group>"; }; + 0C12E9062616383A00B66C86 /* summarize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = summarize_op.h; sourceTree = "<group>"; }; + 0C12E9072616383A00B66C86 /* one_hot_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = one_hot_ops.h; sourceTree = "<group>"; }; + 0C12E9082616383A00B66C86 /* cc_bmm_bg_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cc_bmm_bg_op.h; sourceTree = 
"<group>"; }; + 0C12E9092616383A00B66C86 /* acos_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = acos_op.h; sourceTree = "<group>"; }; + 0C12E90A2616383A00B66C86 /* softmax_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = softmax_utils.h; sourceTree = "<group>"; }; + 0C12E90B2616383A00B66C86 /* tensor_protos_db_input.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_protos_db_input.h; sourceTree = "<group>"; }; + 0C12E90C2616383A00B66C86 /* generate_proposals_op_util_boxes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = generate_proposals_op_util_boxes.h; sourceTree = "<group>"; }; + 0C12E90D2616383A00B66C86 /* conv_transpose_op_mobile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_transpose_op_mobile.h; sourceTree = "<group>"; }; + 0C12E90E2616383A00B66C86 /* arg_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = arg_ops.h; sourceTree = "<group>"; }; + 0C12E90F2616383A00B66C86 /* negative_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = negative_op.h; sourceTree = "<group>"; }; + 0C12E9102616383A00B66C86 /* operator_fallback_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_fallback_gpu.h; sourceTree = "<group>"; }; + 0C12E9112616383A00B66C86 /* margin_ranking_criterion_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = margin_ranking_criterion_op.h; sourceTree = "<group>"; }; + 0C12E9122616383A00B66C86 /* matmul_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = matmul_op.h; sourceTree = "<group>"; }; + 0C12E9132616383A00B66C86 /* roi_align_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = roi_align_op.h; sourceTree = "<group>"; }; + 0C12E9142616383A00B66C86 /* pad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pad_op.h; sourceTree = "<group>"; }; + 0C12E9152616383A00B66C86 /* histogram_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = histogram_op.h; sourceTree = "<group>"; }; + 0C12E9162616383A00B66C86 /* floor_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = floor_op.h; sourceTree = "<group>"; }; + 0C12E9172616383A00B66C86 /* normalize_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalize_op.h; sourceTree = "<group>"; }; + 0C12E9182616383A00B66C86 /* cube_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cube_op.h; sourceTree = "<group>"; }; + 0C12E9192616383A00B66C86 /* reshape_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reshape_op.h; sourceTree = "<group>"; }; + 0C12E91A2616383A00B66C86 /* instance_norm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = instance_norm_op.h; sourceTree = "<group>"; }; + 0C12E91B2616383A00B66C86 /* ngram_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ngram_ops.h; sourceTree = "<group>"; }; + 0C12E91C2616383A00B66C86 /* if_op.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = if_op.h; sourceTree = "<group>"; }; + 0C12E91D2616383A00B66C86 /* reduce_front_back_max_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduce_front_back_max_ops.h; sourceTree = "<group>"; }; + 0C12E91E2616383A00B66C86 /* reducer_functors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reducer_functors.h; sourceTree = "<group>"; }; + 0C12E91F2616383A00B66C86 /* affine_channel_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = affine_channel_op.h; sourceTree = "<group>"; }; + 0C12E9202616383A00B66C86 /* sigmoid_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sigmoid_op.h; sourceTree = "<group>"; }; + 0C12E9212616383A00B66C86 /* channel_shuffle_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_shuffle_op.h; sourceTree = "<group>"; }; + 0C12E9222616383A00B66C86 /* locally_connected_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = locally_connected_op.h; sourceTree = "<group>"; }; + 0C12E9232616383A00B66C86 /* conditional_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conditional_op.h; sourceTree = "<group>"; }; + 0C12E9242616383A00B66C86 /* rms_norm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rms_norm_op.h; sourceTree = "<group>"; }; + 0C12E9252616383A00B66C86 /* dropout_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dropout_op.h; sourceTree = "<group>"; }; + 0C12E9262616383A00B66C86 /* gather_ranges_to_dense_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gather_ranges_to_dense_op.h; sourceTree = "<group>"; }; + 0C12E9272616383A00B66C86 /* shape_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shape_op.h; sourceTree = "<group>"; }; + 0C12E9282616383A00B66C86 /* index_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = index_ops.h; sourceTree = "<group>"; }; + 0C12E9292616383A00B66C86 /* tan_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tan_op.h; sourceTree = "<group>"; }; + 0C12E92A2616383A00B66C86 /* scale_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scale_op.h; sourceTree = "<group>"; }; + 0C12E92B2616383A00B66C86 /* cosine_embedding_criterion_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cosine_embedding_criterion_op.h; sourceTree = "<group>"; }; + 0C12E92C2616383A00B66C86 /* sparse_to_dense_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_to_dense_op.h; sourceTree = "<group>"; }; + 0C12E92D2616383A00B66C86 /* quant_decode_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quant_decode_op.h; sourceTree = "<group>"; }; + 0C12E92F2616383A00B66C86 /* recurrent_network_blob_fetcher_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_network_blob_fetcher_op.h; sourceTree = "<group>"; }; + 0C12E9302616383A00B66C86 /* recurrent_op_cudnn.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_op_cudnn.h; sourceTree = "<group>"; }; + 0C12E9312616383A00B66C86 /* recurrent_network_executor_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_network_executor_gpu.h; sourceTree = "<group>"; }; + 0C12E9322616383A00B66C86 /* recurrent_network_executor_incl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_network_executor_incl.h; sourceTree = "<group>"; }; + 0C12E9342616383A00B66C86 /* recurrent_op_miopen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_op_miopen.h; sourceTree = "<group>"; }; + 0C12E9352616383A00B66C86 /* recurrent_network_executor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_network_executor.h; sourceTree = "<group>"; }; + 0C12E9362616383A00B66C86 /* recurrent_network_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recurrent_network_op.h; sourceTree = "<group>"; }; + 0C12E9372616383A00B66C86 /* sparse_to_dense_mask_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_to_dense_mask_op.h; sourceTree = "<group>"; }; + 0C12E9382616383A00B66C86 /* sin_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sin_op.h; sourceTree = "<group>"; }; + 0C12E9392616383A00B66C86 /* upsample_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = upsample_op.h; sourceTree = "<group>"; }; + 0C12E93A2616383A00B66C86 /* filler_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = filler_op.h; sourceTree = "<group>"; }; + 0C12E93B2616383A00B66C86 /* batch_permutation_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_permutation_op.h; sourceTree = "<group>"; }; + 0C12E93C2616383A00B66C86 /* spatial_softmax_with_loss_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spatial_softmax_with_loss_op.h; sourceTree = "<group>"; }; + 0C12E93D2616383A00B66C86 /* batch_moments_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_moments_op.h; sourceTree = "<group>"; }; + 0C12E93E2616383A00B66C86 /* alias_with_name.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = alias_with_name.h; sourceTree = "<group>"; }; + 0C12E93F2616383A00B66C86 /* do_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = do_op.h; sourceTree = "<group>"; }; + 0C12E9402616383A00B66C86 /* prefetch_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prefetch_op.h; sourceTree = "<group>"; }; + 0C12E9412616383A00B66C86 /* byte_weight_dequant_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = byte_weight_dequant_op.h; sourceTree = "<group>"; }; + 0C12E9422616383A00B66C86 /* spatial_batch_norm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spatial_batch_norm_op.h; sourceTree = "<group>"; }; + 0C12E9442616383A00B66C86 /* helper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = helper.h; sourceTree = "<group>"; 
}; + 0C12E9452616383A00B66C86 /* device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = device.h; sourceTree = "<group>"; }; + 0C12E9462616383A00B66C86 /* onnxifi_init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnxifi_init.h; sourceTree = "<group>"; }; + 0C12E9472616383A00B66C86 /* backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend.h; sourceTree = "<group>"; }; + 0C12E9492616383A00B66C86 /* schema.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = schema.h; sourceTree = "<group>"; }; + 0C12E94A2616383A00B66C86 /* constants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constants.h; sourceTree = "<group>"; }; + 0C12E94B2616383A00B66C86 /* operator_sets.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_sets.h; sourceTree = "<group>"; }; + 0C12E94C2616383A00B66C86 /* backend_rep.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_rep.h; sourceTree = "<group>"; }; + 0C12E94D2616383A00B66C86 /* onnx_exporter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx_exporter.h; sourceTree = "<group>"; }; + 0C12E94E2616383A00B66C86 /* offline_tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = offline_tensor.h; sourceTree = "<group>"; }; + 0C12E94F2616383A00B66C86 /* onnxifi_graph_info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnxifi_graph_info.h; sourceTree = "<group>"; }; + 0C12E9542616383A00B66C86 /* pybind_state.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind_state.h; sourceTree = "<group>"; }; + 0C12E9552616383A00B66C86 /* pybind_state_registry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind_state_registry.h; sourceTree = "<group>"; }; + 0C12E95D2616383A00B66C86 /* dlpack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dlpack.h; sourceTree = "<group>"; }; + 0C12E9692616383A00B66C86 /* pybind_state_dlpack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind_state_dlpack.h; sourceTree = "<group>"; }; + 0C12E9712616383A00B66C86 /* redis_store_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = redis_store_handler.h; sourceTree = "<group>"; }; + 0C12E9722616383A00B66C86 /* file_store_handler_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file_store_handler_op.h; sourceTree = "<group>"; }; + 0C12E9732616383A00B66C86 /* store_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = store_handler.h; sourceTree = "<group>"; }; + 0C12E9742616383A00B66C86 /* store_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = store_ops.h; sourceTree = "<group>"; }; + 0C12E9752616383A00B66C86 /* file_store_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file_store_handler.h; sourceTree = "<group>"; }; + 0C12E9762616383A00B66C86 /* redis_store_handler_op.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = redis_store_handler_op.h; sourceTree = "<group>"; }; + 0C12E9782616383A00B66C86 /* embedding_lookup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embedding_lookup.h; sourceTree = "<group>"; }; + 0C12E9792616383A00B66C86 /* fused_8bit_rowwise_embedding_lookup_idx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_8bit_rowwise_embedding_lookup_idx.h; sourceTree = "<group>"; }; + 0C12E97A2616383A00B66C86 /* lstm_unit_cpu-impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "lstm_unit_cpu-impl.h"; sourceTree = "<group>"; }; + 0C12E97B2616383A00B66C86 /* embedding_lookup_idx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embedding_lookup_idx.h; sourceTree = "<group>"; }; + 0C12E97C2616383A00B66C86 /* adagrad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adagrad.h; sourceTree = "<group>"; }; + 0C12E97D2616383A00B66C86 /* lstm_unit_cpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lstm_unit_cpu.h; sourceTree = "<group>"; }; + 0C12E97E2616383A00B66C86 /* cvtsh_ss_bugfix.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cvtsh_ss_bugfix.h; sourceTree = "<group>"; }; + 0C12E97F2616383A00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E9802616383A00B66C86 /* math.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = math.h; sourceTree = "<group>"; }; + 0C12E9812616383A00B66C86 /* typed_axpy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typed_axpy.h; sourceTree = "<group>"; }; + 0C12E9822616383A00B66C86 /* fused_nbit_rowwise_conversion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_nbit_rowwise_conversion.h; sourceTree = "<group>"; }; + 0C12E9832616383A00B66C86 /* fused_8bit_rowwise_embedding_lookup.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_8bit_rowwise_embedding_lookup.h; sourceTree = "<group>"; }; + 0C12E9842616383A00B66C86 /* lstm_unit_cpu_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lstm_unit_cpu_common.h; sourceTree = "<group>"; }; + 0C12E9872616383A00B66C86 /* fully_connected_op_decomposition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_op_decomposition.h; sourceTree = "<group>"; }; + 0C12E9882616383A00B66C86 /* fully_connected_op_sparse.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_op_sparse.h; sourceTree = "<group>"; }; + 0C12E9892616383A00B66C86 /* tt_contraction_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tt_contraction_op.h; sourceTree = "<group>"; }; + 0C12E98A2616383A00B66C86 /* fully_connected_op_prune.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_op_prune.h; sourceTree = "<group>"; }; + 0C12E98B2616383A00B66C86 /* funhash_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = funhash_op.h; sourceTree = 
"<group>"; }; + 0C12E98C2616383A00B66C86 /* sparse_funhash_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_funhash_op.h; sourceTree = "<group>"; }; + 0C12E98D2616383A00B66C86 /* sparse_matrix_reshape_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_matrix_reshape_op.h; sourceTree = "<group>"; }; + 0C12E98E2616383A00B66C86 /* tt_pad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tt_pad_op.h; sourceTree = "<group>"; }; + 0C12E9912616383A00B66C86 /* common_rtc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_rtc.h; sourceTree = "<group>"; }; + 0C12E9932616383A00B66C86 /* read_adapter_interface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = read_adapter_interface.h; sourceTree = "<group>"; }; + 0C12E9942616383A00B66C86 /* crc_alt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = crc_alt.h; sourceTree = "<group>"; }; + 0C12E9952616383A00B66C86 /* versions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = versions.h; sourceTree = "<group>"; }; + 0C12E9962616383A00B66C86 /* inline_container.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_container.h; sourceTree = "<group>"; }; + 0C12E9972616383A00B66C86 /* file_adapter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file_adapter.h; sourceTree = "<group>"; }; + 0C12E9982616383A00B66C86 /* istream_adapter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = istream_adapter.h; sourceTree = "<group>"; }; + 0C12E99A2616383A00B66C86 /* filler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = filler.h; sourceTree = "<group>"; }; + 0C12E99B2616383A00B66C86 /* math-detail.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "math-detail.h"; sourceTree = "<group>"; }; + 0C12E99C2616383A00B66C86 /* signal_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = signal_handler.h; sourceTree = "<group>"; }; + 0C12E99D2616383A00B66C86 /* cpu_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpu_neon.h; sourceTree = "<group>"; }; + 0C12E99E2616383A00B66C86 /* conversions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conversions.h; sourceTree = "<group>"; }; + 0C12E99F2616383A00B66C86 /* string_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = string_utils.h; sourceTree = "<group>"; }; + 0C12E9A02616383A00B66C86 /* simple_queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = simple_queue.h; sourceTree = "<group>"; }; + 0C12E9A12616383A00B66C86 /* cpuid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpuid.h; sourceTree = "<group>"; }; + 0C12E9A32616383A00B66C86 /* ThreadPool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadPool.h; sourceTree = "<group>"; }; + 0C12E9A42616383A00B66C86 /* ThreadPoolCommon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; 
path = ThreadPoolCommon.h; sourceTree = "<group>"; }; + 0C12E9A52616383A00B66C86 /* pthreadpool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pthreadpool.h; sourceTree = "<group>"; }; + 0C12E9A62616383A00B66C86 /* pthreadpool-cpp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "pthreadpool-cpp.h"; sourceTree = "<group>"; }; + 0C12E9A72616383A00B66C86 /* WorkersPool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WorkersPool.h; sourceTree = "<group>"; }; + 0C12E9A82616383A00B66C86 /* thread_pool_guard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = thread_pool_guard.h; sourceTree = "<group>"; }; + 0C12E9AA2616383A00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12E9AB2616383A00B66C86 /* broadcast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = broadcast.h; sourceTree = "<group>"; }; + 0C12E9AC2616383A00B66C86 /* elementwise.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise.h; sourceTree = "<group>"; }; + 0C12E9AD2616383A00B66C86 /* half_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = half_utils.h; sourceTree = "<group>"; }; + 0C12E9AE2616383A00B66C86 /* reduce.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduce.h; sourceTree = "<group>"; }; + 0C12E9AF2616383A00B66C86 /* transpose.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transpose.h; sourceTree = "<group>"; }; + 0C12E9B02616383A00B66C86 /* fixed_divisor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fixed_divisor.h; sourceTree = "<group>"; }; + 0C12E9B12616383A00B66C86 /* proto_wrap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = proto_wrap.h; sourceTree = "<group>"; }; + 0C12E9B22616383A00B66C86 /* bench_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bench_utils.h; sourceTree = "<group>"; }; + 0C12E9B32616383A00B66C86 /* cast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cast.h; sourceTree = "<group>"; }; + 0C12E9B52616383A00B66C86 /* murmur_hash3.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = murmur_hash3.h; sourceTree = "<group>"; }; + 0C12E9B62616383A00B66C86 /* math.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = math.h; sourceTree = "<group>"; }; + 0C12E9B72616383B00B66C86 /* eigen_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eigen_utils.h; sourceTree = "<group>"; }; + 0C12E9B82616383B00B66C86 /* smart_tensor_printer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = smart_tensor_printer.h; sourceTree = "<group>"; }; + 0C12E9B92616383B00B66C86 /* proto_convert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = proto_convert.h; sourceTree = "<group>"; }; + 0C12E9BA2616383B00B66C86 /* proto_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = proto_utils.h; 
sourceTree = "<group>"; }; + 0C12E9BB2616383B00B66C86 /* cblas.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cblas.h; sourceTree = "<group>"; }; + 0C12E9BC2616383B00B66C86 /* map_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = map_utils.h; sourceTree = "<group>"; }; + 0C12E9BD2616383B00B66C86 /* zmq_helper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = zmq_helper.h; sourceTree = "<group>"; }; + 0C12E9C12616383B00B66C86 /* ctc_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ctc_op.h; sourceTree = "<group>"; }; + 0C12E9C32616383B00B66C86 /* cuda_nccl_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_nccl_gpu.h; sourceTree = "<group>"; }; + 0C12E9C92616383B00B66C86 /* allreduce_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allreduce_ops.h; sourceTree = "<group>"; }; + 0C12E9CA2616383B00B66C86 /* allgather_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = allgather_ops.h; sourceTree = "<group>"; }; + 0C12E9CB2616383B00B66C86 /* context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context.h; sourceTree = "<group>"; }; + 0C12E9CC2616383B00B66C86 /* store_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = store_handler.h; sourceTree = "<group>"; }; + 0C12E9CD2616383B00B66C86 /* broadcast_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = broadcast_ops.h; sourceTree = "<group>"; }; + 0C12E9CE2616383B00B66C86 /* reduce_scatter_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduce_scatter_ops.h; sourceTree = "<group>"; }; + 0C12E9CF2616383B00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E9D02616383B00B66C86 /* common_world_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_world_ops.h; sourceTree = "<group>"; }; + 0C12E9D12616383B00B66C86 /* barrier_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = barrier_ops.h; sourceTree = "<group>"; }; + 0C12E9D32616383B00B66C86 /* sum_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sum_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9D42616383B00B66C86 /* lengths_reducer_fused_4bit_rowwise_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_fused_4bit_rowwise_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9D52616383B00B66C86 /* int8_dequantize_op_nnpi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_dequantize_op_nnpi.h; sourceTree = "<group>"; }; + 0C12E9D72616383B00B66C86 /* fp16_gemm_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16_gemm_utils.h; sourceTree = "<group>"; }; + 0C12E9D82616383B00B66C86 /* fp16_fma.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16_fma.h; sourceTree = "<group>"; }; + 0C12E9D92616383B00B66C86 /* fp16_fc_acc_op.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16_fc_acc_op.h; sourceTree = "<group>"; }; + 0C12E9DA2616383B00B66C86 /* layernorm_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = layernorm_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9DB2616383B00B66C86 /* unary_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unary_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9DC2616383B00B66C86 /* int8_quantize_op_nnpi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_quantize_op_nnpi.h; sourceTree = "<group>"; }; + 0C12E9DD2616383B00B66C86 /* lengths_reducer_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_ops.h; sourceTree = "<group>"; }; + 0C12E9DE2616383B00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12E9DF2616383B00B66C86 /* batch_matmul_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_matmul_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9E02616383B00B66C86 /* lengths_reducer_fused_8bit_rowwise_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lengths_reducer_fused_8bit_rowwise_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9E12616383B00B66C86 /* spatial_batch_norm_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spatial_batch_norm_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9E22616383B00B66C86 /* quant_lut_fp16_fake_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quant_lut_fp16_fake_op.h; sourceTree = "<group>"; }; + 0C12E9E32616383B00B66C86 /* int8_swish_op_nnpi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_swish_op_nnpi.h; sourceTree = "<group>"; }; + 0C12E9E72616383B00B66C86 /* context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context.h; sourceTree = "<group>"; }; + 0C12E9EA2616383B00B66C86 /* prof_dag_stats_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prof_dag_stats_op.h; sourceTree = "<group>"; }; + 0C12E9EC2616383B00B66C86 /* tensorrt_tranformer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorrt_tranformer.h; sourceTree = "<group>"; }; + 0C12E9ED2616383B00B66C86 /* trt_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = trt_utils.h; sourceTree = "<group>"; }; + 0C12E9EE2616383B00B66C86 /* tensorrt_op_trt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorrt_op_trt.h; sourceTree = "<group>"; }; + 0C12E9F02616383B00B66C86 /* shm_mutex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shm_mutex.h; sourceTree = "<group>"; }; + 0C12E9F32616383B00B66C86 /* aten_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aten_op.h; sourceTree = "<group>"; }; + 0C12E9F52616383B00B66C86 /* aten_op_template.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aten_op_template.h; sourceTree 
= "<group>"; }; + 0C12E9F82616383B00B66C86 /* image_input_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = image_input_op.h; sourceTree = "<group>"; }; + 0C12E9F92616383B00B66C86 /* transform_gpu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transform_gpu.h; sourceTree = "<group>"; }; + 0C12E9FC2616383B00B66C86 /* fbgemm_fp16_pack_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fbgemm_fp16_pack_op.h; sourceTree = "<group>"; }; + 0C12E9FD2616383B00B66C86 /* concat_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = concat_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12E9FE2616383B00B66C86 /* fully_connected_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12E9FF2616383B00B66C86 /* int8_quant_scheme_blob_fill.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_quant_scheme_blob_fill.h; sourceTree = "<group>"; }; + 0C12EA002616383B00B66C86 /* quantize_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quantize_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA012616383B00B66C86 /* batch_matmul_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_matmul_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA022616383B00B66C86 /* utility_dnnlowp_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utility_dnnlowp_ops.h; sourceTree = "<group>"; }; + 0C12EA032616383B00B66C86 /* activation_distribution_observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation_distribution_observer.h; sourceTree = "<group>"; }; + 0C12EA042616383B00B66C86 /* compute_equalization_scale.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compute_equalization_scale.h; sourceTree = "<group>"; }; + 0C12EA052616383B00B66C86 /* caffe2_dnnlowp_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = caffe2_dnnlowp_utils.h; sourceTree = "<group>"; }; + 0C12EA062616383B00B66C86 /* dnnlowp_partition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dnnlowp_partition.h; sourceTree = "<group>"; }; + 0C12EA072616383B00B66C86 /* fully_connected_fake_lowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_fake_lowp_op.h; sourceTree = "<group>"; }; + 0C12EA082616383B00B66C86 /* op_wrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = op_wrapper.h; sourceTree = "<group>"; }; + 0C12EA092616383B00B66C86 /* batch_permutation_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_permutation_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA0A2616383B00B66C86 /* conv_relu_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_relu_op.h; sourceTree = "<group>"; }; + 0C12EA0B2616383B00B66C86 /* conv_pool_dnnlowp_op_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_pool_dnnlowp_op_base.h; sourceTree = "<group>"; 
}; + 0C12EA0C2616383B00B66C86 /* mmio.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mmio.h; sourceTree = "<group>"; }; + 0C12EA0D2616383B00B66C86 /* lstm_unit_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lstm_unit_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA0E2616383B00B66C86 /* fbgemm_pack_matrix_cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fbgemm_pack_matrix_cache.h; sourceTree = "<group>"; }; + 0C12EA0F2616383B00B66C86 /* im2col_dnnlowp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = im2col_dnnlowp.h; sourceTree = "<group>"; }; + 0C12EA102616383B00B66C86 /* fbgemm_pack_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fbgemm_pack_op.h; sourceTree = "<group>"; }; + 0C12EA112616383B00B66C86 /* resize_nearest_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resize_nearest_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA122616383B00B66C86 /* group_norm_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = group_norm_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA132616383B00B66C86 /* elementwise_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA142616383B00B66C86 /* fb_fc_packed_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fb_fc_packed_op.h; sourceTree = "<group>"; }; + 0C12EA152616383B00B66C86 /* relu_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = relu_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA162616383B00B66C86 /* spatial_batch_norm_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = spatial_batch_norm_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA172616383B00B66C86 /* dequantize_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dequantize_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA182616383B00B66C86 /* kl_minimization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kl_minimization.h; sourceTree = "<group>"; }; + 0C12EA192616383B00B66C86 /* dynamic_histogram.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dynamic_histogram.h; sourceTree = "<group>"; }; + 0C12EA1A2616383B00B66C86 /* tanh.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tanh.h; sourceTree = "<group>"; }; + 0C12EA1B2616383B00B66C86 /* fbgemm_pack_blob.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fbgemm_pack_blob.h; sourceTree = "<group>"; }; + 0C12EA1C2616383B00B66C86 /* resize_nearest_3d_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resize_nearest_3d_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA1D2616383B00B66C86 /* int8_gen_quant_params.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_gen_quant_params.h; sourceTree = "<group>"; }; + 0C12EA1E2616383B00B66C86 /* conv_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = conv_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA1F2616383B00B66C86 /* sigmoid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sigmoid.h; sourceTree = "<group>"; }; + 0C12EA202616383B00B66C86 /* channel_shuffle_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = channel_shuffle_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA212616383B00B66C86 /* int8_gen_quant_params_min_max.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = int8_gen_quant_params_min_max.h; sourceTree = "<group>"; }; + 0C12EA222616383B00B66C86 /* quantization_error_minimization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quantization_error_minimization.h; sourceTree = "<group>"; }; + 0C12EA232616383B00B66C86 /* elementwise_linear_dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = elementwise_linear_dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA242616383B00B66C86 /* dnnlowp_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dnnlowp_op.h; sourceTree = "<group>"; }; + 0C12EA252616383B00B66C86 /* l2_minimization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = l2_minimization.h; sourceTree = "<group>"; }; + 0C12EA262616383B00B66C86 /* dnnlowp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dnnlowp.h; sourceTree = "<group>"; }; + 0C12EA272616383B00B66C86 /* conv_dnnlowp_acc16_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_dnnlowp_acc16_op.h; sourceTree = "<group>"; }; + 0C12EA282616383B00B66C86 /* transpose.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transpose.h; sourceTree = "<group>"; }; + 0C12EA292616383B00B66C86 /* pool_dnnlowp_op_avx2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pool_dnnlowp_op_avx2.h; sourceTree = "<group>"; }; + 0C12EA2A2616383B00B66C86 /* fully_connected_dnnlowp_acc16_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fully_connected_dnnlowp_acc16_op.h; sourceTree = "<group>"; }; + 0C12EA2C2616383B00B66C86 /* single_op_transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = single_op_transform.h; sourceTree = "<group>"; }; + 0C12EA2D2616383B00B66C86 /* common_subexpression_elimination.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_subexpression_elimination.h; sourceTree = "<group>"; }; + 0C12EA2E2616383B00B66C86 /* conv_to_nnpack_transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv_to_nnpack_transform.h; sourceTree = "<group>"; }; + 0C12EA2F2616383B00B66C86 /* pattern_net_transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pattern_net_transform.h; sourceTree = "<group>"; }; + 0C12EA342616383B00B66C86 /* libopencl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = libopencl.h; sourceTree = "<group>"; }; + 0C12EA362616383B00B66C86 /* cl_platform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
cl_platform.h; sourceTree = "<group>"; }; + 0C12EA372616383B00B66C86 /* opencl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = opencl.h; sourceTree = "<group>"; }; + 0C12EA382616383B00B66C86 /* cl_ext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cl_ext.h; sourceTree = "<group>"; }; + 0C12EA392616383B00B66C86 /* cl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cl.h; sourceTree = "<group>"; }; + 0C12EA3A2616383B00B66C86 /* cl_gl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cl_gl.h; sourceTree = "<group>"; }; + 0C12EA3B2616383B00B66C86 /* cl_gl_ext.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cl_gl_ext.h; sourceTree = "<group>"; }; + 0C12EA3E2616383B00B66C86 /* ios_caffe_defines.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ios_caffe_defines.h; sourceTree = "<group>"; }; + 0C12EA402616383B00B66C86 /* mpscnn_graph_mask.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscnn_graph_mask.h; sourceTree = "<group>"; }; + 0C12EA412616383B00B66C86 /* mpscnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscnn.h; sourceTree = "<group>"; }; + 0C12EA422616383B00B66C86 /* mpscnn_test.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscnn_test.h; sourceTree = "<group>"; }; + 0C12EA432616383B00B66C86 /* mpscnn_kernels.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscnn_kernels.h; sourceTree = "<group>"; }; + 0C12EA442616383B00B66C86 /* mpscnn_context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mpscnn_context.h; sourceTree = "<group>"; }; + 0C12EA452616383B00B66C86 /* ios_caffe.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ios_caffe.h; sourceTree = "<group>"; }; + 0C12EA462616383B00B66C86 /* ios_caffe_predictor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ios_caffe_predictor.h; sourceTree = "<group>"; }; + 0C12EA482616383B00B66C86 /* snpe_ffi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = snpe_ffi.h; sourceTree = "<group>"; }; + 0C12EA4A2616383B00B66C86 /* nnapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nnapi.h; sourceTree = "<group>"; }; + 0C12EA4B2616383B00B66C86 /* NeuralNetworks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NeuralNetworks.h; sourceTree = "<group>"; }; + 0C12EA4C2616383B00B66C86 /* dlnnapi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dlnnapi.h; sourceTree = "<group>"; }; + 0C12EA4E2616383B00B66C86 /* ulp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ulp.h; sourceTree = "<group>"; }; + 0C12EA4F2616383B00B66C86 /* ulp_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ulp_neon.h; sourceTree = "<group>"; }; + 0C12EA522616383B00B66C86 /* libvulkan-stub.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "libvulkan-stub.h"; sourceTree = "<group>"; }; + 
0C12EA542616383B00B66C86 /* vulkan.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vulkan.h; sourceTree = "<group>"; }; + 0C12EA552616383B00B66C86 /* vk_platform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vk_platform.h; sourceTree = "<group>"; }; + 0C12EA582616383B00B66C86 /* fp16_momentum_sgd_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16_momentum_sgd_op.h; sourceTree = "<group>"; }; + 0C12EA592616383B00B66C86 /* rmsprop_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rmsprop_op.h; sourceTree = "<group>"; }; + 0C12EA5A2616383B00B66C86 /* lars_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lars_op.h; sourceTree = "<group>"; }; + 0C12EA5B2616383B00B66C86 /* yellowfin_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = yellowfin_op.h; sourceTree = "<group>"; }; + 0C12EA5C2616383B00B66C86 /* math_lp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = math_lp.h; sourceTree = "<group>"; }; + 0C12EA5D2616383B00B66C86 /* storm_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = storm_op.h; sourceTree = "<group>"; }; + 0C12EA5E2616383B00B66C86 /* adagrad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adagrad_op.h; sourceTree = "<group>"; }; + 0C12EA5F2616383B00B66C86 /* clip_tensor_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clip_tensor_op.h; sourceTree = "<group>"; }; + 0C12EA602616383B00B66C86 /* gftrl_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gftrl_op.h; sourceTree = "<group>"; }; + 0C12EA612616383B00B66C86 /* adadelta_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adadelta_op.h; sourceTree = "<group>"; }; + 0C12EA622616383B00B66C86 /* learning_rate_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = learning_rate_op.h; sourceTree = "<group>"; }; + 0C12EA632616383B00B66C86 /* adagrad_fused.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adagrad_fused.h; sourceTree = "<group>"; }; + 0C12EA642616383B00B66C86 /* adam_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adam_op.h; sourceTree = "<group>"; }; + 0C12EA652616383B00B66C86 /* ftrl_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ftrl_op.h; sourceTree = "<group>"; }; + 0C12EA662616383B00B66C86 /* weight_scale_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = weight_scale_op.h; sourceTree = "<group>"; }; + 0C12EA672616383B00B66C86 /* learning_rate_adaption_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = learning_rate_adaption_op.h; sourceTree = "<group>"; }; + 0C12EA682616383B00B66C86 /* rowwise_counter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rowwise_counter.h; sourceTree = "<group>"; }; + 0C12EA692616383B00B66C86 /* iter_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = iter_op.h; sourceTree = "<group>"; 
}; + 0C12EA6A2616383B00B66C86 /* rowwise_adagrad_fused.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rowwise_adagrad_fused.h; sourceTree = "<group>"; }; + 0C12EA6B2616383B00B66C86 /* momentum_sgd_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = momentum_sgd_op.h; sourceTree = "<group>"; }; + 0C12EA6C2616383B00B66C86 /* wngrad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wngrad_op.h; sourceTree = "<group>"; }; + 0C12EA6D2616383B00B66C86 /* decay_adagrad_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = decay_adagrad_op.h; sourceTree = "<group>"; }; + 0C12EA6E2616383B00B66C86 /* learning_rate_functors.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = learning_rate_functors.h; sourceTree = "<group>"; }; + 0C12EA6F2616383B00B66C86 /* fp32_momentum_sgd_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp32_momentum_sgd_op.h; sourceTree = "<group>"; }; + 0C12EA712616383B00B66C86 /* blobs_queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blobs_queue.h; sourceTree = "<group>"; }; + 0C12EA722616383B00B66C86 /* rebatching_queue_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rebatching_queue_ops.h; sourceTree = "<group>"; }; + 0C12EA732616383B00B66C86 /* queue_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue_ops.h; sourceTree = "<group>"; }; + 0C12EA742616383B00B66C86 /* rebatching_queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rebatching_queue.h; sourceTree = "<group>"; }; + 0C12EA752616383B00B66C86 /* blobs_queue_db.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blobs_queue_db.h; sourceTree = "<group>"; }; + 0C12EA772616383B00B66C86 /* create_db_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = create_db_op.h; sourceTree = "<group>"; }; + 0C12EA7B2616383B00B66C86 /* ast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ast.h; sourceTree = "<group>"; }; + 0C12EA7C2616383B00B66C86 /* graphmatcher.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graphmatcher.h; sourceTree = "<group>"; }; + 0C12EA7D2616383B00B66C86 /* device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = device.h; sourceTree = "<group>"; }; + 0C12EA7E2616383B00B66C86 /* annotations.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = annotations.h; sourceTree = "<group>"; }; + 0C12EA7F2616383B00B66C86 /* mobile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mobile.h; sourceTree = "<group>"; }; + 0C12EA802616383B00B66C86 /* onnxifi_transformer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnxifi_transformer.h; sourceTree = "<group>"; }; + 0C12EA812616383B00B66C86 /* converter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = converter.h; sourceTree = "<group>"; }; + 0C12EA822616383B00B66C86 /* backend_transformer_base.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = backend_transformer_base.h; sourceTree = "<group>"; }; + 0C12EA832616383B00B66C86 /* fakefp16_transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fakefp16_transform.h; sourceTree = "<group>"; }; + 0C12EA842616383B00B66C86 /* fusion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fusion.h; sourceTree = "<group>"; }; + 0C12EA852616383B00B66C86 /* shape_info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shape_info.h; sourceTree = "<group>"; }; + 0C12EA862616383B00B66C86 /* optimizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optimizer.h; sourceTree = "<group>"; }; + 0C12EA872616383B00B66C86 /* glow_net_transform.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = glow_net_transform.h; sourceTree = "<group>"; }; + 0C12EA882616383B00B66C86 /* backend_cutting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_cutting.h; sourceTree = "<group>"; }; + 0C12EA892616383B00B66C86 /* distributed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distributed.h; sourceTree = "<group>"; }; + 0C12EA8A2616383B00B66C86 /* onnxifi_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnxifi_op.h; sourceTree = "<group>"; }; + 0C12EA8B2616383B00B66C86 /* tvm_transformer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tvm_transformer.h; sourceTree = "<group>"; }; + 0C12EA8C2616383B00B66C86 /* passes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = passes.h; sourceTree = "<group>"; }; + 0C12EA8D2616383B00B66C86 /* bound_shape_inferencer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bound_shape_inferencer.h; sourceTree = "<group>"; }; + 0C12EA8F2616383B00B66C86 /* concat_elim.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = concat_elim.h; sourceTree = "<group>"; }; + 0C12EA902616383B00B66C86 /* pointwise_elim.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pointwise_elim.h; sourceTree = "<group>"; }; + 0C12EA912616383B00B66C86 /* freeze_quantization_params.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = freeze_quantization_params.h; sourceTree = "<group>"; }; + 0C12EA922616383B00B66C86 /* in_batch_broadcast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = in_batch_broadcast.h; sourceTree = "<group>"; }; + 0C12EA932616383B00B66C86 /* cc_amrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cc_amrc.h; sourceTree = "<group>"; }; + 0C12EA942616383B00B66C86 /* onnx_convert.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx_convert.h; sourceTree = "<group>"; }; + 0C12EA952616383B00B66C86 /* optimize_ideep.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optimize_ideep.h; sourceTree = "<group>"; }; + 0C12EA972616383B00B66C86 /* ThreadLocalPtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalPtr.h; sourceTree = "<group>"; }; 
+ 0C12EA982616383B00B66C86 /* InferenceGraph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InferenceGraph.h; sourceTree = "<group>"; }; + 0C12EA992616383B00B66C86 /* predictor_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = predictor_utils.h; sourceTree = "<group>"; }; + 0C12EA9A2616383B00B66C86 /* predictor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = predictor.h; sourceTree = "<group>"; }; + 0C12EA9B2616383B00B66C86 /* predictor_config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = predictor_config.h; sourceTree = "<group>"; }; + 0C12EA9D2616383B00B66C86 /* data_filler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_filler.h; sourceTree = "<group>"; }; + 0C12EA9E2616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EA9F2616383B00B66C86 /* net_supplier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = net_supplier.h; sourceTree = "<group>"; }; + 0C12EAA02616383B00B66C86 /* time_profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time_profiler.h; sourceTree = "<group>"; }; + 0C12EAA12616383B00B66C86 /* emulator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = emulator.h; sourceTree = "<group>"; }; + 0C12EAA22616383B00B66C86 /* output_formatter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = output_formatter.h; sourceTree = "<group>"; }; + 0C12EAA32616383B00B66C86 /* std_output_formatter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = std_output_formatter.h; sourceTree = "<group>"; }; + 0C12EAA42616383B00B66C86 /* benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = benchmark.h; sourceTree = "<group>"; }; + 0C12EAA52616383B00B66C86 /* profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiler.h; sourceTree = "<group>"; }; + 0C12EAA62616383B00B66C86 /* transforms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transforms.h; sourceTree = "<group>"; }; + 0C12EAA82616383B00B66C86 /* operator_attaching_net_observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_attaching_net_observer.h; sourceTree = "<group>"; }; + 0C12EAA92616383B00B66C86 /* time_observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = time_observer.h; sourceTree = "<group>"; }; + 0C12EAAA2616383B00B66C86 /* runcnt_observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = runcnt_observer.h; sourceTree = "<group>"; }; + 0C12EAAB2616383B00B66C86 /* profile_observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profile_observer.h; sourceTree = "<group>"; }; + 0C12EAB12616383B00B66C86 /* quant_decomp_zstd_op.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quant_decomp_zstd_op.h; sourceTree = "<group>"; }; + 0C12EAB22616383B00B66C86 /* cpuinfo.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = cpuinfo.h; sourceTree = "<group>"; }; + 0C12EAB52616383B00B66C86 /* Size.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Size.h; sourceTree = "<group>"; }; + 0C12EAB62616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EAB72616383B00B66C86 /* Device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Device.h; sourceTree = "<group>"; }; + 0C12EAB92616383B00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12EABA2616383B00B66C86 /* onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx.h; sourceTree = "<group>"; }; + 0C12EABB2616383B00B66C86 /* Types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Types.h; sourceTree = "<group>"; }; + 0C12EABE2616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EAC02616383B00B66C86 /* container.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = container.h; sourceTree = "<group>"; }; + 0C12EAC12616383B00B66C86 /* context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = context.h; sourceTree = "<group>"; }; + 0C12EAC32616383B00B66C86 /* cleanup_autograd_context_req.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cleanup_autograd_context_req.h; sourceTree = "<group>"; }; + 0C12EAC42616383B00B66C86 /* cleanup_autograd_context_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cleanup_autograd_context_resp.h; sourceTree = "<group>"; }; + 0C12EAC52616383B00B66C86 /* rref_backward_req.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_backward_req.h; sourceTree = "<group>"; }; + 0C12EAC62616383B00B66C86 /* rpc_with_profiling_req.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc_with_profiling_req.h; sourceTree = "<group>"; }; + 0C12EAC72616383B00B66C86 /* propagate_gradients_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = propagate_gradients_resp.h; sourceTree = "<group>"; }; + 0C12EAC82616383B00B66C86 /* propagate_gradients_req.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = propagate_gradients_req.h; sourceTree = "<group>"; }; + 0C12EAC92616383B00B66C86 /* autograd_metadata.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autograd_metadata.h; sourceTree = "<group>"; }; + 0C12EACA2616383B00B66C86 /* rpc_with_autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc_with_autograd.h; sourceTree = "<group>"; }; + 0C12EACB2616383B00B66C86 /* rref_backward_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_backward_resp.h; sourceTree = "<group>"; }; + 0C12EACC2616383B00B66C86 /* rpc_with_profiling_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc_with_profiling_resp.h; sourceTree = 
"<group>"; }; + 0C12EACD2616383B00B66C86 /* python_autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_autograd.h; sourceTree = "<group>"; }; + 0C12EACE2616383B00B66C86 /* autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autograd.h; sourceTree = "<group>"; }; + 0C12EAD02616383B00B66C86 /* sendrpc_backward.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sendrpc_backward.h; sourceTree = "<group>"; }; + 0C12EAD12616383B00B66C86 /* recvrpc_backward.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = recvrpc_backward.h; sourceTree = "<group>"; }; + 0C12EAD32616383B00B66C86 /* dist_engine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dist_engine.h; sourceTree = "<group>"; }; + 0C12EAD62616383B00B66C86 /* RpcMetricsHandler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RpcMetricsHandler.h; sourceTree = "<group>"; }; + 0C12EAD72616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EAD82616383B00B66C86 /* rref_context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_context.h; sourceTree = "<group>"; }; + 0C12EAD92616383B00B66C86 /* request_callback_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = request_callback_impl.h; sourceTree = "<group>"; }; + 0C12EADA2616383B00B66C86 /* python_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_resp.h; sourceTree = "<group>"; }; + 0C12EADB2616383B00B66C86 /* rref_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_impl.h; sourceTree = "<group>"; }; + 0C12EADC2616383B00B66C86 /* request_callback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = request_callback.h; sourceTree = "<group>"; }; + 0C12EADD2616383B00B66C86 /* types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = types.h; sourceTree = "<group>"; }; + 0C12EADE2616383B00B66C86 /* rref_proto.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_proto.h; sourceTree = "<group>"; }; + 0C12EADF2616383B00B66C86 /* py_rref.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = py_rref.h; sourceTree = "<group>"; }; + 0C12EAE02616383B00B66C86 /* rpc_agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc_agent.h; sourceTree = "<group>"; }; + 0C12EAE12616383B00B66C86 /* python_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_functions.h; sourceTree = "<group>"; }; + 0C12EAE22616383B00B66C86 /* message.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = message.h; sourceTree = "<group>"; }; + 0C12EAE32616383B00B66C86 /* request_callback_no_python.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = request_callback_no_python.h; sourceTree = "<group>"; }; + 0C12EAE42616383B00B66C86 /* python_remote_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = python_remote_call.h; sourceTree = "<group>"; }; + 0C12EAE52616383B00B66C86 /* python_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_call.h; sourceTree = "<group>"; }; + 0C12EAE62616383B00B66C86 /* tensorpipe_agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorpipe_agent.h; sourceTree = "<group>"; }; + 0C12EAE72616383B00B66C86 /* script_remote_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script_remote_call.h; sourceTree = "<group>"; }; + 0C12EAE92616383B00B66C86 /* testing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = testing.h; sourceTree = "<group>"; }; + 0C12EAEA2616383B00B66C86 /* faulty_process_group_agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = faulty_process_group_agent.h; sourceTree = "<group>"; }; + 0C12EAEB2616383B00B66C86 /* macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = macros.h; sourceTree = "<group>"; }; + 0C12EAEC2616383B00B66C86 /* script_resp.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script_resp.h; sourceTree = "<group>"; }; + 0C12EAED2616383B00B66C86 /* rpc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc.h; sourceTree = "<group>"; }; + 0C12EAEE2616383B00B66C86 /* rpc_command_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rpc_command_base.h; sourceTree = "<group>"; }; + 0C12EAF02616383B00B66C86 /* remote_profiler_manager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remote_profiler_manager.h; sourceTree = "<group>"; }; + 0C12EAF12616383B00B66C86 /* server_process_global_profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = server_process_global_profiler.h; sourceTree = "<group>"; }; + 0C12EAF22616383B00B66C86 /* script_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script_call.h; sourceTree = "<group>"; }; + 0C12EAF32616383B00B66C86 /* unpickled_python_remote_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unpickled_python_remote_call.h; sourceTree = "<group>"; }; + 0C12EAF42616383B00B66C86 /* torchscript_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = torchscript_functions.h; sourceTree = "<group>"; }; + 0C12EAF52616383B00B66C86 /* unpickled_python_call.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unpickled_python_call.h; sourceTree = "<group>"; }; + 0C12EAF62616383B00B66C86 /* tensorpipe_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorpipe_utils.h; sourceTree = "<group>"; }; + 0C12EAF72616383B00B66C86 /* agent_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = agent_utils.h; sourceTree = "<group>"; }; + 0C12EAF82616383B00B66C86 /* process_group_agent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = process_group_agent.h; sourceTree = "<group>"; }; + 0C12EAF92616383B00B66C86 /* python_rpc_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = python_rpc_handler.h; sourceTree = "<group>"; }; + 0C12EAFB2616383B00B66C86 /* python_comm_hook.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_comm_hook.h; sourceTree = "<group>"; }; + 0C12EAFC2616383B00B66C86 /* c10d.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = c10d.h; sourceTree = "<group>"; }; + 0C12EAFF2616383B00B66C86 /* python_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_functions.h; sourceTree = "<group>"; }; + 0C12EB002616383B00B66C86 /* Functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Functions.h; sourceTree = "<group>"; }; + 0C12EB012616383B00B66C86 /* variable_factories.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variable_factories.h; sourceTree = "<group>"; }; + 0C12EB022616383B00B66C86 /* python_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_function.h; sourceTree = "<group>"; }; + 0C12EB032616383B00B66C86 /* custom_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = custom_function.h; sourceTree = "<group>"; }; + 0C12EB042616383B00B66C86 /* python_linalg_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_linalg_functions.h; sourceTree = "<group>"; }; + 0C12EB052616383B00B66C86 /* record_function_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = record_function_ops.h; sourceTree = "<group>"; }; + 0C12EB062616383B00B66C86 /* engine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = engine.h; sourceTree = "<group>"; }; + 0C12EB072616383B00B66C86 /* edge.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = edge.h; sourceTree = "<group>"; }; + 0C12EB082616383B00B66C86 /* saved_variable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = saved_variable.h; sourceTree = "<group>"; }; + 0C12EB092616383B00B66C86 /* python_engine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_engine.h; sourceTree = "<group>"; }; + 0C12EB0A2616383B00B66C86 /* python_legacy_variable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_legacy_variable.h; sourceTree = "<group>"; }; + 0C12EB0B2616383B00B66C86 /* python_cpp_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_cpp_function.h; sourceTree = "<group>"; }; + 0C12EB0C2616383B00B66C86 /* python_hook.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_hook.h; sourceTree = "<group>"; }; + 0C12EB0D2616383B00B66C86 /* VariableTypeUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VariableTypeUtils.h; sourceTree = "<group>"; }; + 0C12EB0E2616383B00B66C86 /* python_autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_autograd.h; sourceTree = "<group>"; }; + 0C12EB0F2616383B00B66C86 /* profiler_kineto.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiler_kineto.h; 
sourceTree = "<group>"; }; + 0C12EB102616383B00B66C86 /* variable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variable.h; sourceTree = "<group>"; }; + 0C12EB122616383B00B66C86 /* wrap_outputs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = wrap_outputs.h; sourceTree = "<group>"; }; + 0C12EB132616383B00B66C86 /* python_arg_parsing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_arg_parsing.h; sourceTree = "<group>"; }; + 0C12EB142616383B00B66C86 /* grad_layout_contract.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = grad_layout_contract.h; sourceTree = "<group>"; }; + 0C12EB152616383B00B66C86 /* lambda_post_hook.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lambda_post_hook.h; sourceTree = "<group>"; }; + 0C12EB162616383B00B66C86 /* error_messages.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error_messages.h; sourceTree = "<group>"; }; + 0C12EB172616383B00B66C86 /* python_fft_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_fft_functions.h; sourceTree = "<group>"; }; + 0C12EB182616383B00B66C86 /* python_variable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_variable.h; sourceTree = "<group>"; }; + 0C12EB192616383B00B66C86 /* function_hook.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_hook.h; sourceTree = "<group>"; }; + 0C12EB1A2616383B00B66C86 /* input_metadata.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = input_metadata.h; sourceTree = "<group>"; }; + 0C12EB1B2616383B00B66C86 /* grad_mode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = grad_mode.h; sourceTree = "<group>"; }; + 0C12EB1C2616383B00B66C86 /* symbolic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = symbolic.h; sourceTree = "<group>"; }; + 0C12EB1D2616383B00B66C86 /* input_buffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = input_buffer.h; sourceTree = "<group>"; }; + 0C12EB1E2616383B00B66C86 /* profiler_legacy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiler_legacy.h; sourceTree = "<group>"; }; + 0C12EB1F2616383B00B66C86 /* autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autograd.h; sourceTree = "<group>"; }; + 0C12EB202616383B00B66C86 /* cpp_hook.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpp_hook.h; sourceTree = "<group>"; }; + 0C12EB222616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EB232616383B00B66C86 /* pybind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind.h; sourceTree = "<group>"; }; + 0C12EB242616383B00B66C86 /* comm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = comm.h; sourceTree = "<group>"; }; + 0C12EB252616383B00B66C86 /* basic_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
basic_ops.h; sourceTree = "<group>"; }; + 0C12EB262616383B00B66C86 /* accumulate_grad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = accumulate_grad.h; sourceTree = "<group>"; }; + 0C12EB272616383B00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12EB282616383B00B66C86 /* python_special_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_special_functions.h; sourceTree = "<group>"; }; + 0C12EB292616383B00B66C86 /* FunctionsManual.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FunctionsManual.h; sourceTree = "<group>"; }; + 0C12EB2A2616383B00B66C86 /* forward_grad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = forward_grad.h; sourceTree = "<group>"; }; + 0C12EB2B2616383B00B66C86 /* python_anomaly_mode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_anomaly_mode.h; sourceTree = "<group>"; }; + 0C12EB2C2616383B00B66C86 /* python_nn_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_nn_functions.h; sourceTree = "<group>"; }; + 0C12EB2D2616383B00B66C86 /* InferenceMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InferenceMode.h; sourceTree = "<group>"; }; + 0C12EB2E2616383B00B66C86 /* python_variable_indexing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_variable_indexing.h; sourceTree = "<group>"; }; + 0C12EB2F2616383B00B66C86 /* profiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiler.h; sourceTree = "<group>"; }; + 0C12EB302616383B00B66C86 /* function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function.h; sourceTree = "<group>"; }; + 0C12EB312616383B00B66C86 /* anomaly_mode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = anomaly_mode.h; sourceTree = "<group>"; }; + 0C12EB322616383B00B66C86 /* profiler_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiler_utils.h; sourceTree = "<group>"; }; + 0C12EB352616383B00B66C86 /* interpreter_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interpreter_impl.h; sourceTree = "<group>"; }; + 0C12EB382616383B00B66C86 /* deploy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = deploy.h; sourceTree = "<group>"; }; + 0C12EB3A2616383B00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12EB3C2616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EB3D2616383B00B66C86 /* THCP.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THCP.h; sourceTree = "<group>"; }; + 0C12EB3E2616383B00B66C86 /* nccl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nccl.h; sourceTree = "<group>"; }; + 0C12EB3F2616383B00B66C86 /* python_nccl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = python_nccl.h; sourceTree = "<group>"; }; + 0C12EB402616383B00B66C86 /* device_set.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = device_set.h; sourceTree = "<group>"; }; + 0C12EB412616383B00B66C86 /* Event.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Event.h; sourceTree = "<group>"; }; + 0C12EB422616383B00B66C86 /* serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialization.h; sourceTree = "<group>"; }; + 0C12EB432616383B00B66C86 /* python_comm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_comm.h; sourceTree = "<group>"; }; + 0C12EB442616383B00B66C86 /* comm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = comm.h; sourceTree = "<group>"; }; + 0C12EB452616383B00B66C86 /* Stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Stream.h; sourceTree = "<group>"; }; + 0C12EB472616383B00B66C86 /* undef_macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = undef_macros.h; sourceTree = "<group>"; }; + 0C12EB482616383B00B66C86 /* restore_macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = restore_macros.h; sourceTree = "<group>"; }; + 0C12EB492616383B00B66C86 /* Storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Storage.h; sourceTree = "<group>"; }; + 0C12EB4A2616383B00B66C86 /* Module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Module.h; sourceTree = "<group>"; }; + 0C12EB4B2616383B00B66C86 /* override_macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = override_macros.h; sourceTree = "<group>"; }; + 0C12EB4C2616383B00B66C86 /* serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialization.h; sourceTree = "<group>"; }; + 0C12EB4D2616383B00B66C86 /* Exceptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Exceptions.h; sourceTree = "<group>"; }; + 0C12EB4E2616383B00B66C86 /* QScheme.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = QScheme.h; sourceTree = "<group>"; }; + 0C12EB502616383B00B66C86 /* object_ptr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object_ptr.h; sourceTree = "<group>"; }; + 0C12EB512616383B00B66C86 /* tensor_numpy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_numpy.h; sourceTree = "<group>"; }; + 0C12EB522616383B00B66C86 /* tensor_dtypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_dtypes.h; sourceTree = "<group>"; }; + 0C12EB532616383B00B66C86 /* python_tuples.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_tuples.h; sourceTree = "<group>"; }; + 0C12EB542616383B00B66C86 /* python_numbers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_numbers.h; sourceTree = "<group>"; }; + 0C12EB552616383B00B66C86 /* python_scalars.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
python_scalars.h; sourceTree = "<group>"; }; + 0C12EB562616383B00B66C86 /* pybind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind.h; sourceTree = "<group>"; }; + 0C12EB572616383B00B66C86 /* tensor_types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_types.h; sourceTree = "<group>"; }; + 0C12EB582616383B00B66C86 /* tensor_memoryformats.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_memoryformats.h; sourceTree = "<group>"; }; + 0C12EB592616383B00B66C86 /* python_arg_parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_arg_parser.h; sourceTree = "<group>"; }; + 0C12EB5A2616383B00B66C86 /* cuda_lazy_init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_lazy_init.h; sourceTree = "<group>"; }; + 0C12EB5B2616383B00B66C86 /* tensor_new.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_new.h; sourceTree = "<group>"; }; + 0C12EB5C2616383B00B66C86 /* tensor_qschemes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_qschemes.h; sourceTree = "<group>"; }; + 0C12EB5D2616383B00B66C86 /* python_dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_dispatch.h; sourceTree = "<group>"; }; + 0C12EB5E2616383B00B66C86 /* tensor_list.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_list.h; sourceTree = "<group>"; }; + 0C12EB5F2616383B00B66C86 /* invalid_arguments.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = invalid_arguments.h; sourceTree = "<group>"; }; + 0C12EB602616383B00B66C86 /* auto_gil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = auto_gil.h; sourceTree = "<group>"; }; + 0C12EB612616383B00B66C86 /* python_strings.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_strings.h; sourceTree = "<group>"; }; + 0C12EB622616383B00B66C86 /* byte_order.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = byte_order.h; sourceTree = "<group>"; }; + 0C12EB632616383B00B66C86 /* pycfunction_helpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pycfunction_helpers.h; sourceTree = "<group>"; }; + 0C12EB642616383B00B66C86 /* cuda_enabled.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_enabled.h; sourceTree = "<group>"; }; + 0C12EB652616383B00B66C86 /* numpy_stub.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = numpy_stub.h; sourceTree = "<group>"; }; + 0C12EB662616383B00B66C86 /* out_types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = out_types.h; sourceTree = "<group>"; }; + 0C12EB672616383B00B66C86 /* memory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = memory.h; sourceTree = "<group>"; }; + 0C12EB682616383B00B66C86 /* tensor_layouts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_layouts.h; sourceTree = "<group>"; }; + 0C12EB692616383B00B66C86 /* structseq.h */ = {isa = PBXFileReference; fileEncoding = 
4; lastKnownFileType = sourcecode.c.h; path = structseq.h; sourceTree = "<group>"; }; + 0C12EB6A2616383B00B66C86 /* throughput_benchmark.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = throughput_benchmark.h; sourceTree = "<group>"; }; + 0C12EB6B2616383B00B66C86 /* disable_torch_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disable_torch_function.h; sourceTree = "<group>"; }; + 0C12EB6C2616383B00B66C86 /* throughput_benchmark-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "throughput_benchmark-inl.h"; sourceTree = "<group>"; }; + 0C12EB6D2616383B00B66C86 /* tensor_flatten.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_flatten.h; sourceTree = "<group>"; }; + 0C12EB6E2616383B00B66C86 /* tensor_apply.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_apply.h; sourceTree = "<group>"; }; + 0C12EB6F2616383B00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12EB702616383B00B66C86 /* python_compat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_compat.h; sourceTree = "<group>"; }; + 0C12EB712616383B00B66C86 /* disallow_copy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = disallow_copy.h; sourceTree = "<group>"; }; + 0C12EB722616383B00B66C86 /* six.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = six.h; sourceTree = "<group>"; }; + 0C12EB732616383B00B66C86 /* python_stub.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_stub.h; sourceTree = "<group>"; }; + 0C12EB742616383B00B66C86 /* variadic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variadic.h; sourceTree = "<group>"; }; + 0C12EB752616383B00B66C86 /* Stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Stream.h; sourceTree = "<group>"; }; + 0C12EB762616383B00B66C86 /* StorageDefs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StorageDefs.h; sourceTree = "<group>"; }; + 0C12EB772616383B00B66C86 /* DataLoader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DataLoader.h; sourceTree = "<group>"; }; + 0C12EB782616383B00B66C86 /* THP.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THP.h; sourceTree = "<group>"; }; + 0C12EB792616383B00B66C86 /* python_headers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_headers.h; sourceTree = "<group>"; }; + 0C12EB7A2616383B00B66C86 /* Layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Layout.h; sourceTree = "<group>"; }; + 0C12EB7B2616383B00B66C86 /* DynamicTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DynamicTypes.h; sourceTree = "<group>"; }; + 0C12EB7C2616383B00B66C86 /* copy_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = copy_utils.h; sourceTree = "<group>"; }; + 0C12EB7F2616383B00B66C86 /* jit_opt_limit.h */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.c.h; path = jit_opt_limit.h; sourceTree = "<group>"; }; + 0C12EB812616383B00B66C86 /* error_report.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = error_report.h; sourceTree = "<group>"; }; + 0C12EB822616383B00B66C86 /* source_range.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_range.h; sourceTree = "<group>"; }; + 0C12EB832616383B00B66C86 /* edit_distance.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = edit_distance.h; sourceTree = "<group>"; }; + 0C12EB842616383B00B66C86 /* canonicalize_modified_loop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = canonicalize_modified_loop.h; sourceTree = "<group>"; }; + 0C12EB852616383B00B66C86 /* schema_matching.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = schema_matching.h; sourceTree = "<group>"; }; + 0C12EB862616383B00B66C86 /* function_schema_parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_schema_parser.h; sourceTree = "<group>"; }; + 0C12EB872616383B00B66C86 /* tree_views.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tree_views.h; sourceTree = "<group>"; }; + 0C12EB882616383B00B66C86 /* ir_emitter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_emitter.h; sourceTree = "<group>"; }; + 0C12EB892616383B00B66C86 /* parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parser.h; sourceTree = "<group>"; }; + 0C12EB8A2616383B00B66C86 /* strtod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = strtod.h; sourceTree = "<group>"; }; + 0C12EB8B2616383B00B66C86 /* tree.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tree.h; sourceTree = "<group>"; }; + 0C12EB8C2616383B00B66C86 /* concrete_module_type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = concrete_module_type.h; sourceTree = "<group>"; }; + 0C12EB8D2616383B00B66C86 /* builtin_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = builtin_functions.h; sourceTree = "<group>"; }; + 0C12EB8E2616383B00B66C86 /* exit_transforms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = exit_transforms.h; sourceTree = "<group>"; }; + 0C12EB8F2616383B00B66C86 /* parse_string_literal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parse_string_literal.h; sourceTree = "<group>"; }; + 0C12EB902616383B00B66C86 /* sugared_value.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sugared_value.h; sourceTree = "<group>"; }; + 0C12EB912616383B00B66C86 /* inline_loop_condition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_loop_condition.h; sourceTree = "<group>"; }; + 0C12EB922616383B00B66C86 /* name_mangler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = name_mangler.h; sourceTree = "<group>"; }; + 0C12EB932616383B00B66C86 /* code_template.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = code_template.h; sourceTree 
= "<group>"; }; + 0C12EB942616383B00B66C86 /* tracer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tracer.h; sourceTree = "<group>"; }; + 0C12EB952616383B00B66C86 /* resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resolver.h; sourceTree = "<group>"; }; + 0C12EB962616383B00B66C86 /* script_type_parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script_type_parser.h; sourceTree = "<group>"; }; + 0C12EB972616383B00B66C86 /* schema_type_parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = schema_type_parser.h; sourceTree = "<group>"; }; + 0C12EB982616383B00B66C86 /* lexer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lexer.h; sourceTree = "<group>"; }; + 0C12EB992616383B00B66C86 /* versioned_symbols.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = versioned_symbols.h; sourceTree = "<group>"; }; + 0C12EB9A2616383B00B66C86 /* convert_to_ssa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = convert_to_ssa.h; sourceTree = "<group>"; }; + 0C12EB9B2616383B00B66C86 /* mini_environment.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mini_environment.h; sourceTree = "<group>"; }; + 0C12EB9C2616383B00B66C86 /* parser_constants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parser_constants.h; sourceTree = "<group>"; }; + 0C12EB9E2616383B00B66C86 /* pybind.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind.h; sourceTree = "<group>"; }; + 0C12EB9F2616383B00B66C86 /* python_ir.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_ir.h; sourceTree = "<group>"; }; + 0C12EBA02616383B00B66C86 /* script_init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script_init.h; sourceTree = "<group>"; }; + 0C12EBA12616383B00B66C86 /* python_tree_views.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_tree_views.h; sourceTree = "<group>"; }; + 0C12EBA22616383B00B66C86 /* python_ivalue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_ivalue.h; sourceTree = "<group>"; }; + 0C12EBA32616383B00B66C86 /* python_custom_class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_custom_class.h; sourceTree = "<group>"; }; + 0C12EBA42616383B00B66C86 /* update_graph_executor_opt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = update_graph_executor_opt.h; sourceTree = "<group>"; }; + 0C12EBA52616383B00B66C86 /* python_tracer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_tracer.h; sourceTree = "<group>"; }; + 0C12EBA62616383B00B66C86 /* pybind_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pybind_utils.h; sourceTree = "<group>"; }; + 0C12EBA72616383B00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12EBA82616383B00B66C86 /* python_sugared_value.h */ = {isa = PBXFileReference; fileEncoding = 
4; lastKnownFileType = sourcecode.c.h; path = python_sugared_value.h; sourceTree = "<group>"; }; + 0C12EBA92616383B00B66C86 /* python_arg_flatten.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_arg_flatten.h; sourceTree = "<group>"; }; + 0C12EBAA2616383B00B66C86 /* module_python.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = module_python.h; sourceTree = "<group>"; }; + 0C12EBAC2616383B00B66C86 /* ir_mutator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_mutator.h; sourceTree = "<group>"; }; + 0C12EBAD2616383B00B66C86 /* ir_simplifier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_simplifier.h; sourceTree = "<group>"; }; + 0C12EBAE2616383B00B66C86 /* ir_visitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_visitor.h; sourceTree = "<group>"; }; + 0C12EBAF2616383B00B66C86 /* llvm_jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = llvm_jit.h; sourceTree = "<group>"; }; + 0C12EBB02616383B00B66C86 /* tensorexpr_init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorexpr_init.h; sourceTree = "<group>"; }; + 0C12EBB12616383B00B66C86 /* types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = types.h; sourceTree = "<group>"; }; + 0C12EBB22616383B00B66C86 /* mem_dependency_checker.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem_dependency_checker.h; sourceTree = "<group>"; }; + 0C12EBB32616383B00B66C86 /* ir.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir.h; sourceTree = "<group>"; }; + 0C12EBB42616383B00B66C86 /* exceptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = exceptions.h; sourceTree = "<group>"; }; + 0C12EBB52616383B00B66C86 /* cuda_codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_codegen.h; sourceTree = "<group>"; }; + 0C12EBB62616383B00B66C86 /* hash_provider.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hash_provider.h; sourceTree = "<group>"; }; + 0C12EBB72616383B00B66C86 /* ir_printer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_printer.h; sourceTree = "<group>"; }; + 0C12EBB82616383B00B66C86 /* llvm_codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = llvm_codegen.h; sourceTree = "<group>"; }; + 0C12EBB92616383B00B66C86 /* expr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = expr.h; sourceTree = "<group>"; }; + 0C12EBBA2616383B00B66C86 /* cuda_random.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_random.h; sourceTree = "<group>"; }; + 0C12EBBB2616383B00B66C86 /* execution_counter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = execution_counter.h; sourceTree = "<group>"; }; + 0C12EBBC2616383B00B66C86 /* codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = codegen.h; sourceTree = "<group>"; }; + 0C12EBBD2616383B00B66C86 /* unique_name_manager.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unique_name_manager.h; sourceTree = "<group>"; }; + 0C12EBBE2616383B00B66C86 /* cpp_codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpp_codegen.h; sourceTree = "<group>"; }; + 0C12EBBF2616383B00B66C86 /* var_substitutor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = var_substitutor.h; sourceTree = "<group>"; }; + 0C12EBC02616383B00B66C86 /* eval.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval.h; sourceTree = "<group>"; }; + 0C12EBC12616383B00B66C86 /* bounds_inference.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bounds_inference.h; sourceTree = "<group>"; }; + 0C12EBC22616383B00B66C86 /* intrinsic_symbols.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intrinsic_symbols.h; sourceTree = "<group>"; }; + 0C12EBC32616383B00B66C86 /* block_codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = block_codegen.h; sourceTree = "<group>"; }; + 0C12EBC42616383B00B66C86 /* external_functions_registry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = external_functions_registry.h; sourceTree = "<group>"; }; + 0C12EBC52616383B00B66C86 /* kernel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel.h; sourceTree = "<group>"; }; + 0C12EBC62616383B00B66C86 /* loopnest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loopnest.h; sourceTree = "<group>"; }; + 0C12EBC72616383B00B66C86 /* bounds_overlap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bounds_overlap.h; sourceTree = "<group>"; }; + 0C12EBC82616383B00B66C86 /* ir_verifier.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_verifier.h; sourceTree = "<group>"; }; + 0C12EBC92616383B00B66C86 /* dim_arg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dim_arg.h; sourceTree = "<group>"; }; + 0C12EBCA2616383B00B66C86 /* external_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = external_functions.h; sourceTree = "<group>"; }; + 0C12EBCB2616383B00B66C86 /* stmt.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stmt.h; sourceTree = "<group>"; }; + 0C12EBCC2616383B00B66C86 /* half_support.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = half_support.h; sourceTree = "<group>"; }; + 0C12EBCD2616383B00B66C86 /* registerizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = registerizer.h; sourceTree = "<group>"; }; + 0C12EBCE2616383B00B66C86 /* reduction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reduction.h; sourceTree = "<group>"; }; + 0C12EBCF2616383B00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12EBD02616383B00B66C86 /* mem_arena.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mem_arena.h; sourceTree = "<group>"; }; + 0C12EBD12616383B00B66C86 /* analysis.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = analysis.h; sourceTree = "<group>"; }; + 0C12EBD32616383B00B66C86 /* named_value.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = named_value.h; sourceTree = "<group>"; }; + 0C12EBD42616383B00B66C86 /* irparser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = irparser.h; sourceTree = "<group>"; }; + 0C12EBD52616383B00B66C86 /* ir.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir.h; sourceTree = "<group>"; }; + 0C12EBD62616383B00B66C86 /* graph_node_list.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph_node_list.h; sourceTree = "<group>"; }; + 0C12EBD72616383B00B66C86 /* ir_views.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_views.h; sourceTree = "<group>"; }; + 0C12EBD82616383B00B66C86 /* alias_analysis.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = alias_analysis.h; sourceTree = "<group>"; }; + 0C12EBD92616383B00B66C86 /* attributes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = attributes.h; sourceTree = "<group>"; }; + 0C12EBDA2616383B00B66C86 /* type_hashing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type_hashing.h; sourceTree = "<group>"; }; + 0C12EBDB2616383B00B66C86 /* constants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constants.h; sourceTree = "<group>"; }; + 0C12EBDC2616383B00B66C86 /* subgraph_matcher.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = subgraph_matcher.h; sourceTree = "<group>"; }; + 0C12EBDD2616383B00B66C86 /* scope.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scope.h; sourceTree = "<group>"; }; + 0C12EBDE2616383B00B66C86 /* node_hashing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = node_hashing.h; sourceTree = "<group>"; }; + 0C12EBE02616383B00B66C86 /* cuda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda.h; sourceTree = "<group>"; }; + 0C12EBE22616383B00B66C86 /* import_source.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_source.h; sourceTree = "<group>"; }; + 0C12EBE32616383B00B66C86 /* export.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = export.h; sourceTree = "<group>"; }; + 0C12EBE42616383B00B66C86 /* import_export_helpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_export_helpers.h; sourceTree = "<group>"; }; + 0C12EBE52616383B00B66C86 /* type_name_uniquer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type_name_uniquer.h; sourceTree = "<group>"; }; + 0C12EBE62616383B00B66C86 /* pickler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pickler.h; sourceTree = "<group>"; }; + 0C12EBE72616383B00B66C86 /* python_print.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_print.h; sourceTree = "<group>"; }; + 0C12EBE82616383B00B66C86 /* import_legacy.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_legacy.h; sourceTree = "<group>"; }; + 0C12EBE92616383B00B66C86 /* import_export_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_export_functions.h; sourceTree = "<group>"; }; + 0C12EBEA2616383B00B66C86 /* pickle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pickle.h; sourceTree = "<group>"; }; + 0C12EBEB2616383B00B66C86 /* import_export_constants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_export_constants.h; sourceTree = "<group>"; }; + 0C12EBEC2616383B00B66C86 /* source_range_serialization_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_range_serialization_impl.h; sourceTree = "<group>"; }; + 0C12EBED2616383B00B66C86 /* import.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import.h; sourceTree = "<group>"; }; + 0C12EBEE2616383B00B66C86 /* unpickler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unpickler.h; sourceTree = "<group>"; }; + 0C12EBEF2616383B00B66C86 /* source_range_serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = source_range_serialization.h; sourceTree = "<group>"; }; + 0C12EBF02616383B00B66C86 /* onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx.h; sourceTree = "<group>"; }; + 0C12EBF22616383B00B66C86 /* backend_interface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_interface.h; sourceTree = "<group>"; }; + 0C12EBF32616383B00B66C86 /* backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend.h; sourceTree = "<group>"; }; + 0C12EBF42616383B00B66C86 /* backend_resolver.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_resolver.h; sourceTree = "<group>"; }; + 0C12EBF52616383B00B66C86 /* backend_detail.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_detail.h; sourceTree = "<group>"; }; + 0C12EBF62616383B00B66C86 /* backend_init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = backend_init.h; sourceTree = "<group>"; }; + 0C12EBF82616383B00B66C86 /* slice_indices_adjust.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = slice_indices_adjust.h; sourceTree = "<group>"; }; + 0C12EBF92616383B00B66C86 /* operator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator.h; sourceTree = "<group>"; }; + 0C12EBFA2616383B00B66C86 /* interpreter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interpreter.h; sourceTree = "<group>"; }; + 0C12EBFB2616383B00B66C86 /* register_ops_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = register_ops_utils.h; sourceTree = "<group>"; }; + 0C12EBFC2616383B00B66C86 /* jit_exception.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jit_exception.h; sourceTree = "<group>"; }; + 0C12EBFD2616383B00B66C86 /* exception_message.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = exception_message.h; sourceTree = "<group>"; }; + 0C12EBFE2616383B00B66C86 /* argument_spec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = argument_spec.h; sourceTree = "<group>"; }; + 0C12EBFF2616383B00B66C86 /* logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging.h; sourceTree = "<group>"; }; + 0C12EC002616383B00B66C86 /* profiling_graph_executor_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiling_graph_executor_impl.h; sourceTree = "<group>"; }; + 0C12EC012616383B00B66C86 /* custom_operator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = custom_operator.h; sourceTree = "<group>"; }; + 0C12EC032616383B00B66C86 /* fusion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fusion.h; sourceTree = "<group>"; }; + 0C12EC042616383B00B66C86 /* passes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = passes.h; sourceTree = "<group>"; }; + 0C12EC052616383B00B66C86 /* ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ops.h; sourceTree = "<group>"; }; + 0C12EC062616383B00B66C86 /* impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = impl.h; sourceTree = "<group>"; }; + 0C12EC072616383B00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12EC082616383B00B66C86 /* vararg_functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vararg_functions.h; sourceTree = "<group>"; }; + 0C12EC092616383B00B66C86 /* symbolic_script.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = symbolic_script.h; sourceTree = "<group>"; }; + 0C12EC0A2616383B00B66C86 /* variable_tensor_list.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variable_tensor_list.h; sourceTree = "<group>"; }; + 0C12EC0B2616383B00B66C86 /* autodiff.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autodiff.h; sourceTree = "<group>"; }; + 0C12EC0C2616383B00B66C86 /* print_handler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = print_handler.h; sourceTree = "<group>"; }; + 0C12EC0D2616383B00B66C86 /* profiling_record.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = profiling_record.h; sourceTree = "<group>"; }; + 0C12EC0E2616383B00B66C86 /* graph_executor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph_executor.h; sourceTree = "<group>"; }; + 0C12EC0F2616383B00B66C86 /* operator_options.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_options.h; sourceTree = "<group>"; }; + 0C12EC102616383B00B66C86 /* instruction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = instruction.h; sourceTree = "<group>"; }; + 0C12EC112616383B00B66C86 /* graph_executor_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph_executor_impl.h; sourceTree = "<group>"; }; + 0C12EC132616383B00B66C86 /* remove_expands.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_expands.h; sourceTree = "<group>"; }; + 0C12EC142616383B00B66C86 /* peephole_list_idioms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = peephole_list_idioms.h; sourceTree = "<group>"; }; + 0C12EC152616383B00B66C86 /* subgraph_rewrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = subgraph_rewrite.h; sourceTree = "<group>"; }; + 0C12EC162616383B00B66C86 /* fuse_relu.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fuse_relu.h; sourceTree = "<group>"; }; + 0C12EC172616383B00B66C86 /* guard_elimination.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = guard_elimination.h; sourceTree = "<group>"; }; + 0C12EC182616383B00B66C86 /* peephole_alias_sensitive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = peephole_alias_sensitive.h; sourceTree = "<group>"; }; + 0C12EC192616383B00B66C86 /* freeze_module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = freeze_module.h; sourceTree = "<group>"; }; + 0C12EC1A2616383B00B66C86 /* clear_undefinedness.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clear_undefinedness.h; sourceTree = "<group>"; }; + 0C12EC1B2616383B00B66C86 /* peephole.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = peephole.h; sourceTree = "<group>"; }; + 0C12EC1C2616383B00B66C86 /* remove_dropout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_dropout.h; sourceTree = "<group>"; }; + 0C12EC1D2616383B00B66C86 /* update_differentiable_graph_requires_grad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = update_differentiable_graph_requires_grad.h; sourceTree = "<group>"; }; + 0C12EC1E2616383B00B66C86 /* metal_rewrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = metal_rewrite.h; sourceTree = "<group>"; }; + 0C12EC1F2616383B00B66C86 /* liveness.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = liveness.h; sourceTree = "<group>"; }; + 0C12EC212616383B00B66C86 /* eval_peephole.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eval_peephole.h; sourceTree = "<group>"; }; + 0C12EC222616383B00B66C86 /* function_substitution.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_substitution.h; sourceTree = "<group>"; }; + 0C12EC232616383B00B66C86 /* helper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = helper.h; sourceTree = "<group>"; }; + 0C12EC242616383B00B66C86 /* unpack_quantized_weights.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = unpack_quantized_weights.h; sourceTree = "<group>"; }; + 0C12EC252616383B00B66C86 /* preprocess_for_onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = preprocess_for_onnx.h; sourceTree = "<group>"; }; + 0C12EC262616383B00B66C86 /* scalar_type_analysis.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scalar_type_analysis.h; sourceTree = "<group>"; }; + 0C12EC272616383B00B66C86 /* 
shape_type_inference.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shape_type_inference.h; sourceTree = "<group>"; }; + 0C12EC282616383B00B66C86 /* peephole.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = peephole.h; sourceTree = "<group>"; }; + 0C12EC292616383B00B66C86 /* eliminate_unused_items.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = eliminate_unused_items.h; sourceTree = "<group>"; }; + 0C12EC2A2616383B00B66C86 /* constant_fold.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constant_fold.h; sourceTree = "<group>"; }; + 0C12EC2B2616383B00B66C86 /* constant_map.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constant_map.h; sourceTree = "<group>"; }; + 0C12EC2C2616383B00B66C86 /* fixup_onnx_controlflow.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fixup_onnx_controlflow.h; sourceTree = "<group>"; }; + 0C12EC2D2616383B00B66C86 /* cast_all_constant_to_floating.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cast_all_constant_to_floating.h; sourceTree = "<group>"; }; + 0C12EC2E2616383B00B66C86 /* fold_if_node.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fold_if_node.h; sourceTree = "<group>"; }; + 0C12EC2F2616383B00B66C86 /* list_model_parameters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = list_model_parameters.h; sourceTree = "<group>"; }; + 0C12EC312616383B00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12EC322616383B00B66C86 /* pattern_conversion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pattern_conversion.h; sourceTree = "<group>"; }; + 0C12EC332616383B00B66C86 /* pattern_encapsulation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pattern_encapsulation.h; sourceTree = "<group>"; }; + 0C12EC342616383B00B66C86 /* remove_inplace_ops_for_onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_inplace_ops_for_onnx.h; sourceTree = "<group>"; }; + 0C12EC352616383B00B66C86 /* prepare_division_for_onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prepare_division_for_onnx.h; sourceTree = "<group>"; }; + 0C12EC362616383B00B66C86 /* remove_mutation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_mutation.h; sourceTree = "<group>"; }; + 0C12EC372616383B00B66C86 /* common_subexpression_elimination.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common_subexpression_elimination.h; sourceTree = "<group>"; }; + 0C12EC382616383B00B66C86 /* batch_mm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batch_mm.h; sourceTree = "<group>"; }; + 0C12EC392616383B00B66C86 /* constant_pooling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constant_pooling.h; sourceTree = "<group>"; }; + 0C12EC3A2616383B00B66C86 /* canonicalize_graph_fuser_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = canonicalize_graph_fuser_ops.h; sourceTree = "<group>"; }; + 0C12EC3B2616383B00B66C86 /* fuse_linear.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fuse_linear.h; sourceTree = "<group>"; }; + 0C12EC3C2616383B00B66C86 /* annotate_warns.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = annotate_warns.h; sourceTree = "<group>"; }; + 0C12EC3D2616383B00B66C86 /* specialize_autogradzero.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = specialize_autogradzero.h; sourceTree = "<group>"; }; + 0C12EC3E2616383B00B66C86 /* prepack_folding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = prepack_folding.h; sourceTree = "<group>"; }; + 0C12EC3F2616383B00B66C86 /* frozen_conv_folding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frozen_conv_folding.h; sourceTree = "<group>"; }; + 0C12EC402616383B00B66C86 /* constant_propagation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = constant_propagation.h; sourceTree = "<group>"; }; + 0C12EC412616383B00B66C86 /* insert_guards.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = insert_guards.h; sourceTree = "<group>"; }; + 0C12EC432616383B00B66C86 /* memory_dag.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = memory_dag.h; sourceTree = "<group>"; }; + 0C12EC442616383B00B66C86 /* subgraph_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = subgraph_utils.h; sourceTree = "<group>"; }; + 0C12EC452616383B00B66C86 /* check_alias_annotation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = check_alias_annotation.h; sourceTree = "<group>"; }; + 0C12EC462616383B00B66C86 /* inliner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inliner.h; sourceTree = "<group>"; }; + 0C12EC472616383B00B66C86 /* lower_grad_of.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_grad_of.h; sourceTree = "<group>"; }; + 0C12EC492616383B00B66C86 /* helper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = helper.h; sourceTree = "<group>"; }; + 0C12EC4A2616383B00B66C86 /* quantization_type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quantization_type.h; sourceTree = "<group>"; }; + 0C12EC4B2616383B00B66C86 /* insert_observers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = insert_observers.h; sourceTree = "<group>"; }; + 0C12EC4C2616383B00B66C86 /* dedup_module_uses.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dedup_module_uses.h; sourceTree = "<group>"; }; + 0C12EC4D2616383B00B66C86 /* quantization_patterns.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quantization_patterns.h; sourceTree = "<group>"; }; + 0C12EC4E2616383B00B66C86 /* finalize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = finalize.h; sourceTree = "<group>"; }; + 0C12EC4F2616383B00B66C86 /* insert_quant_dequant.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
insert_quant_dequant.h; sourceTree = "<group>"; }; + 0C12EC502616383B00B66C86 /* fusion_passes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fusion_passes.h; sourceTree = "<group>"; }; + 0C12EC512616383B00B66C86 /* normalize_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalize_ops.h; sourceTree = "<group>"; }; + 0C12EC522616383B00B66C86 /* vulkan_rewrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vulkan_rewrite.h; sourceTree = "<group>"; }; + 0C12EC532616383B00B66C86 /* erase_number_types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = erase_number_types.h; sourceTree = "<group>"; }; + 0C12EC542616383B00B66C86 /* graph_rewrite_helper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph_rewrite_helper.h; sourceTree = "<group>"; }; + 0C12EC552616383B00B66C86 /* graph_fuser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = graph_fuser.h; sourceTree = "<group>"; }; + 0C12EC562616383B00B66C86 /* fold_conv_bn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fold_conv_bn.h; sourceTree = "<group>"; }; + 0C12EC572616383B00B66C86 /* remove_redundant_profiles.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_redundant_profiles.h; sourceTree = "<group>"; }; + 0C12EC582616383B00B66C86 /* inline_forked_closures.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_forked_closures.h; sourceTree = "<group>"; }; + 0C12EC592616383B00B66C86 /* tensorexpr_fuser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensorexpr_fuser.h; sourceTree = "<group>"; }; + 0C12EC5A2616383B00B66C86 /* decompose_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = decompose_ops.h; sourceTree = "<group>"; }; + 0C12EC5B2616383B00B66C86 /* remove_inplace_ops.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_inplace_ops.h; sourceTree = "<group>"; }; + 0C12EC5C2616383B00B66C86 /* inline_fork_wait.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_fork_wait.h; sourceTree = "<group>"; }; + 0C12EC5D2616383B00B66C86 /* create_autodiff_subgraphs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = create_autodiff_subgraphs.h; sourceTree = "<group>"; }; + 0C12EC5E2616383B00B66C86 /* requires_grad_analysis.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = requires_grad_analysis.h; sourceTree = "<group>"; }; + 0C12EC5F2616383B00B66C86 /* dead_code_elimination.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dead_code_elimination.h; sourceTree = "<group>"; }; + 0C12EC602616383B00B66C86 /* clear_profiling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clear_profiling.h; sourceTree = "<group>"; }; + 0C12EC612616383B00B66C86 /* create_functional_graphs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = create_functional_graphs.h; sourceTree = "<group>"; }; + 0C12EC622616383B00B66C86 /* bailout_graph.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bailout_graph.h; sourceTree = "<group>"; }; + 0C12EC632616383B00B66C86 /* lower_tuples.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_tuples.h; sourceTree = "<group>"; }; + 0C12EC642616383B00B66C86 /* frozen_graph_optimizations.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frozen_graph_optimizations.h; sourceTree = "<group>"; }; + 0C12EC652616383B00B66C86 /* frozen_ops_to_mkldnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frozen_ops_to_mkldnn.h; sourceTree = "<group>"; }; + 0C12EC662616383B00B66C86 /* canonicalize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = canonicalize.h; sourceTree = "<group>"; }; + 0C12EC672616383B00B66C86 /* hoist_conv_packed_params.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hoist_conv_packed_params.h; sourceTree = "<group>"; }; + 0C12EC682616383B00B66C86 /* loop_unrolling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loop_unrolling.h; sourceTree = "<group>"; }; + 0C12EC692616383B00B66C86 /* shape_analysis.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shape_analysis.h; sourceTree = "<group>"; }; + 0C12EC6A2616383B00B66C86 /* fixup_trace_scope_blocks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fixup_trace_scope_blocks.h; sourceTree = "<group>"; }; + 0C12EC6B2616383B00B66C86 /* remove_exceptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = remove_exceptions.h; sourceTree = "<group>"; }; + 0C12EC6C2616383B00B66C86 /* inline_autodiff_subgraphs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inline_autodiff_subgraphs.h; sourceTree = "<group>"; }; + 0C12EC6D2616383B00B66C86 /* inplace_check.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = inplace_check.h; sourceTree = "<group>"; }; + 0C12EC6E2616383B00B66C86 /* cuda_graph_fuser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda_graph_fuser.h; sourceTree = "<group>"; }; + 0C12EC6F2616383B00B66C86 /* pass_manager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pass_manager.h; sourceTree = "<group>"; }; + 0C12EC702616383B00B66C86 /* onnx.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = onnx.h; sourceTree = "<group>"; }; + 0C12EC712616383B00B66C86 /* xnnpack_rewrite.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xnnpack_rewrite.h; sourceTree = "<group>"; }; + 0C12EC722616383B00B66C86 /* lift_closures.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lift_closures.h; sourceTree = "<group>"; }; + 0C12EC732616383B00B66C86 /* frozen_conv_add_relu_fusion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = frozen_conv_add_relu_fusion.h; sourceTree = "<group>"; }; + 0C12EC742616383B00B66C86 /* lower_graph.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_graph.h; sourceTree = "<group>"; }; + 0C12EC782616383B00B66C86 /* type.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type.h; sourceTree = "<group>"; }; + 0C12EC792616383B00B66C86 /* executor_kernel_arg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = executor_kernel_arg.h; sourceTree = "<group>"; }; + 0C12EC7A2616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12EC7B2616383B00B66C86 /* kernel_ir_printer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_ir_printer.h; sourceTree = "<group>"; }; + 0C12EC7D2616383B00B66C86 /* index_compute.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = index_compute.h; sourceTree = "<group>"; }; + 0C12EC7E2616383B00B66C86 /* transform_replay.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transform_replay.h; sourceTree = "<group>"; }; + 0C12EC7F2616383B00B66C86 /* parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parser.h; sourceTree = "<group>"; }; + 0C12EC802616383B00B66C86 /* executor_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = executor_utils.h; sourceTree = "<group>"; }; + 0C12EC812616383B00B66C86 /* manager.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = manager.h; sourceTree = "<group>"; }; + 0C12EC822616383B00B66C86 /* scheduler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = scheduler.h; sourceTree = "<group>"; }; + 0C12EC832616383B00B66C86 /* lower_unroll.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_unroll.h; sourceTree = "<group>"; }; + 0C12EC852616383B00B66C86 /* ir_printer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_printer.h; sourceTree = "<group>"; }; + 0C12EC862616383B00B66C86 /* lower_insert_syncs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_insert_syncs.h; sourceTree = "<group>"; }; + 0C12EC872616383B00B66C86 /* lower2device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower2device.h; sourceTree = "<group>"; }; + 0C12EC882616383B00B66C86 /* predicate_compute.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = predicate_compute.h; sourceTree = "<group>"; }; + 0C12EC892616383B00B66C86 /* compute_at.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compute_at.h; sourceTree = "<group>"; }; + 0C12EC8A2616383B00B66C86 /* ir_all_nodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_all_nodes.h; sourceTree = "<group>"; }; + 0C12EC8B2616383B00B66C86 /* mutator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mutator.h; sourceTree = "<group>"; }; + 0C12EC8D2616383B00B66C86 /* documentation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = documentation.h; sourceTree = "<group>"; }; + 0C12EC8F2616383B00B66C86 /* fusion.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fusion.h; sourceTree = "<group>"; }; + 0C12EC902616383B00B66C86 /* lower_loops.h */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_loops.h; sourceTree = "<group>"; }; + 0C12EC912616383B00B66C86 /* interface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interface.h; sourceTree = "<group>"; }; + 0C12EC922616383B00B66C86 /* arith.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = arith.h; sourceTree = "<group>"; }; + 0C12EC932616383B00B66C86 /* kernel_cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_cache.h; sourceTree = "<group>"; }; + 0C12EC942616383B00B66C86 /* codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = codegen.h; sourceTree = "<group>"; }; + 0C12EC952616383B00B66C86 /* ir_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_utils.h; sourceTree = "<group>"; }; + 0C12EC962616383B00B66C86 /* lower_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_utils.h; sourceTree = "<group>"; }; + 0C12EC972616383B00B66C86 /* lower_index.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_index.h; sourceTree = "<group>"; }; + 0C12EC982616383B00B66C86 /* transform_rfactor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transform_rfactor.h; sourceTree = "<group>"; }; + 0C12EC992616383B00B66C86 /* transform_iter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transform_iter.h; sourceTree = "<group>"; }; + 0C12EC9A2616383B00B66C86 /* lower_alias_memory.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_alias_memory.h; sourceTree = "<group>"; }; + 0C12EC9B2616383B00B66C86 /* executor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = executor.h; sourceTree = "<group>"; }; + 0C12EC9C2616383B00B66C86 /* ir_graphviz.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_graphviz.h; sourceTree = "<group>"; }; + 0C12EC9D2616383B00B66C86 /* ir_iostream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_iostream.h; sourceTree = "<group>"; }; + 0C12EC9E2616383B00B66C86 /* partition.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = partition.h; sourceTree = "<group>"; }; + 0C12EC9F2616383B00B66C86 /* shape_inference.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shape_inference.h; sourceTree = "<group>"; }; + 0C12ECA02616383B00B66C86 /* kernel_ir_builder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_ir_builder.h; sourceTree = "<group>"; }; + 0C12ECA12616383B00B66C86 /* instrumentation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = instrumentation.h; sourceTree = "<group>"; }; + 0C12ECA22616383B00B66C86 /* kernel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel.h; sourceTree = "<group>"; }; + 0C12ECA32616383B00B66C86 /* dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dispatch.h; sourceTree = "<group>"; }; + 0C12ECA42616383B00B66C86 /* lower_validation.h */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_validation.h; sourceTree = "<group>"; }; + 0C12ECA52616383B00B66C86 /* ir_internal_nodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_internal_nodes.h; sourceTree = "<group>"; }; + 0C12ECA62616383B00B66C86 /* lower_thread_predicate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lower_thread_predicate.h; sourceTree = "<group>"; }; + 0C12ECA72616383B00B66C86 /* ir_interface_nodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_interface_nodes.h; sourceTree = "<group>"; }; + 0C12ECA82616383B00B66C86 /* ir_cloner.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_cloner.h; sourceTree = "<group>"; }; + 0C12ECA92616383B00B66C86 /* ir_base_nodes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ir_base_nodes.h; sourceTree = "<group>"; }; + 0C12ECAA2616383B00B66C86 /* executor_launch_params.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = executor_launch_params.h; sourceTree = "<group>"; }; + 0C12ECAB2616383B00B66C86 /* kernel_ir.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_ir.h; sourceTree = "<group>"; }; + 0C12ECAC2616383B00B66C86 /* iter_visitor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = iter_visitor.h; sourceTree = "<group>"; }; + 0C12ECAD2616383B00B66C86 /* expr_evaluator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = expr_evaluator.h; sourceTree = "<group>"; }; + 0C12ECAF2616383B00B66C86 /* tensor_info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_info.h; sourceTree = "<group>"; }; + 0C12ECB02616383B00B66C86 /* arg_spec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = arg_spec.h; sourceTree = "<group>"; }; + 0C12ECB12616383B00B66C86 /* compiler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compiler.h; sourceTree = "<group>"; }; + 0C12ECB22616383B00B66C86 /* fallback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fallback.h; sourceTree = "<group>"; }; + 0C12ECB42616383B00B66C86 /* temp_file.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = temp_file.h; sourceTree = "<group>"; }; + 0C12ECB52616383B00B66C86 /* fused_kernel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_kernel.h; sourceTree = "<group>"; }; + 0C12ECB62616383B00B66C86 /* resource_strings.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resource_strings.h; sourceTree = "<group>"; }; + 0C12ECB82616383B00B66C86 /* fused_kernel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_kernel.h; sourceTree = "<group>"; }; + 0C12ECB92616383B00B66C86 /* resource_strings.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resource_strings.h; sourceTree = "<group>"; }; + 0C12ECBA2616383B00B66C86 /* partition_desc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = partition_desc.h; 
sourceTree = "<group>"; }; + 0C12ECBB2616383B00B66C86 /* fused_kernel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fused_kernel.h; sourceTree = "<group>"; }; + 0C12ECBC2616383B00B66C86 /* kernel_spec.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_spec.h; sourceTree = "<group>"; }; + 0C12ECBD2616383B00B66C86 /* interface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interface.h; sourceTree = "<group>"; }; + 0C12ECBE2616383B00B66C86 /* kernel_cache.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = kernel_cache.h; sourceTree = "<group>"; }; + 0C12ECBF2616383B00B66C86 /* codegen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = codegen.h; sourceTree = "<group>"; }; + 0C12ECC02616383B00B66C86 /* executor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = executor.h; sourceTree = "<group>"; }; + 0C12ECC12616383B00B66C86 /* tensor_desc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor_desc.h; sourceTree = "<group>"; }; + 0C12ECC32616383B00B66C86 /* file_check.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = file_check.h; sourceTree = "<group>"; }; + 0C12ECC42616383B00B66C86 /* hooks_for_testing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hooks_for_testing.h; sourceTree = "<group>"; }; + 0C12ECC52616383B00B66C86 /* jit_log.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jit_log.h; sourceTree = "<group>"; }; + 0C12ECC72616383B00B66C86 /* observer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = observer.h; sourceTree = "<group>"; }; + 0C12ECC82616383B00B66C86 /* sequential.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sequential.h; sourceTree = "<group>"; }; + 0C12ECC92616383B00B66C86 /* interpreter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interpreter.h; sourceTree = "<group>"; }; + 0C12ECCA2616383B00B66C86 /* export_data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = export_data.h; sourceTree = "<group>"; }; + 0C12ECCB2616383B00B66C86 /* method.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = method.h; sourceTree = "<group>"; }; + 0C12ECCD2616383B00B66C86 /* sgd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sgd.h; sourceTree = "<group>"; }; + 0C12ECCE2616383B00B66C86 /* import_data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import_data.h; sourceTree = "<group>"; }; + 0C12ECCF2616383B00B66C86 /* type_parser.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = type_parser.h; sourceTree = "<group>"; }; + 0C12ECD02616383B00B66C86 /* import.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = import.h; sourceTree = "<group>"; }; + 0C12ECD12616383B00B66C86 /* module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = module.h; sourceTree = "<group>"; }; + 0C12ECD22616383B00B66C86 /* function.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function.h; sourceTree = "<group>"; }; + 0C12ECD32616383B00B66C86 /* resource_guard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = resource_guard.h; sourceTree = "<group>"; }; + 0C12ECD52616383B00B66C86 /* function_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_impl.h; sourceTree = "<group>"; }; + 0C12ECD62616383B00B66C86 /* method.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = method.h; sourceTree = "<group>"; }; + 0C12ECD72616383B00B66C86 /* compilation_unit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = compilation_unit.h; sourceTree = "<group>"; }; + 0C12ECD82616383B00B66C86 /* object.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = object.h; sourceTree = "<group>"; }; + 0C12ECD92616383B00B66C86 /* module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = module.h; sourceTree = "<group>"; }; + 0C12ECDA2616383B00B66C86 /* Storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Storage.h; sourceTree = "<group>"; }; + 0C12ECDE2616383B00B66C86 /* fft.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fft.h; sourceTree = "<group>"; }; + 0C12ECDF2616383B00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12ECE02616383B00B66C86 /* version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = version.h; sourceTree = "<group>"; }; + 0C12ECE32616383B00B66C86 /* normalization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalization.h; sourceTree = "<group>"; }; + 0C12ECE42616383B00B66C86 /* rnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rnn.h; sourceTree = "<group>"; }; + 0C12ECE52616383B00B66C86 /* distance.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distance.h; sourceTree = "<group>"; }; + 0C12ECE62616383B00B66C86 /* batchnorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batchnorm.h; sourceTree = "<group>"; }; + 0C12ECE72616383B00B66C86 /* linear.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = linear.h; sourceTree = "<group>"; }; + 0C12ECE82616383B00B66C86 /* instancenorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = instancenorm.h; sourceTree = "<group>"; }; + 0C12ECE92616383B00B66C86 /* vision.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vision.h; sourceTree = "<group>"; }; + 0C12ECEA2616383B00B66C86 /* transformercoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformercoder.h; sourceTree = "<group>"; }; + 0C12ECEB2616383B00B66C86 /* dropout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dropout.h; sourceTree = "<group>"; }; + 0C12ECEC2616383B00B66C86 /* upsampling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = upsampling.h; sourceTree = 
"<group>"; }; + 0C12ECED2616383B00B66C86 /* embedding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embedding.h; sourceTree = "<group>"; }; + 0C12ECEE2616383C00B66C86 /* fold.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fold.h; sourceTree = "<group>"; }; + 0C12ECEF2616383C00B66C86 /* activation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation.h; sourceTree = "<group>"; }; + 0C12ECF02616383C00B66C86 /* transformer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformer.h; sourceTree = "<group>"; }; + 0C12ECF12616383C00B66C86 /* pooling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pooling.h; sourceTree = "<group>"; }; + 0C12ECF22616383C00B66C86 /* transformerlayer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformerlayer.h; sourceTree = "<group>"; }; + 0C12ECF32616383C00B66C86 /* adaptive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adaptive.h; sourceTree = "<group>"; }; + 0C12ECF42616383C00B66C86 /* conv.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv.h; sourceTree = "<group>"; }; + 0C12ECF52616383C00B66C86 /* padding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = padding.h; sourceTree = "<group>"; }; + 0C12ECF62616383C00B66C86 /* pixelshuffle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixelshuffle.h; sourceTree = "<group>"; }; + 0C12ECF72616383C00B66C86 /* loss.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loss.h; sourceTree = "<group>"; }; + 0C12ECF82616383C00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12ECFA2616383C00B66C86 /* data_parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_parallel.h; sourceTree = "<group>"; }; + 0C12ECFB2616383C00B66C86 /* pimpl-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "pimpl-inl.h"; sourceTree = "<group>"; }; + 0C12ECFD2616383C00B66C86 /* rnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rnn.h; sourceTree = "<group>"; }; + 0C12ECFE2616383C00B66C86 /* clip_grad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clip_grad.h; sourceTree = "<group>"; }; + 0C12ECFF2616383C00B66C86 /* convert_parameters.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = convert_parameters.h; sourceTree = "<group>"; }; + 0C12ED002616383C00B66C86 /* options.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = options.h; sourceTree = "<group>"; }; + 0C12ED012616383C00B66C86 /* functional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = functional.h; sourceTree = "<group>"; }; + 0C12ED022616383C00B66C86 /* modules.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = modules.h; sourceTree = "<group>"; }; + 0C12ED032616383C00B66C86 /* pimpl.h */ = {isa = PBXFileReference; fileEncoding = 4; 
lastKnownFileType = sourcecode.c.h; path = pimpl.h; sourceTree = "<group>"; }; + 0C12ED042616383C00B66C86 /* module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = module.h; sourceTree = "<group>"; }; + 0C12ED062616383C00B66C86 /* normalization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalization.h; sourceTree = "<group>"; }; + 0C12ED072616383C00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12ED082616383C00B66C86 /* rnn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rnn.h; sourceTree = "<group>"; }; + 0C12ED092616383C00B66C86 /* distance.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distance.h; sourceTree = "<group>"; }; + 0C12ED0A2616383C00B66C86 /* batchnorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batchnorm.h; sourceTree = "<group>"; }; + 0C12ED0B2616383C00B66C86 /* linear.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = linear.h; sourceTree = "<group>"; }; + 0C12ED0C2616383C00B66C86 /* instancenorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = instancenorm.h; sourceTree = "<group>"; }; + 0C12ED0D2616383C00B66C86 /* transformercoder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformercoder.h; sourceTree = "<group>"; }; + 0C12ED0E2616383C00B66C86 /* _functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = _functions.h; sourceTree = "<group>"; }; + 0C12ED102616383C00B66C86 /* named_any.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = named_any.h; sourceTree = "<group>"; }; + 0C12ED112616383C00B66C86 /* any_value.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = any_value.h; sourceTree = "<group>"; }; + 0C12ED122616383C00B66C86 /* modulelist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = modulelist.h; sourceTree = "<group>"; }; + 0C12ED132616383C00B66C86 /* moduledict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = moduledict.h; sourceTree = "<group>"; }; + 0C12ED142616383C00B66C86 /* sequential.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sequential.h; sourceTree = "<group>"; }; + 0C12ED152616383C00B66C86 /* functional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = functional.h; sourceTree = "<group>"; }; + 0C12ED162616383C00B66C86 /* parameterlist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parameterlist.h; sourceTree = "<group>"; }; + 0C12ED172616383C00B66C86 /* parameterdict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = parameterdict.h; sourceTree = "<group>"; }; + 0C12ED182616383C00B66C86 /* any.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = any.h; sourceTree = "<group>"; }; + 0C12ED192616383C00B66C86 /* any_module_holder.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = any_module_holder.h; sourceTree = 
"<group>"; }; + 0C12ED1A2616383C00B66C86 /* dropout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dropout.h; sourceTree = "<group>"; }; + 0C12ED1B2616383C00B66C86 /* common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = common.h; sourceTree = "<group>"; }; + 0C12ED1C2616383C00B66C86 /* upsampling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = upsampling.h; sourceTree = "<group>"; }; + 0C12ED1D2616383C00B66C86 /* embedding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embedding.h; sourceTree = "<group>"; }; + 0C12ED1E2616383C00B66C86 /* fold.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fold.h; sourceTree = "<group>"; }; + 0C12ED1F2616383C00B66C86 /* activation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation.h; sourceTree = "<group>"; }; + 0C12ED202616383C00B66C86 /* transformer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformer.h; sourceTree = "<group>"; }; + 0C12ED212616383C00B66C86 /* pooling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pooling.h; sourceTree = "<group>"; }; + 0C12ED222616383C00B66C86 /* transformerlayer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transformerlayer.h; sourceTree = "<group>"; }; + 0C12ED232616383C00B66C86 /* adaptive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adaptive.h; sourceTree = "<group>"; }; + 0C12ED242616383C00B66C86 /* conv.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv.h; sourceTree = "<group>"; }; + 0C12ED252616383C00B66C86 /* padding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = padding.h; sourceTree = "<group>"; }; + 0C12ED262616383C00B66C86 /* pixelshuffle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixelshuffle.h; sourceTree = "<group>"; }; + 0C12ED272616383C00B66C86 /* loss.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loss.h; sourceTree = "<group>"; }; + 0C12ED282616383C00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12ED292616383C00B66C86 /* cloneable.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cloneable.h; sourceTree = "<group>"; }; + 0C12ED2B2616383C00B66C86 /* normalization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = normalization.h; sourceTree = "<group>"; }; + 0C12ED2C2616383C00B66C86 /* distance.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distance.h; sourceTree = "<group>"; }; + 0C12ED2D2616383C00B66C86 /* batchnorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = batchnorm.h; sourceTree = "<group>"; }; + 0C12ED2E2616383C00B66C86 /* linear.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = linear.h; sourceTree = "<group>"; }; + 0C12ED2F2616383C00B66C86 /* instancenorm.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = instancenorm.h; sourceTree = "<group>"; }; + 0C12ED302616383C00B66C86 /* vision.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vision.h; sourceTree = "<group>"; }; + 0C12ED312616383C00B66C86 /* dropout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dropout.h; sourceTree = "<group>"; }; + 0C12ED322616383C00B66C86 /* upsampling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = upsampling.h; sourceTree = "<group>"; }; + 0C12ED332616383C00B66C86 /* embedding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = embedding.h; sourceTree = "<group>"; }; + 0C12ED342616383C00B66C86 /* fold.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fold.h; sourceTree = "<group>"; }; + 0C12ED352616383C00B66C86 /* activation.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = activation.h; sourceTree = "<group>"; }; + 0C12ED362616383C00B66C86 /* pooling.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pooling.h; sourceTree = "<group>"; }; + 0C12ED372616383C00B66C86 /* conv.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = conv.h; sourceTree = "<group>"; }; + 0C12ED382616383C00B66C86 /* padding.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = padding.h; sourceTree = "<group>"; }; + 0C12ED392616383C00B66C86 /* pixelshuffle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pixelshuffle.h; sourceTree = "<group>"; }; + 0C12ED3A2616383C00B66C86 /* loss.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = loss.h; sourceTree = "<group>"; }; + 0C12ED3C2616383C00B66C86 /* init.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = init.h; sourceTree = "<group>"; }; + 0C12ED3D2616383C00B66C86 /* enum.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = enum.h; sourceTree = "<group>"; }; + 0C12ED3E2616383C00B66C86 /* types.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = types.h; sourceTree = "<group>"; }; + 0C12ED3F2616383C00B66C86 /* all.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = all.h; sourceTree = "<group>"; }; + 0C12ED402616383C00B66C86 /* data.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data.h; sourceTree = "<group>"; }; + 0C12ED412616383C00B66C86 /* arg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = arg.h; sourceTree = "<group>"; }; + 0C12ED432616383C00B66C86 /* rmsprop.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rmsprop.h; sourceTree = "<group>"; }; + 0C12ED442616383C00B66C86 /* lbfgs.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lbfgs.h; sourceTree = "<group>"; }; + 0C12ED452616383C00B66C86 /* optimizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optimizer.h; sourceTree = "<group>"; }; + 0C12ED462616383C00B66C86 /* adagrad.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
adagrad.h; sourceTree = "<group>"; }; + 0C12ED472616383C00B66C86 /* sgd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sgd.h; sourceTree = "<group>"; }; + 0C12ED482616383C00B66C86 /* serialize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialize.h; sourceTree = "<group>"; }; + 0C12ED492616383C00B66C86 /* adamw.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adamw.h; sourceTree = "<group>"; }; + 0C12ED4B2616383C00B66C86 /* lr_scheduler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lr_scheduler.h; sourceTree = "<group>"; }; + 0C12ED4C2616383C00B66C86 /* step_lr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = step_lr.h; sourceTree = "<group>"; }; + 0C12ED4D2616383C00B66C86 /* adam.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adam.h; sourceTree = "<group>"; }; + 0C12ED4F2616383C00B66C86 /* archive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = archive.h; sourceTree = "<group>"; }; + 0C12ED502616383C00B66C86 /* input-archive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "input-archive.h"; sourceTree = "<group>"; }; + 0C12ED512616383C00B66C86 /* output-archive.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "output-archive.h"; sourceTree = "<group>"; }; + 0C12ED522616383C00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12ED532616383C00B66C86 /* torch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = torch.h; sourceTree = "<group>"; }; + 0C12ED542616383C00B66C86 /* optim.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = optim.h; sourceTree = "<group>"; }; + 0C12ED552616383C00B66C86 /* jit.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jit.h; sourceTree = "<group>"; }; + 0C12ED572616383C00B66C86 /* static.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = static.h; sourceTree = "<group>"; }; + 0C12ED582616383C00B66C86 /* TensorDataContainer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorDataContainer.h; sourceTree = "<group>"; }; + 0C12ED592616383C00B66C86 /* nn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = nn.h; sourceTree = "<group>"; }; + 0C12ED5A2616383C00B66C86 /* ordered_dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ordered_dict.h; sourceTree = "<group>"; }; + 0C12ED5B2616383C00B66C86 /* cuda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cuda.h; sourceTree = "<group>"; }; + 0C12ED5C2616383C00B66C86 /* autograd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autograd.h; sourceTree = "<group>"; }; + 0C12ED5D2616383C00B66C86 /* linalg.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = linalg.h; sourceTree = "<group>"; }; + 0C12ED5E2616383C00B66C86 /* special.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = special.h; sourceTree = "<group>"; }; + 0C12ED5F2616383C00B66C86 /* python.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python.h; sourceTree = "<group>"; }; + 0C12ED602616383C00B66C86 /* serialize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialize.h; sourceTree = "<group>"; }; + 0C12ED622616383C00B66C86 /* example.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = example.h; sourceTree = "<group>"; }; + 0C12ED632616383C00B66C86 /* dataloader_options.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dataloader_options.h; sourceTree = "<group>"; }; + 0C12ED652616383C00B66C86 /* mnist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = mnist.h; sourceTree = "<group>"; }; + 0C12ED662616383C00B66C86 /* shared.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = shared.h; sourceTree = "<group>"; }; + 0C12ED672616383C00B66C86 /* map.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = map.h; sourceTree = "<group>"; }; + 0C12ED682616383C00B66C86 /* chunk.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = chunk.h; sourceTree = "<group>"; }; + 0C12ED692616383C00B66C86 /* stateful.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stateful.h; sourceTree = "<group>"; }; + 0C12ED6A2616383C00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12ED6B2616383C00B66C86 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; }; + 0C12ED6C2616383C00B66C86 /* worker_exception.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = worker_exception.h; sourceTree = "<group>"; }; + 0C12ED6D2616383C00B66C86 /* dataloader.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dataloader.h; sourceTree = "<group>"; }; + 0C12ED6F2616383C00B66C86 /* data_shuttle.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = data_shuttle.h; sourceTree = "<group>"; }; + 0C12ED702616383C00B66C86 /* sequencers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sequencers.h; sourceTree = "<group>"; }; + 0C12ED712616383C00B66C86 /* queue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = queue.h; sourceTree = "<group>"; }; + 0C12ED722616383C00B66C86 /* samplers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = samplers.h; sourceTree = "<group>"; }; + 0C12ED742616383C00B66C86 /* lambda.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = lambda.h; sourceTree = "<group>"; }; + 0C12ED752616383C00B66C86 /* stack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stack.h; sourceTree = "<group>"; }; + 0C12ED762616383C00B66C86 /* collate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = collate.h; sourceTree = "<group>"; }; + 0C12ED772616383C00B66C86 /* tensor.h */ = {isa = PBXFileReference; fileEncoding 
= 4; lastKnownFileType = sourcecode.c.h; path = tensor.h; sourceTree = "<group>"; }; + 0C12ED782616383C00B66C86 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; }; + 0C12ED7A2616383C00B66C86 /* sequential.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sequential.h; sourceTree = "<group>"; }; + 0C12ED7B2616383C00B66C86 /* custom_batch_request.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = custom_batch_request.h; sourceTree = "<group>"; }; + 0C12ED7C2616383C00B66C86 /* stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stream.h; sourceTree = "<group>"; }; + 0C12ED7D2616383C00B66C86 /* distributed.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = distributed.h; sourceTree = "<group>"; }; + 0C12ED7E2616383C00B66C86 /* serialize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialize.h; sourceTree = "<group>"; }; + 0C12ED7F2616383C00B66C86 /* random.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = random.h; sourceTree = "<group>"; }; + 0C12ED802616383C00B66C86 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; }; + 0C12ED812616383C00B66C86 /* datasets.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = datasets.h; sourceTree = "<group>"; }; + 0C12ED822616383C00B66C86 /* transforms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = transforms.h; sourceTree = "<group>"; }; + 0C12ED832616383C00B66C86 /* iterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = iterator.h; sourceTree = "<group>"; }; + 0C12ED852616383C00B66C86 /* stateless.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stateless.h; sourceTree = "<group>"; }; + 0C12ED862616383C00B66C86 /* stateful.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stateful.h; sourceTree = "<group>"; }; + 0C12ED872616383C00B66C86 /* base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = base.h; sourceTree = "<group>"; }; + 0C12ED882616383C00B66C86 /* expanding_array.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = expanding_array.h; sourceTree = "<group>"; }; + 0C12ED952616383C00B66C86 /* MemoryFormat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MemoryFormat.h; sourceTree = "<group>"; }; + 0C12ED972616383C00B66C86 /* utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = utils.h; sourceTree = "<group>"; }; + 0C12ED982616383C00B66C86 /* serialization.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = serialization.h; sourceTree = "<group>"; }; + 0C12ED992616383C00B66C86 /* Storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Storage.h; sourceTree = "<group>"; }; + 0C12ED9B2616383C00B66C86 /* python_tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_tensor.h; sourceTree = "<group>"; }; + 
0C12ED9C2616383C00B66C86 /* WindowsTorchApiMacro.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WindowsTorchApiMacro.h; sourceTree = "<group>"; }; + 0C12ED9D2616383C00B66C86 /* Dtype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dtype.h; sourceTree = "<group>"; }; + 0C12ED9E2616383C00B66C86 /* Module.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Module.h; sourceTree = "<group>"; }; + 0C12ED9F2616383C00B66C86 /* THP_export.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THP_export.h; sourceTree = "<group>"; }; + 0C12EDA02616383C00B66C86 /* python_dimname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_dimname.h; sourceTree = "<group>"; }; + 0C12EDA12616383C00B66C86 /* CudaIPCTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CudaIPCTypes.h; sourceTree = "<group>"; }; + 0C12EDA22616383C00B66C86 /* Generator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Generator.h; sourceTree = "<group>"; }; + 0C12EDA32616383C00B66C86 /* TypeInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeInfo.h; sourceTree = "<group>"; }; + 0C12EDA42616383C00B66C86 /* PythonTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PythonTypes.h; sourceTree = "<group>"; }; + 0C12EDA52616383C00B66C86 /* script.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = script.h; sourceTree = "<group>"; }; + 0C12EDA62616383C00B66C86 /* library.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = library.h; sourceTree = "<group>"; }; + 0C12EDA72616383C00B66C86 /* custom_class_detail.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = custom_class_detail.h; sourceTree = "<group>"; }; + 0C12EDA82616383C00B66C86 /* custom_class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = custom_class.h; sourceTree = "<group>"; }; + 0C12EDA92616383C00B66C86 /* extension.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = extension.h; sourceTree = "<group>"; }; + 0C12EDAA2616383C00B66C86 /* xnnpack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = xnnpack.h; sourceTree = "<group>"; }; + 0C12EDAB2616383C00B66C86 /* fp16.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16.h; sourceTree = "<group>"; }; + 0C12EDAC2616383C00B66C86 /* qnnpack_func.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qnnpack_func.h; sourceTree = "<group>"; }; + 0C12EDAD2616383C00B66C86 /* pthreadpool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = pthreadpool.h; sourceTree = "<group>"; }; + 0C12EDAE2616383C00B66C86 /* clog.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = clog.h; sourceTree = "<group>"; }; + 0C12EDB02616383C00B66C86 /* Formatting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Formatting.h; sourceTree = "<group>"; }; + 0C12EDB12616383C00B66C86 /* CPUFunctions.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUFunctions.h; sourceTree = "<group>"; }; + 0C12EDB22616383C00B66C86 /* MetaFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MetaFunctions.h; sourceTree = "<group>"; }; + 0C12EDB32616383C00B66C86 /* Utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Utils.h; sourceTree = "<group>"; }; + 0C12EDB42616383C00B66C86 /* CUDAGeneratorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAGeneratorImpl.h; sourceTree = "<group>"; }; + 0C12EDB52616383C00B66C86 /* TensorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorOptions.h; sourceTree = "<group>"; }; + 0C12EDB62616383C00B66C86 /* TensorUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorUtils.h; sourceTree = "<group>"; }; + 0C12EDB72616383C00B66C86 /* MemoryOverlap.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MemoryOverlap.h; sourceTree = "<group>"; }; + 0C12EDB82616383C00B66C86 /* InitialTensorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InitialTensorOptions.h; sourceTree = "<group>"; }; + 0C12EDB92616383C00B66C86 /* Version.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Version.h; sourceTree = "<group>"; }; + 0C12EDBA2616383C00B66C86 /* DLConvertor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DLConvertor.h; sourceTree = "<group>"; }; + 0C12EDBB2616383C00B66C86 /* Device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Device.h; sourceTree = "<group>"; }; + 0C12EDBD2616383C00B66C86 /* Dict_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dict_inl.h; sourceTree = "<group>"; }; + 0C12EDBE2616383C00B66C86 /* Formatting.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Formatting.h; sourceTree = "<group>"; }; + 0C12EDBF2616383C00B66C86 /* TensorBody.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorBody.h; sourceTree = "<group>"; }; + 0C12EDC12616383C00B66C86 /* adaption.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = adaption.h; sourceTree = "<group>"; }; + 0C12EDC22616383C00B66C86 /* op_allowlist.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = op_allowlist.h; sourceTree = "<group>"; }; + 0C12EDC32616383C00B66C86 /* op_registration.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = op_registration.h; sourceTree = "<group>"; }; + 0C12EDC42616383C00B66C86 /* infer_schema.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = infer_schema.h; sourceTree = "<group>"; }; + 0C12EDC52616383C00B66C86 /* jit_type_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jit_type_base.h; sourceTree = "<group>"; }; + 0C12EDC62616383C00B66C86 /* typeid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typeid.h; sourceTree = "<group>"; }; + 0C12EDC72616383C00B66C86 /* rref_interface.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = rref_interface.h; sourceTree = "<group>"; }; + 0C12EDC82616383C00B66C86 /* Range.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Range.h; sourceTree = "<group>"; }; + 0C12EDC92616383C00B66C86 /* interned_strings_class.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interned_strings_class.h; sourceTree = "<group>"; }; + 0C12EDCA2616383C00B66C86 /* operator_name.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = operator_name.h; sourceTree = "<group>"; }; + 0C12EDCB2616383C00B66C86 /* DeprecatedTypePropertiesRegistry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeprecatedTypePropertiesRegistry.h; sourceTree = "<group>"; }; + 0C12EDCC2616383C00B66C86 /* Backtrace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Backtrace.h; sourceTree = "<group>"; }; + 0C12EDCD2616383C00B66C86 /* TransformationHelper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TransformationHelper.h; sourceTree = "<group>"; }; + 0C12EDCE2616383C00B66C86 /* blob.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = blob.h; sourceTree = "<group>"; }; + 0C12EDCF2616383C00B66C86 /* function_schema.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_schema.h; sourceTree = "<group>"; }; + 0C12EDD12616383C00B66C86 /* OperatorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OperatorOptions.h; sourceTree = "<group>"; }; + 0C12EDD22616383C00B66C86 /* RegistrationHandleRAII.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RegistrationHandleRAII.h; sourceTree = "<group>"; }; + 0C12EDD32616383C00B66C86 /* ObservedOperators.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ObservedOperators.h; sourceTree = "<group>"; }; + 0C12EDD42616383C00B66C86 /* DispatchKeyExtractor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DispatchKeyExtractor.h; sourceTree = "<group>"; }; + 0C12EDD52616383C00B66C86 /* Dispatcher.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dispatcher.h; sourceTree = "<group>"; }; + 0C12EDD62616383C00B66C86 /* CppSignature.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CppSignature.h; sourceTree = "<group>"; }; + 0C12EDD72616383C00B66C86 /* OperatorEntry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OperatorEntry.h; sourceTree = "<group>"; }; + 0C12EDD82616383C00B66C86 /* MT19937RNGEngine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MT19937RNGEngine.h; sourceTree = "<group>"; }; + 0C12EDD92616383C00B66C86 /* ivalue_to.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ivalue_to.h; sourceTree = "<group>"; }; + 0C12EDDA2616383C00B66C86 /* aten_interned_strings.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = aten_interned_strings.h; sourceTree = "<group>"; }; + 0C12EDDB2616383C00B66C86 /* LegacyTypeDispatch.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LegacyTypeDispatch.h; sourceTree = "<group>"; }; + 0C12EDDC2616383C00B66C86 /* function_schema_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function_schema_inl.h; sourceTree = "<group>"; }; + 0C12EDDD2616383C00B66C86 /* qualified_name.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qualified_name.h; sourceTree = "<group>"; }; + 0C12EDDE2616383C00B66C86 /* UndefinedTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UndefinedTensorImpl.h; sourceTree = "<group>"; }; + 0C12EDDF2616383C00B66C86 /* NamedTensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NamedTensor.h; sourceTree = "<group>"; }; + 0C12EDE02616383C00B66C86 /* Scalar.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Scalar.h; sourceTree = "<group>"; }; + 0C12EDE12616383C00B66C86 /* functional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = functional.h; sourceTree = "<group>"; }; + 0C12EDE22616383C00B66C86 /* DeprecatedTypeProperties.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeprecatedTypeProperties.h; sourceTree = "<group>"; }; + 0C12EDE32616383C00B66C86 /* interned_strings.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = interned_strings.h; sourceTree = "<group>"; }; + 0C12EDE42616383C00B66C86 /* List.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = List.h; sourceTree = "<group>"; }; + 0C12EDE52616383C00B66C86 /* ATenOpList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATenOpList.h; sourceTree = "<group>"; }; + 0C12EDE62616383C00B66C86 /* Dict.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dict.h; sourceTree = "<group>"; }; + 0C12EDE72616383C00B66C86 /* grad_mode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = grad_mode.h; sourceTree = "<group>"; }; + 0C12EDE82616383C00B66C86 /* DistributionsHelper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DistributionsHelper.h; sourceTree = "<group>"; }; + 0C12EDE92616383C00B66C86 /* Macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Macros.h; sourceTree = "<group>"; }; + 0C12EDEA2616383C00B66C86 /* VariableHooksInterface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VariableHooksInterface.h; sourceTree = "<group>"; }; + 0C12EDEB2616383C00B66C86 /* ScalarType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ScalarType.h; sourceTree = "<group>"; }; + 0C12EDEC2616383C00B66C86 /* Array.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Array.h; sourceTree = "<group>"; }; + 0C12EDED2616383C00B66C86 /* stack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = stack.h; sourceTree = "<group>"; }; + 0C12EDEE2616383C00B66C86 /* ATenGeneral.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATenGeneral.h; sourceTree = "<group>"; }; + 0C12EDEF2616383C00B66C86 /* UnsafeFromTH.h */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UnsafeFromTH.h; sourceTree = "<group>"; }; + 0C12EDF02616383C00B66C86 /* QuantizerBase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = QuantizerBase.h; sourceTree = "<group>"; }; + 0C12EDF12616383C00B66C86 /* alias_info.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = alias_info.h; sourceTree = "<group>"; }; + 0C12EDF22616383C00B66C86 /* List_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = List_inl.h; sourceTree = "<group>"; }; + 0C12EDF32616383C00B66C86 /* jit_type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = jit_type.h; sourceTree = "<group>"; }; + 0C12EDF42616383C00B66C86 /* ivalue.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ivalue.h; sourceTree = "<group>"; }; + 0C12EDF52616383C00B66C86 /* Dimname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dimname.h; sourceTree = "<group>"; }; + 0C12EDF62616383C00B66C86 /* Vitals.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Vitals.h; sourceTree = "<group>"; }; + 0C12EDF92616383C00B66C86 /* make_boxed_from_unboxed_functor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = make_boxed_from_unboxed_functor.h; sourceTree = "<group>"; }; + 0C12EDFA2616383C00B66C86 /* boxing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = boxing.h; sourceTree = "<group>"; }; + 0C12EDFB2616383C00B66C86 /* test_helpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = test_helpers.h; sourceTree = "<group>"; }; + 0C12EDFC2616383C00B66C86 /* WrapFunctionIntoFunctor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WrapFunctionIntoFunctor.h; sourceTree = "<group>"; }; + 0C12EDFD2616383C00B66C86 /* WrapFunctionIntoRuntimeFunctor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WrapFunctionIntoRuntimeFunctor.h; sourceTree = "<group>"; }; + 0C12EDFE2616383C00B66C86 /* KernelFunction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = KernelFunction.h; sourceTree = "<group>"; }; + 0C12EDFF2616383C00B66C86 /* KernelFunction_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = KernelFunction_impl.h; sourceTree = "<group>"; }; + 0C12EE002616383C00B66C86 /* builtin_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = builtin_function.h; sourceTree = "<group>"; }; + 0C12EE012616383C00B66C86 /* DimVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DimVector.h; sourceTree = "<group>"; }; + 0C12EE022616383C00B66C86 /* Reduction.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Reduction.h; sourceTree = "<group>"; }; + 0C12EE032616383C00B66C86 /* Tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Tensor.h; sourceTree = "<group>"; }; + 0C12EE042616383C00B66C86 /* function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = function.h; sourceTree = "<group>"; 
}; + 0C12EE052616383C00B66C86 /* Generator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Generator.h; sourceTree = "<group>"; }; + 0C12EE062616383C00B66C86 /* PhiloxRNGEngine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PhiloxRNGEngine.h; sourceTree = "<group>"; }; + 0C12EE072616383C00B66C86 /* TensorAccessor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorAccessor.h; sourceTree = "<group>"; }; + 0C12EE082616383C00B66C86 /* ivalue_inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ivalue_inl.h; sourceTree = "<group>"; }; + 0C12EE092616383C00B66C86 /* Variadic.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Variadic.h; sourceTree = "<group>"; }; + 0C12EE0A2616383C00B66C86 /* VmapMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VmapMode.h; sourceTree = "<group>"; }; + 0C12EE0B2616383C00B66C86 /* BatchedFallback.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BatchedFallback.h; sourceTree = "<group>"; }; + 0C12EE0C2616383C00B66C86 /* dlpack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = dlpack.h; sourceTree = "<group>"; }; + 0C12EE0D2616383C00B66C86 /* Config.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Config.h; sourceTree = "<group>"; }; + 0C12EE0E2616383C00B66C86 /* SparseTensorUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SparseTensorUtils.h; sourceTree = "<group>"; }; + 0C12EE0F2616383C00B66C86 /* Backtrace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Backtrace.h; sourceTree = "<group>"; }; + 0C12EE122616383C00B66C86 /* vec256_bfloat16.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_bfloat16.h; sourceTree = "<group>"; }; + 0C12EE132616383C00B66C86 /* vec256_float_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_float_neon.h; sourceTree = "<group>"; }; + 0C12EE142616383C00B66C86 /* missing_vst1_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = missing_vst1_neon.h; sourceTree = "<group>"; }; + 0C12EE152616383C00B66C86 /* vec256_qint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_qint.h; sourceTree = "<group>"; }; + 0C12EE162616383C00B66C86 /* intrinsics.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intrinsics.h; sourceTree = "<group>"; }; + 0C12EE172616383C00B66C86 /* functional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = functional.h; sourceTree = "<group>"; }; + 0C12EE182616383C00B66C86 /* vec256_complex_float.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_complex_float.h; sourceTree = "<group>"; }; + 0C12EE192616383C00B66C86 /* vec256_double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_double.h; sourceTree = "<group>"; }; + 0C12EE1A2616383C00B66C86 /* vec256_base.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
vec256_base.h; sourceTree = "<group>"; }; + 0C12EE1B2616383C00B66C86 /* vec256_float.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_float.h; sourceTree = "<group>"; }; + 0C12EE1C2616383C00B66C86 /* missing_vld1_neon.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = missing_vld1_neon.h; sourceTree = "<group>"; }; + 0C12EE1D2616383C00B66C86 /* vec256.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256.h; sourceTree = "<group>"; }; + 0C12EE1E2616383C00B66C86 /* vec256_int.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_int.h; sourceTree = "<group>"; }; + 0C12EE1F2616383C00B66C86 /* vec256_complex_double.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vec256_complex_double.h; sourceTree = "<group>"; }; + 0C12EE202616383C00B66C86 /* FlushDenormal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FlushDenormal.h; sourceTree = "<group>"; }; + 0C12EE212616383C00B66C86 /* vml.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = vml.h; sourceTree = "<group>"; }; + 0C12EE222616383C00B66C86 /* TracerMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TracerMode.h; sourceTree = "<group>"; }; + 0C12EE232616383C00B66C86 /* Backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Backend.h; sourceTree = "<group>"; }; + 0C12EE242616383C00B66C86 /* RegistrationDeclarations.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RegistrationDeclarations.h; sourceTree = "<group>"; }; + 0C12EE252616383C00B66C86 /* CompositeImplicitAutogradFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CompositeImplicitAutogradFunctions.h; sourceTree = "<group>"; }; + 0C12EE262616383C00B66C86 /* PTThreadPool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PTThreadPool.h; sourceTree = "<group>"; }; + 0C12EE272616383C00B66C86 /* OpaqueTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = OpaqueTensorImpl.h; sourceTree = "<group>"; }; + 0C12EE282616383C00B66C86 /* LegacyTHFunctionsCPU.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LegacyTHFunctionsCPU.h; sourceTree = "<group>"; }; + 0C12EE2A2616383C00B66C86 /* QTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = QTensorImpl.h; sourceTree = "<group>"; }; + 0C12EE2B2616383C00B66C86 /* Quantizer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Quantizer.h; sourceTree = "<group>"; }; + 0C12EE2C2616383C00B66C86 /* record_function.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = record_function.h; sourceTree = "<group>"; }; + 0C12EE2D2616383C00B66C86 /* WrapDimUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WrapDimUtils.h; sourceTree = "<group>"; }; + 0C12EE2E2616383C00B66C86 /* RedispatchFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RedispatchFunctions.h; sourceTree = "<group>"; }; + 
0C12EE2F2616383C00B66C86 /* Context.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Context.h; sourceTree = "<group>"; }; + 0C12EE302616383C00B66C86 /* div_rtn.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = div_rtn.h; sourceTree = "<group>"; }; + 0C12EE312616383C00B66C86 /* ExpandUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ExpandUtils.h; sourceTree = "<group>"; }; + 0C12EE322616383C00B66C86 /* TypeDefault.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeDefault.h; sourceTree = "<group>"; }; + 0C12EE332616383C00B66C86 /* CPUFixedAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUFixedAllocator.h; sourceTree = "<group>"; }; + 0C12EE342616383C00B66C86 /* NamedTensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NamedTensor.h; sourceTree = "<group>"; }; + 0C12EE352616383C00B66C86 /* Scalar.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Scalar.h; sourceTree = "<group>"; }; + 0C12EE362616383C00B66C86 /* ParallelNativeTBB.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParallelNativeTBB.h; sourceTree = "<group>"; }; + 0C12EE372616383C00B66C86 /* ArrayRef.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ArrayRef.h; sourceTree = "<group>"; }; + 0C12EE382616383C00B66C86 /* SequenceNumber.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SequenceNumber.h; sourceTree = "<group>"; }; + 0C12EE392616383C00B66C86 /* MatrixRef.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MatrixRef.h; sourceTree = "<group>"; }; + 0C12EE3A2616383C00B66C86 /* CompositeExplicitAutogradFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CompositeExplicitAutogradFunctions.h; sourceTree = "<group>"; }; + 0C12EE3B2616383C00B66C86 /* NumericUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NumericUtils.h; sourceTree = "<group>"; }; + 0C12EE3C2616383C00B66C86 /* ATen.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ATen.h; sourceTree = "<group>"; }; + 0C12EE3D2616383C00B66C86 /* TensorNames.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorNames.h; sourceTree = "<group>"; }; + 0C12EE3E2616383C00B66C86 /* TensorMeta.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorMeta.h; sourceTree = "<group>"; }; + 0C12EE3F2616383C00B66C86 /* TensorIndexing.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorIndexing.h; sourceTree = "<group>"; }; + 0C12EE402616383C00B66C86 /* Layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Layout.h; sourceTree = "<group>"; }; + 0C12EE412616383C00B66C86 /* SparseTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SparseTensorImpl.h; sourceTree = "<group>"; }; + 0C12EE432616383C00B66C86 /* CUDAHooksInterface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = 
CUDAHooksInterface.h; sourceTree = "<group>"; }; + 0C12EE442616383C00B66C86 /* FunctionTraits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FunctionTraits.h; sourceTree = "<group>"; }; + 0C12EE452616383C00B66C86 /* HIPHooksInterface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = HIPHooksInterface.h; sourceTree = "<group>"; }; + 0C12EE462616383C00B66C86 /* WrapDimUtilsMulti.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WrapDimUtilsMulti.h; sourceTree = "<group>"; }; + 0C12EE472616383C00B66C86 /* TensorOperators.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorOperators.h; sourceTree = "<group>"; }; + 0C12EE482616383C00B66C86 /* ScalarType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ScalarType.h; sourceTree = "<group>"; }; + 0C12EE492616383C00B66C86 /* cpp_custom_type_hack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cpp_custom_type_hack.h; sourceTree = "<group>"; }; + 0C12EE4A2616383C00B66C86 /* VmapTransforms.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VmapTransforms.h; sourceTree = "<group>"; }; + 0C12EE4B2616383C00B66C86 /* Storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Storage.h; sourceTree = "<group>"; }; + 0C12EE4C2616383C00B66C86 /* DeviceGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeviceGuard.h; sourceTree = "<group>"; }; + 0C12EE4D2616383C00B66C86 /* ParallelNative.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParallelNative.h; sourceTree = "<group>"; }; + 0C12EE4E2616383C00B66C86 /* Dispatch.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dispatch.h; sourceTree = "<group>"; }; + 0C12EE4F2616383C00B66C86 /* CPUGeneratorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUGeneratorImpl.h; sourceTree = "<group>"; }; + 0C12EE502616383C00B66C86 /* Functions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Functions.h; sourceTree = "<group>"; }; + 0C12EE512616383C00B66C86 /* ParallelOpenMP.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ParallelOpenMP.h; sourceTree = "<group>"; }; + 0C12EE522616383C00B66C86 /* BatchedTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BatchedTensorImpl.h; sourceTree = "<group>"; }; + 0C12EE532616383C00B66C86 /* CPUApplyUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUApplyUtils.h; sourceTree = "<group>"; }; + 0C12EE542616383C00B66C86 /* ThreadLocalState.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalState.h; sourceTree = "<group>"; }; + 0C12EE552616383C00B66C86 /* ScalarOps.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ScalarOps.h; sourceTree = "<group>"; }; + 0C12EE562616383C00B66C86 /* NativeFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NativeFunctions.h; sourceTree = "<group>"; }; + 0C12EE572616383C00B66C86 /* DynamicLibrary.h 
*/ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DynamicLibrary.h; sourceTree = "<group>"; }; + 0C12EE582616383C00B66C86 /* TensorGeometry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorGeometry.h; sourceTree = "<group>"; }; + 0C12EE592616383C00B66C86 /* TensorIterator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorIterator.h; sourceTree = "<group>"; }; + 0C12EE5A2616383C00B66C86 /* NamedTensorUtils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = NamedTensorUtils.h; sourceTree = "<group>"; }; + 0C12EE5B2616383C00B66C86 /* Dimname.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Dimname.h; sourceTree = "<group>"; }; + 0C12EE5C2616383C00B66C86 /* autocast_mode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = autocast_mode.h; sourceTree = "<group>"; }; + 0C12EE5D2616383C00B66C86 /* Parallel.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Parallel.h; sourceTree = "<group>"; }; + 0C12EE5E2616383C00B66C86 /* DimVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DimVector.h; sourceTree = "<group>"; }; + 0C12EE5F2616383C00B66C86 /* InferSize.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InferSize.h; sourceTree = "<group>"; }; + 0C12EE602616383C00B66C86 /* SmallVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SmallVector.h; sourceTree = "<group>"; }; + 0C12EE612616383C00B66C86 /* Tensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Tensor.h; sourceTree = "<group>"; }; + 0C12EE622616383C00B66C86 /* Generator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Generator.h; sourceTree = "<group>"; }; + 0C12EE632616383C00B66C86 /* AccumulateType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AccumulateType.h; sourceTree = "<group>"; }; + 0C12EE642616383C00B66C86 /* TensorAccessor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorAccessor.h; sourceTree = "<group>"; }; + 0C12EE652616383C00B66C86 /* LegacyTHFunctionsCUDA.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LegacyTHFunctionsCUDA.h; sourceTree = "<group>"; }; + 0C12EE6A2616383C00B66C86 /* InlineStreamGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineStreamGuard.h; sourceTree = "<group>"; }; + 0C12EE6B2616383C00B66C86 /* SizesAndStrides.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SizesAndStrides.h; sourceTree = "<group>"; }; + 0C12EE6C2616383C00B66C86 /* InlineDeviceGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineDeviceGuard.h; sourceTree = "<group>"; }; + 0C12EE6D2616383C00B66C86 /* LocalDispatchKeySet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LocalDispatchKeySet.h; sourceTree = "<group>"; }; + 0C12EE6E2616383C00B66C86 /* VirtualGuardImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = VirtualGuardImpl.h; 
sourceTree = "<group>"; }; + 0C12EE6F2616383C00B66C86 /* InlineEvent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InlineEvent.h; sourceTree = "<group>"; }; + 0C12EE702616383C00B66C86 /* DeviceGuardImplInterface.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeviceGuardImplInterface.h; sourceTree = "<group>"; }; + 0C12EE712616383C00B66C86 /* FakeGuardImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FakeGuardImpl.h; sourceTree = "<group>"; }; + 0C12EE722616383C00B66C86 /* QEngine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = QEngine.h; sourceTree = "<group>"; }; + 0C12EE732616383C00B66C86 /* TensorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorOptions.h; sourceTree = "<group>"; }; + 0C12EE742616383C00B66C86 /* Device.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Device.h; sourceTree = "<group>"; }; + 0C12EE752616383C00B66C86 /* CPUAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUAllocator.h; sourceTree = "<group>"; }; + 0C12EE762616383C00B66C86 /* DefaultDtype.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DefaultDtype.h; sourceTree = "<group>"; }; + 0C12EE772616383C00B66C86 /* DefaultTensorOptions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DefaultTensorOptions.h; sourceTree = "<group>"; }; + 0C12EE782616383C00B66C86 /* Event.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Event.h; sourceTree = "<group>"; }; + 0C12EE792616383C00B66C86 /* Backend.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Backend.h; sourceTree = "<group>"; }; + 0C12EE7A2616383C00B66C86 /* CompileTimeFunctionPointer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CompileTimeFunctionPointer.h; sourceTree = "<group>"; }; + 0C12EE7B2616383C00B66C86 /* WrapDimMinimal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = WrapDimMinimal.h; sourceTree = "<group>"; }; + 0C12EE7C2616383C00B66C86 /* QScheme.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = QScheme.h; sourceTree = "<group>"; }; + 0C12EE7D2616383C00B66C86 /* Stream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Stream.h; sourceTree = "<group>"; }; + 0C12EE7E2616383C00B66C86 /* UndefinedTensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UndefinedTensorImpl.h; sourceTree = "<group>"; }; + 0C12EE7F2616383C00B66C86 /* Scalar.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Scalar.h; sourceTree = "<group>"; }; + 0C12EE802616383C00B66C86 /* thread_pool.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = thread_pool.h; sourceTree = "<group>"; }; + 0C12EE812616383C00B66C86 /* CopyBytes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CopyBytes.h; sourceTree = "<group>"; }; + 0C12EE822616383C00B66C86 /* StreamGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = StreamGuard.h; sourceTree = "<group>"; }; + 0C12EE832616383C00B66C86 /* Layout.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Layout.h; sourceTree = "<group>"; }; + 0C12EE842616383C00B66C86 /* GeneratorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GeneratorImpl.h; sourceTree = "<group>"; }; + 0C12EE852616383C00B66C86 /* DispatchKeySet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DispatchKeySet.h; sourceTree = "<group>"; }; + 0C12EE862616383C00B66C86 /* Allocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Allocator.h; sourceTree = "<group>"; }; + 0C12EE872616383C00B66C86 /* TensorImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TensorImpl.h; sourceTree = "<group>"; }; + 0C12EE882616383C00B66C86 /* ScalarType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ScalarType.h; sourceTree = "<group>"; }; + 0C12EE892616383C00B66C86 /* Storage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Storage.h; sourceTree = "<group>"; }; + 0C12EE8A2616383C00B66C86 /* DeviceType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeviceType.h; sourceTree = "<group>"; }; + 0C12EE8B2616383C00B66C86 /* DeviceGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DeviceGuard.h; sourceTree = "<group>"; }; + 0C12EE8C2616383C00B66C86 /* StorageImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StorageImpl.h; sourceTree = "<group>"; }; + 0C12EE8D2616383C00B66C86 /* MemoryFormat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MemoryFormat.h; sourceTree = "<group>"; }; + 0C12EE8E2616383C00B66C86 /* DispatchKey.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DispatchKey.h; sourceTree = "<group>"; }; + 0C12EE8F2616383C00B66C86 /* ScalarTypeToTypeMeta.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ScalarTypeToTypeMeta.h; sourceTree = "<group>"; }; + 0C12EE902616383C00B66C86 /* InferenceMode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = InferenceMode.h; sourceTree = "<group>"; }; + 0C12EE952616383C00B66C86 /* complex_test_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_test_common.h; sourceTree = "<group>"; }; + 0C12EE962616383C00B66C86 /* complex_math_test_common.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_math_test_common.h; sourceTree = "<group>"; }; + 0C12EE972616383C00B66C86 /* Macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Macros.h; sourceTree = "<group>"; }; + 0C12EE992616383C00B66C86 /* Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Type.h; sourceTree = "<group>"; }; + 0C12EE9A2616383C00B66C86 /* order_preserving_flat_hash_map.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = order_preserving_flat_hash_map.h; sourceTree = "<group>"; }; + 0C12EE9B2616383C00B66C86 /* reverse_iterator.h */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = reverse_iterator.h; sourceTree = "<group>"; }; + 0C12EE9C2616383C00B66C86 /* quint4x2.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quint4x2.h; sourceTree = "<group>"; }; + 0C12EE9D2616383C00B66C86 /* Half.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Half.h; sourceTree = "<group>"; }; + 0C12EE9E2616383C00B66C86 /* flat_hash_map.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = flat_hash_map.h; sourceTree = "<group>"; }; + 0C12EE9F2616383C00B66C86 /* llvmMathExtras.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = llvmMathExtras.h; sourceTree = "<group>"; }; + 0C12EEA02616383C00B66C86 /* math_compat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = math_compat.h; sourceTree = "<group>"; }; + 0C12EEA12616383C00B66C86 /* Bitset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Bitset.h; sourceTree = "<group>"; }; + 0C12EEA22616383C00B66C86 /* typeid.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = typeid.h; sourceTree = "<group>"; }; + 0C12EEA32616383C00B66C86 /* intrusive_ptr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = intrusive_ptr.h; sourceTree = "<group>"; }; + 0C12EEA42616383C00B66C86 /* string_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = string_utils.h; sourceTree = "<group>"; }; + 0C12EEA52616383C00B66C86 /* win32-headers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "win32-headers.h"; sourceTree = "<group>"; }; + 0C12EEA62616383C00B66C86 /* AlignOf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = AlignOf.h; sourceTree = "<group>"; }; + 0C12EEA72616383C00B66C86 /* numa.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = numa.h; sourceTree = "<group>"; }; + 0C12EEA82616383C00B66C86 /* qint32.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qint32.h; sourceTree = "<group>"; }; + 0C12EEA92616383C00B66C86 /* MaybeOwned.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MaybeOwned.h; sourceTree = "<group>"; }; + 0C12EEAA2616383C00B66C86 /* Half-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "Half-inl.h"; sourceTree = "<group>"; }; + 0C12EEAB2616383C00B66C86 /* TypeTraits.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeTraits.h; sourceTree = "<group>"; }; + 0C12EEAC2616383C00B66C86 /* FunctionRef.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FunctionRef.h; sourceTree = "<group>"; }; + 0C12EEAD2616383C00B66C86 /* Backtrace.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Backtrace.h; sourceTree = "<group>"; }; + 0C12EEAE2616383C00B66C86 /* BFloat16-inl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "BFloat16-inl.h"; sourceTree = "<group>"; }; + 0C12EEAF2616383C00B66C86 /* in_place.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = in_place.h; sourceTree = "<group>"; }; + 0C12EEB02616383C00B66C86 /* ConstexprCrc.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ConstexprCrc.h; sourceTree = "<group>"; }; + 0C12EEB12616383C00B66C86 /* IdWrapper.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = IdWrapper.h; sourceTree = "<group>"; }; + 0C12EEB22616383C00B66C86 /* Flags.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Flags.h; sourceTree = "<group>"; }; + 0C12EEB32616383C00B66C86 /* overloaded.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = overloaded.h; sourceTree = "<group>"; }; + 0C12EEB42616383C00B66C86 /* quint8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = quint8.h; sourceTree = "<group>"; }; + 0C12EEB52616383C00B66C86 /* StringUtil.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StringUtil.h; sourceTree = "<group>"; }; + 0C12EEB62616383C00B66C86 /* Logging.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Logging.h; sourceTree = "<group>"; }; + 0C12EEB72616383C00B66C86 /* MathConstants.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = MathConstants.h; sourceTree = "<group>"; }; + 0C12EEB82616383C00B66C86 /* Registry.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Registry.h; sourceTree = "<group>"; }; + 0C12EEB92616383C00B66C86 /* Optional.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Optional.h; sourceTree = "<group>"; }; + 0C12EEBA2616383C00B66C86 /* tempfile.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = tempfile.h; sourceTree = "<group>"; }; + 0C12EEBB2616383C00B66C86 /* ArrayRef.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ArrayRef.h; sourceTree = "<group>"; }; + 0C12EEBC2616383C00B66C86 /* thread_name.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = thread_name.h; sourceTree = "<group>"; }; + 0C12EEBD2616383C00B66C86 /* Unicode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Unicode.h; sourceTree = "<group>"; }; + 0C12EEBE2616383C00B66C86 /* TypeCast.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeCast.h; sourceTree = "<group>"; }; + 0C12EEBF2616383C00B66C86 /* sparse_bitset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sparse_bitset.h; sourceTree = "<group>"; }; + 0C12EEC02616383C00B66C86 /* BFloat16.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = BFloat16.h; sourceTree = "<group>"; }; + 0C12EEC12616383C00B66C86 /* TypeList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeList.h; sourceTree = "<group>"; }; + 0C12EEC22616383C00B66C86 /* TypeIndex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TypeIndex.h; sourceTree = "<group>"; }; + 0C12EEC32616383C00B66C86 /* Array.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Array.h; sourceTree = "<group>"; }; + 0C12EEC42616383C00B66C86 /* 
logging_is_google_glog.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging_is_google_glog.h; sourceTree = "<group>"; }; + 0C12EEC52616383C00B66C86 /* Metaprogramming.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Metaprogramming.h; sourceTree = "<group>"; }; + 0C12EEC62616383C00B66C86 /* either.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = either.h; sourceTree = "<group>"; }; + 0C12EEC72616383C00B66C86 /* BFloat16-math.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "BFloat16-math.h"; sourceTree = "<group>"; }; + 0C12EEC82616383C00B66C86 /* Deprecated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Deprecated.h; sourceTree = "<group>"; }; + 0C12EEC92616383C00B66C86 /* irange.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = irange.h; sourceTree = "<group>"; }; + 0C12EECA2616383C00B66C86 /* LeftRight.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = LeftRight.h; sourceTree = "<group>"; }; + 0C12EECB2616383C00B66C86 /* qint8.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = qint8.h; sourceTree = "<group>"; }; + 0C12EECC2616383C00B66C86 /* complex_math.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_math.h; sourceTree = "<group>"; }; + 0C12EECD2616383C00B66C86 /* logging_is_not_google_glog.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = logging_is_not_google_glog.h; sourceTree = "<group>"; }; + 0C12EECE2616383C00B66C86 /* Exception.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Exception.h; sourceTree = "<group>"; }; + 0C12EECF2616383C00B66C86 /* UniqueVoidPtr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = UniqueVoidPtr.h; sourceTree = "<group>"; }; + 0C12EED02616383C00B66C86 /* ThreadLocalDebugInfo.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ThreadLocalDebugInfo.h; sourceTree = "<group>"; }; + 0C12EED12616383C00B66C86 /* accumulate.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = accumulate.h; sourceTree = "<group>"; }; + 0C12EED22616383C00B66C86 /* C++17.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "C++17.h"; sourceTree = "<group>"; }; + 0C12EED32616383C00B66C86 /* SmallVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SmallVector.h; sourceTree = "<group>"; }; + 0C12EED42616383C00B66C86 /* hash.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = hash.h; sourceTree = "<group>"; }; + 0C12EED52616383C00B66C86 /* python_stub.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = python_stub.h; sourceTree = "<group>"; }; + 0C12EED62616383C00B66C86 /* complex.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex.h; sourceTree = "<group>"; }; + 0C12EED72616383C00B66C86 /* string_view.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = string_view.h; sourceTree = "<group>"; }; + 0C12EED82616383C00B66C86 /* 
variant.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = variant.h; sourceTree = "<group>"; }; + 0C12EED92616383C00B66C86 /* complex_utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = complex_utils.h; sourceTree = "<group>"; }; + 0C12EEDC2616383C00B66C86 /* CUDATest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDATest.h; sourceTree = "<group>"; }; + 0C12EEDD2616383C00B66C86 /* CUDAGuardImpl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAGuardImpl.h; sourceTree = "<group>"; }; + 0C12EEDE2616383C00B66C86 /* CUDAMathCompat.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAMathCompat.h; sourceTree = "<group>"; }; + 0C12EEE12616383C00B66C86 /* CUDAStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAStream.h; sourceTree = "<group>"; }; + 0C12EEE22616383C00B66C86 /* CUDAGuard.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAGuard.h; sourceTree = "<group>"; }; + 0C12EEE32616383C00B66C86 /* CUDAGraphsC10Utils.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAGraphsC10Utils.h; sourceTree = "<group>"; }; + 0C12EEE42616383C00B66C86 /* CUDAMacros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAMacros.h; sourceTree = "<group>"; }; + 0C12EEE52616383C00B66C86 /* CUDAFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAFunctions.h; sourceTree = "<group>"; }; + 0C12EEE62616383C00B66C86 /* CUDAException.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDAException.h; sourceTree = "<group>"; }; + 0C12EEE72616383C00B66C86 /* CUDACachingAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CUDACachingAllocator.h; sourceTree = "<group>"; }; + 0C12EEE92616383C00B66C86 /* cmake_macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = cmake_macros.h; sourceTree = "<group>"; }; + 0C12EEEA2616383C00B66C86 /* Export.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Export.h; sourceTree = "<group>"; }; + 0C12EEEB2616383C00B66C86 /* Macros.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Macros.h; sourceTree = "<group>"; }; + 0C12EEED2616383C00B66C86 /* CPUCachingAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUCachingAllocator.h; sourceTree = "<group>"; }; + 0C12EEEE2616383C00B66C86 /* CPUProfilingAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = CPUProfilingAllocator.h; sourceTree = "<group>"; }; + 0C12EEF02616383C00B66C86 /* psimd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = psimd.h; sourceTree = "<group>"; }; + 0C12EEF12616383C00B66C86 /* fxdiv.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fxdiv.h; sourceTree = "<group>"; }; + 0C12EEF32616383C00B66C86 /* avx.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = avx.py; sourceTree = "<group>"; }; + 0C12EEF42616383C00B66C86 
/* __init__.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = __init__.py; sourceTree = "<group>"; }; + 0C12EEF52616383C00B66C86 /* fp16.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = fp16.h; sourceTree = "<group>"; }; + 0C12EEF62616383C00B66C86 /* avx2.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = avx2.py; sourceTree = "<group>"; }; + 0C12EEF72616383C00B66C86 /* psimd.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = psimd.h; sourceTree = "<group>"; }; + 0C12EEF82616383C00B66C86 /* bitcasts.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = bitcasts.h; sourceTree = "<group>"; }; + 0C12EEFB2616383C00B66C86 /* THCUNN.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THCUNN.h; sourceTree = "<group>"; }; + 0C12EEFD2616383C00B66C86 /* THTensorDimApply.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensorDimApply.h; sourceTree = "<group>"; }; + 0C12EEFE2616383C00B66C86 /* THBlas.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THBlas.h; sourceTree = "<group>"; }; + 0C12EEFF2616383C00B66C86 /* THGenerateQUInt8Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateQUInt8Type.h; sourceTree = "<group>"; }; + 0C12EF002616383C00B66C86 /* THGenerateQInt8Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateQInt8Type.h; sourceTree = "<group>"; }; + 0C12EF012616383C00B66C86 /* THGenerateComplexTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateComplexTypes.h; sourceTree = "<group>"; }; + 0C12EF022616383C00B66C86 /* THGenerateFloatType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateFloatType.h; sourceTree = "<group>"; }; + 0C12EF032616383C00B66C86 /* THGenerateQInt32Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateQInt32Type.h; sourceTree = "<group>"; }; + 0C12EF042616383C00B66C86 /* THGenerateDoubleType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateDoubleType.h; sourceTree = "<group>"; }; + 0C12EF052616383C00B66C86 /* THGenerateShortType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateShortType.h; sourceTree = "<group>"; }; + 0C12EF062616383C00B66C86 /* THGenerateIntTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateIntTypes.h; sourceTree = "<group>"; }; + 0C12EF072616383C00B66C86 /* THGenerateLongType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateLongType.h; sourceTree = "<group>"; }; + 0C12EF082616383C00B66C86 /* THGenerateComplexFloatType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateComplexFloatType.h; sourceTree = "<group>"; }; + 0C12EF092616383C00B66C86 /* THAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THAllocator.h; sourceTree = "<group>"; }; + 0C12EF0A2616383C00B66C86 /* THGenerateCharType.h */ = {isa = PBXFileReference; 
fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateCharType.h; sourceTree = "<group>"; }; + 0C12EF0B2616383C00B66C86 /* THStorage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THStorage.h; sourceTree = "<group>"; }; + 0C12EF0C2616383C00B66C86 /* THHalf.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THHalf.h; sourceTree = "<group>"; }; + 0C12EF0D2616383C00B66C86 /* THGenerateHalfType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateHalfType.h; sourceTree = "<group>"; }; + 0C12EF0E2616383C00B66C86 /* THGenerateIntType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateIntType.h; sourceTree = "<group>"; }; + 0C12EF0F2616383C00B66C86 /* THVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THVector.h; sourceTree = "<group>"; }; + 0C12EF102616383C00B66C86 /* THGeneral.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGeneral.h; sourceTree = "<group>"; }; + 0C12EF112616383C00B66C86 /* THGenerateBoolType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateBoolType.h; sourceTree = "<group>"; }; + 0C12EF122616383C00B66C86 /* THLapack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THLapack.h; sourceTree = "<group>"; }; + 0C12EF132616383C00B66C86 /* THGenerateComplexDoubleType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateComplexDoubleType.h; sourceTree = "<group>"; }; + 0C12EF142616383C00B66C86 /* THGenerateBFloat16Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateBFloat16Type.h; sourceTree = "<group>"; }; + 0C12EF152616383C00B66C86 /* THGenerateQTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateQTypes.h; sourceTree = "<group>"; }; + 0C12EF162616383C00B66C86 /* THGenerateFloatTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateFloatTypes.h; sourceTree = "<group>"; }; + 0C12EF182616383C00B66C86 /* THBlas.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THBlas.h; sourceTree = "<group>"; }; + 0C12EF192616383C00B66C86 /* THTensor.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THTensor.cpp; sourceTree = "<group>"; }; + 0C12EF1A2616383C00B66C86 /* THTensorMath.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THTensorMath.cpp; sourceTree = "<group>"; }; + 0C12EF1B2616383C00B66C86 /* THTensorMath.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensorMath.h; sourceTree = "<group>"; }; + 0C12EF1C2616383C00B66C86 /* THStorageCopy.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THStorageCopy.cpp; sourceTree = "<group>"; }; + 0C12EF1D2616383C00B66C86 /* THTensorFastGetSet.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = THTensorFastGetSet.hpp; sourceTree = "<group>"; }; + 0C12EF1E2616383C00B66C86 /* THStorage.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path 
= THStorage.h; sourceTree = "<group>"; }; + 0C12EF1F2616383C00B66C86 /* THTensorLapack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensorLapack.h; sourceTree = "<group>"; }; + 0C12EF202616383C00B66C86 /* THVector.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THVector.h; sourceTree = "<group>"; }; + 0C12EF212616383C00B66C86 /* THLapack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THLapack.cpp; sourceTree = "<group>"; }; + 0C12EF222616383C00B66C86 /* THStorageCopy.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THStorageCopy.h; sourceTree = "<group>"; }; + 0C12EF232616383C00B66C86 /* THLapack.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THLapack.h; sourceTree = "<group>"; }; + 0C12EF242616383C00B66C86 /* THStorage.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THStorage.cpp; sourceTree = "<group>"; }; + 0C12EF252616383C00B66C86 /* THTensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensor.h; sourceTree = "<group>"; }; + 0C12EF262616383C00B66C86 /* THBlas.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THBlas.cpp; sourceTree = "<group>"; }; + 0C12EF272616383C00B66C86 /* THTensorLapack.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = THTensorLapack.cpp; sourceTree = "<group>"; }; + 0C12EF282616383C00B66C86 /* THTensor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = THTensor.hpp; sourceTree = "<group>"; }; + 0C12EF292616383C00B66C86 /* THTensor.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensor.h; sourceTree = "<group>"; }; + 0C12EF2A2616383C00B66C86 /* TH.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = TH.h; sourceTree = "<group>"; }; + 0C12EF2B2616383C00B66C86 /* THTensorApply.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THTensorApply.h; sourceTree = "<group>"; }; + 0C12EF2C2616383C00B66C86 /* THStorageFunctions.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = THStorageFunctions.hpp; sourceTree = "<group>"; }; + 0C12EF2D2616383C00B66C86 /* THGenerateAllTypes.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateAllTypes.h; sourceTree = "<group>"; }; + 0C12EF2E2616383C00B66C86 /* THTensor.hpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.h; path = THTensor.hpp; sourceTree = "<group>"; }; + 0C12EF2F2616383C00B66C86 /* THGenerateByteType.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateByteType.h; sourceTree = "<group>"; }; + 0C12EF302616383C00B66C86 /* THStorageFunctions.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THStorageFunctions.h; sourceTree = "<group>"; }; + 0C12EF312616383C00B66C86 /* THGenerateQUInt4x2Type.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = THGenerateQUInt4x2Type.h; sourceTree = "<group>"; }; + 0C12EF332616383C00B66C86 /* libtorch_cpu.a */ = {isa = PBXFileReference; 
lastKnownFileType = archive.ar; path = libtorch_cpu.a; sourceTree = "<group>"; }; + 0C12EF342616383C00B66C86 /* libtorch.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libtorch.a; sourceTree = "<group>"; }; + 0C12EF352616383C00B66C86 /* libcpuinfo.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libcpuinfo.a; sourceTree = "<group>"; }; + 0C12EF362616383C00B66C86 /* libXNNPACK.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libXNNPACK.a; sourceTree = "<group>"; }; + 0C12EF372616383C00B66C86 /* libtorchvision_ops.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libtorchvision_ops.a; sourceTree = "<group>"; }; + 0C12EF382616383C00B66C86 /* libpthreadpool.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libpthreadpool.a; sourceTree = "<group>"; }; + 0C12EF392616383C00B66C86 /* libc10.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libc10.a; sourceTree = "<group>"; }; + 0C12EF3A2616383C00B66C86 /* libeigen_blas.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libeigen_blas.a; sourceTree = "<group>"; }; + 0C12EF3B2616383C00B66C86 /* libclog.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libclog.a; sourceTree = "<group>"; }; + 0C12EF3C2616383C00B66C86 /* libpytorch_qnnpack.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; path = libpytorch_qnnpack.a; sourceTree = "<group>"; }; + 0C12EF7526163B7600B66C86 /* frcnn_mnetv3.pt */ = {isa = PBXFileReference; lastKnownFileType = file; path = frcnn_mnetv3.pt; sourceTree = "<group>"; }; + 0CEB0ABB26151A8800F1F7D5 /* VisionTestApp.app */ = {isa = PBXFileReference; explicitFileType = wrapper.application; includeInIndex = 0; path = VisionTestApp.app; sourceTree = BUILT_PRODUCTS_DIR; }; + 0CEB0ABE26151A8800F1F7D5 /* AppDelegate.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = AppDelegate.h; sourceTree = "<group>"; }; + 0CEB0ABF26151A8800F1F7D5 /* AppDelegate.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = AppDelegate.m; sourceTree = "<group>"; }; + 0CEB0AC426151A8800F1F7D5 /* ViewController.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ViewController.h; sourceTree = "<group>"; }; + 0CEB0AC526151A8800F1F7D5 /* ViewController.mm */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = ViewController.mm; sourceTree = "<group>"; }; + 0CEB0AC826151A8800F1F7D5 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/Main.storyboard; sourceTree = "<group>"; }; + 0CEB0ACA26151A8900F1F7D5 /* Assets.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Assets.xcassets; sourceTree = "<group>"; }; + 0CEB0ACD26151A8900F1F7D5 /* Base */ = {isa = PBXFileReference; lastKnownFileType = file.storyboard; name = Base; path = Base.lproj/LaunchScreen.storyboard; sourceTree = "<group>"; }; + 0CEB0ACF26151A8900F1F7D5 /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; + 0CEB0AD026151A8900F1F7D5 /* main.m */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.objc; path = main.m; sourceTree = "<group>"; }; + 0CEB0B3826152ED900F1F7D5 /* ModelRunner.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = ModelRunner.h; sourceTree = "<group>"; }; + 0CEB0B3926152ED900F1F7D5 /* ModelRunner.mm */ = {isa = 
PBXFileReference; lastKnownFileType = sourcecode.cpp.objcpp; path = ModelRunner.mm; sourceTree = "<group>"; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 0CEB0AB826151A8800F1F7D5 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 0C12EF502616383D00B66C86 /* libpytorch_qnnpack.a in Frameworks */, + 0C12EF4C2616383D00B66C86 /* libpthreadpool.a in Frameworks */, + 0C12EF4F2616383D00B66C86 /* libclog.a in Frameworks */, + 0C12EF482616383D00B66C86 /* libtorch.a in Frameworks */, + 0C12EF4A2616383D00B66C86 /* libXNNPACK.a in Frameworks */, + 0C12EF472616383D00B66C86 /* libtorch_cpu.a in Frameworks */, + 0C12EF7A26163C7C00B66C86 /* libtorchvision_ops.a in Frameworks */, + 0C12EF492616383D00B66C86 /* libcpuinfo.a in Frameworks */, + 0C12EF4E2616383D00B66C86 /* libeigen_blas.a in Frameworks */, + 0C12EF4D2616383D00B66C86 /* libc10.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 0C12E7872616383A00B66C86 /* install */ = { + isa = PBXGroup; + children = ( + 0C12E7882616383A00B66C86 /* include */, + 0C12EF322616383C00B66C86 /* lib */, + ); + path = install; + sourceTree = "<group>"; + }; + 0C12E7882616383A00B66C86 /* include */ = { + isa = PBXGroup; + children = ( + 0C12E7892616383A00B66C86 /* pybind11 */, + 0C12E7A32616383A00B66C86 /* caffe2 */, + 0C12EAB22616383B00B66C86 /* cpuinfo.h */, + 0C12EAB32616383B00B66C86 /* torch */, + 0C12EDAA2616383C00B66C86 /* xnnpack.h */, + 0C12EDAB2616383C00B66C86 /* fp16.h */, + 0C12EDAC2616383C00B66C86 /* qnnpack_func.h */, + 0C12EDAD2616383C00B66C86 /* pthreadpool.h */, + 0C12EDAE2616383C00B66C86 /* clog.h */, + 0C12EDAF2616383C00B66C86 /* ATen */, + 0C12EE662616383C00B66C86 /* c10 */, + 0C12EEF02616383C00B66C86 /* psimd.h */, + 0C12EEF12616383C00B66C86 /* fxdiv.h */, + 0C12EEF22616383C00B66C86 /* fp16 */, + 0C12EEF92616383C00B66C86 /* THCUNN */, + 0C12EEFC2616383C00B66C86 /* TH */, + ); + path = include; + sourceTree = "<group>"; + }; + 0C12E7892616383A00B66C86 /* pybind11 */ = { + isa = PBXGroup; + children = ( + 0C12E78A2616383A00B66C86 /* attr.h */, + 0C12E78B2616383A00B66C86 /* embed.h */, + 0C12E78C2616383A00B66C86 /* numpy.h */, + 0C12E78D2616383A00B66C86 /* pybind11.h */, + 0C12E78E2616383A00B66C86 /* operators.h */, + 0C12E78F2616383A00B66C86 /* iostream.h */, + 0C12E7902616383A00B66C86 /* chrono.h */, + 0C12E7912616383A00B66C86 /* stl_bind.h */, + 0C12E7922616383A00B66C86 /* buffer_info.h */, + 0C12E7932616383A00B66C86 /* options.h */, + 0C12E7942616383A00B66C86 /* functional.h */, + 0C12E7952616383A00B66C86 /* stl.h */, + 0C12E7962616383A00B66C86 /* detail */, + 0C12E79D2616383A00B66C86 /* common.h */, + 0C12E79E2616383A00B66C86 /* eval.h */, + 0C12E79F2616383A00B66C86 /* cast.h */, + 0C12E7A02616383A00B66C86 /* eigen.h */, + 0C12E7A12616383A00B66C86 /* pytypes.h */, + 0C12E7A22616383A00B66C86 /* complex.h */, + ); + path = pybind11; + sourceTree = "<group>"; + }; + 0C12E7962616383A00B66C86 /* detail */ = { + isa = PBXGroup; + children = ( + 0C12E7972616383A00B66C86 /* typeid.h */, + 0C12E7982616383A00B66C86 /* descr.h */, + 0C12E7992616383A00B66C86 /* internals.h */, + 0C12E79A2616383A00B66C86 /* common.h */, + 0C12E79B2616383A00B66C86 /* class.h */, + 0C12E79C2616383A00B66C86 /* init.h */, + ); + path = detail; + sourceTree = "<group>"; + }; + 0C12E7A32616383A00B66C86 /* caffe2 */ = { + isa = PBXGroup; + children = ( + 0C12E7A42616383A00B66C86 /* video */, + 
0C12E7A92616383A00B66C86 /* ideep */, + 0C12E7B32616383A00B66C86 /* core */, + 0C12E80E2616383A00B66C86 /* mpi */, + 0C12E8112616383A00B66C86 /* proto */, + 0C12E8142616383A00B66C86 /* test */, + 0C12E8162616383A00B66C86 /* operators */, + 0C12E9432616383A00B66C86 /* onnx */, + 0C12E9502616383A00B66C86 /* python */, + 0C12E9702616383A00B66C86 /* distributed */, + 0C12E9772616383A00B66C86 /* perfkernels */, + 0C12E9852616383A00B66C86 /* experiments */, + 0C12E9902616383A00B66C86 /* cuda_rtc */, + 0C12E9922616383A00B66C86 /* serialize */, + 0C12E9992616383A00B66C86 /* utils */, + 0C12E9BE2616383B00B66C86 /* contrib */, + 0C12E9F72616383B00B66C86 /* image */, + 0C12E9FA2616383B00B66C86 /* quantization */, + 0C12EA2B2616383B00B66C86 /* transforms */, + 0C12EA302616383B00B66C86 /* mobile */, + 0C12EA572616383B00B66C86 /* sgd */, + 0C12EA702616383B00B66C86 /* queue */, + 0C12EA762616383B00B66C86 /* db */, + 0C12EA782616383B00B66C86 /* opt */, + 0C12EA962616383B00B66C86 /* predictor */, + 0C12EAA72616383B00B66C86 /* observers */, + 0C12EAAC2616383B00B66C86 /* share */, + ); + path = caffe2; + sourceTree = "<group>"; + }; + 0C12E7A42616383A00B66C86 /* video */ = { + isa = PBXGroup; + children = ( + 0C12E7A52616383A00B66C86 /* optical_flow.h */, + 0C12E7A62616383A00B66C86 /* video_decoder.h */, + 0C12E7A72616383A00B66C86 /* video_input_op.h */, + 0C12E7A82616383A00B66C86 /* video_io.h */, + ); + path = video; + sourceTree = "<group>"; + }; + 0C12E7A92616383A00B66C86 /* ideep */ = { + isa = PBXGroup; + children = ( + 0C12E7AA2616383A00B66C86 /* operators */, + 0C12E7AF2616383A00B66C86 /* utils */, + 0C12E7B22616383A00B66C86 /* ideep_utils.h */, + ); + path = ideep; + sourceTree = "<group>"; + }; + 0C12E7AA2616383A00B66C86 /* operators */ = { + isa = PBXGroup; + children = ( + 0C12E7AB2616383A00B66C86 /* conv_transpose_unpool_base_op.h */, + 0C12E7AC2616383A00B66C86 /* quantization */, + 0C12E7AD2616383A00B66C86 /* operator_fallback_ideep.h */, + 0C12E7AE2616383A00B66C86 /* conv_pool_base_op.h */, + ); + path = operators; + sourceTree = "<group>"; + }; + 0C12E7AC2616383A00B66C86 /* quantization */ = { + isa = PBXGroup; + children = ( + ); + path = quantization; + sourceTree = "<group>"; + }; + 0C12E7AF2616383A00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12E7B02616383A00B66C86 /* ideep_context.h */, + 0C12E7B12616383A00B66C86 /* ideep_operator.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12E7B32616383A00B66C86 /* core */ = { + isa = PBXGroup; + children = ( + 0C12E7B42616383A00B66C86 /* net_async_task_graph.h */, + 0C12E7B52616383A00B66C86 /* net_simple_refcount.h */, + 0C12E7B62616383A00B66C86 /* tensor_impl.h */, + 0C12E7B72616383A00B66C86 /* plan_executor.h */, + 0C12E7B82616383A00B66C86 /* qtensor_serialization.h */, + 0C12E7B92616383A00B66C86 /* context_gpu.h */, + 0C12E7BA2616383A00B66C86 /* observer.h */, + 0C12E7BB2616383A00B66C86 /* blob_serializer_base.h */, + 0C12E7BC2616383A00B66C86 /* memonger.h */, + 0C12E7BD2616383A00B66C86 /* tensor_int8.h */, + 0C12E7BE2616383A00B66C86 /* static_tracepoint.h */, + 0C12E7BF2616383A00B66C86 /* net.h */, + 0C12E7C02616383A00B66C86 /* numa.h */, + 0C12E7C12616383A00B66C86 /* scope_guard.h */, + 0C12E7C22616383A00B66C86 /* test_utils.h */, + 0C12E7C32616383A00B66C86 /* event.h */, + 0C12E7C42616383A00B66C86 /* types.h */, + 0C12E7C52616383A00B66C86 /* context_base.h */, + 0C12E7C62616383A00B66C86 /* operator.h */, + 0C12E7C72616383A00B66C86 /* db.h */, + 0C12E7C82616383A00B66C86 /* blob.h */, + 0C12E7C92616383A00B66C86 /* 
static_tracepoint_elfx86.h */, + 0C12E7CA2616383A00B66C86 /* net_async_tracing.h */, + 0C12E7CB2616383A00B66C86 /* flags.h */, + 0C12E7CC2616383A00B66C86 /* net_async_task_future.h */, + 0C12E7CD2616383A00B66C86 /* operator_schema.h */, + 0C12E7CE2616383A00B66C86 /* context.h */, + 0C12E7CF2616383A00B66C86 /* net_async_base.h */, + 0C12E7D02616383A00B66C86 /* prof_dag_counters.h */, + 0C12E7D12616383A00B66C86 /* logging.h */, + 0C12E7D22616383A00B66C86 /* net_async_scheduling.h */, + 0C12E7D32616383A00B66C86 /* graph.h */, + 0C12E7D42616383A00B66C86 /* common_cudnn.h */, + 0C12E7D52616383A00B66C86 /* net_async_task.h */, + 0C12E7D62616383A00B66C86 /* export_caffe2_op_to_c10.h */, + 0C12E7D72616383A00B66C86 /* net_simple.h */, + 0C12E7D82616383A00B66C86 /* workspace.h */, + 0C12E7D92616383A00B66C86 /* timer.h */, + 0C12E7DA2616383A00B66C86 /* event_cpu.h */, + 0C12E7DB2616383A00B66C86 /* common.h */, + 0C12E7DC2616383A00B66C86 /* blob_stats.h */, + 0C12E7DD2616383A00B66C86 /* allocator.h */, + 0C12E7DE2616383A00B66C86 /* macros.h */, + 0C12E7DF2616383A00B66C86 /* hip */, + 0C12E7E22616383A00B66C86 /* storage.h */, + 0C12E7E32616383A00B66C86 /* transform.h */, + 0C12E7E42616383A00B66C86 /* common_omp.h */, + 0C12E7E52616383A00B66C86 /* export_c10_op_to_caffe2.h */, + 0C12E7E62616383A00B66C86 /* nomnigraph */, + 0C12E8022616383A00B66C86 /* module.h */, + 0C12E8032616383A00B66C86 /* init.h */, + 0C12E8042616383A00B66C86 /* net_dag_utils.h */, + 0C12E8052616383A00B66C86 /* stats.h */, + 0C12E8062616383A00B66C86 /* tensor.h */, + 0C12E8072616383A00B66C86 /* common_gpu.h */, + 0C12E8082616383A00B66C86 /* qtensor.h */, + 0C12E8092616383A00B66C86 /* net_parallel.h */, + 0C12E80A2616383A00B66C86 /* operator_gradient.h */, + 0C12E80B2616383A00B66C86 /* cudnn_wrappers.h */, + 0C12E80C2616383A00B66C86 /* distributions_stubs.h */, + 0C12E80D2616383A00B66C86 /* blob_serialization.h */, + ); + path = core; + sourceTree = "<group>"; + }; + 0C12E7DF2616383A00B66C86 /* hip */ = { + isa = PBXGroup; + children = ( + 0C12E7E02616383A00B66C86 /* miopen_wrapper.h */, + 0C12E7E12616383A00B66C86 /* common_miopen.h */, + ); + path = hip; + sourceTree = "<group>"; + }; + 0C12E7E62616383A00B66C86 /* nomnigraph */ = { + isa = PBXGroup; + children = ( + 0C12E7E72616383A00B66C86 /* Representations */, + 0C12E7E82616383A00B66C86 /* include */, + 0C12E8002616383A00B66C86 /* tests */, + ); + path = nomnigraph; + sourceTree = "<group>"; + }; + 0C12E7E72616383A00B66C86 /* Representations */ = { + isa = PBXGroup; + children = ( + ); + path = Representations; + sourceTree = "<group>"; + }; + 0C12E7E82616383A00B66C86 /* include */ = { + isa = PBXGroup; + children = ( + 0C12E7E92616383A00B66C86 /* nomnigraph */, + ); + path = include; + sourceTree = "<group>"; + }; + 0C12E7E92616383A00B66C86 /* nomnigraph */ = { + isa = PBXGroup; + children = ( + 0C12E7EA2616383A00B66C86 /* Generated */, + 0C12E7EE2616383A00B66C86 /* Representations */, + 0C12E7F22616383A00B66C86 /* Transformations */, + 0C12E7F52616383A00B66C86 /* Graph */, + 0C12E7FB2616383A00B66C86 /* Converters */, + 0C12E7FD2616383A00B66C86 /* Support */, + ); + path = nomnigraph; + sourceTree = "<group>"; + }; + 0C12E7EA2616383A00B66C86 /* Generated */ = { + isa = PBXGroup; + children = ( + 0C12E7EB2616383A00B66C86 /* OpClasses.h */, + 0C12E7EC2616383A00B66C86 /* OpEnum.h */, + 0C12E7ED2616383A00B66C86 /* OpNames.h */, + ); + path = Generated; + sourceTree = "<group>"; + }; + 0C12E7EE2616383A00B66C86 /* Representations */ = { + isa = PBXGroup; + children = ( + 
0C12E7EF2616383A00B66C86 /* Compiler.h */, + 0C12E7F02616383A00B66C86 /* NeuralNet.h */, + 0C12E7F12616383A00B66C86 /* ControlFlow.h */, + ); + path = Representations; + sourceTree = "<group>"; + }; + 0C12E7F22616383A00B66C86 /* Transformations */ = { + isa = PBXGroup; + children = ( + 0C12E7F32616383A00B66C86 /* SubgraphMatcher.h */, + 0C12E7F42616383A00B66C86 /* Match.h */, + ); + path = Transformations; + sourceTree = "<group>"; + }; + 0C12E7F52616383A00B66C86 /* Graph */ = { + isa = PBXGroup; + children = ( + 0C12E7F62616383A00B66C86 /* Algorithms.h */, + 0C12E7F72616383A00B66C86 /* TopoSort.h */, + 0C12E7F82616383A00B66C86 /* Graph.h */, + 0C12E7F92616383A00B66C86 /* TarjansImpl.h */, + 0C12E7FA2616383A00B66C86 /* BinaryMatchImpl.h */, + ); + path = Graph; + sourceTree = "<group>"; + }; + 0C12E7FB2616383A00B66C86 /* Converters */ = { + isa = PBXGroup; + children = ( + 0C12E7FC2616383A00B66C86 /* Dot.h */, + ); + path = Converters; + sourceTree = "<group>"; + }; + 0C12E7FD2616383A00B66C86 /* Support */ = { + isa = PBXGroup; + children = ( + 0C12E7FE2616383A00B66C86 /* Casting.h */, + 0C12E7FF2616383A00B66C86 /* Common.h */, + ); + path = Support; + sourceTree = "<group>"; + }; + 0C12E8002616383A00B66C86 /* tests */ = { + isa = PBXGroup; + children = ( + 0C12E8012616383A00B66C86 /* test_util.h */, + ); + path = tests; + sourceTree = "<group>"; + }; + 0C12E80E2616383A00B66C86 /* mpi */ = { + isa = PBXGroup; + children = ( + 0C12E80F2616383A00B66C86 /* mpi_common.h */, + 0C12E8102616383A00B66C86 /* mpi_ops.h */, + ); + path = mpi; + sourceTree = "<group>"; + }; + 0C12E8112616383A00B66C86 /* proto */ = { + isa = PBXGroup; + children = ( + 0C12E8122616383A00B66C86 /* caffe2_pb.h */, + 0C12E8132616383A00B66C86 /* torch_pb.h */, + ); + path = proto; + sourceTree = "<group>"; + }; + 0C12E8142616383A00B66C86 /* test */ = { + isa = PBXGroup; + children = ( + 0C12E8152616383A00B66C86 /* assets */, + ); + path = test; + sourceTree = "<group>"; + }; + 0C12E8152616383A00B66C86 /* assets */ = { + isa = PBXGroup; + children = ( + ); + path = assets; + sourceTree = "<group>"; + }; + 0C12E8162616383A00B66C86 /* operators */ = { + isa = PBXGroup; + children = ( + 0C12E8172616383A00B66C86 /* top_k.h */, + 0C12E8182616383A00B66C86 /* channel_stats_op.h */, + 0C12E8192616383A00B66C86 /* gru_unit_op.h */, + 0C12E81A2616383A00B66C86 /* half_float_ops.h */, + 0C12E81B2616383A00B66C86 /* sqr_op.h */, + 0C12E81C2616383A00B66C86 /* mean_op.h */, + 0C12E81D2616383A00B66C86 /* thresholded_relu_op.h */, + 0C12E81E2616383A00B66C86 /* ctc_greedy_decoder_op.h */, + 0C12E81F2616383A00B66C86 /* conv_op_cache_cudnn.h */, + 0C12E8202616383A00B66C86 /* utility_ops.h */, + 0C12E8212616383A00B66C86 /* selu_op.h */, + 0C12E8222616383A00B66C86 /* map_ops.h */, + 0C12E8232616383A00B66C86 /* roi_align_rotated_op.h */, + 0C12E8242616383A00B66C86 /* fused_rowwise_random_quantization_ops.h */, + 0C12E8252616383A00B66C86 /* stop_gradient.h */, + 0C12E8262616383A00B66C86 /* batch_gather_ops.h */, + 0C12E8272616383A00B66C86 /* asin_op.h */, + 0C12E8282616383A00B66C86 /* cosh_op.h */, + 0C12E8292616383A00B66C86 /* atan_op.h */, + 0C12E82A2616383A00B66C86 /* reverse_packed_segs_op.h */, + 0C12E82B2616383A00B66C86 /* given_tensor_byte_string_to_uint8_fill_op.h */, + 0C12E82C2616383A00B66C86 /* ensure_clipped_op.h */, + 0C12E82D2616383A00B66C86 /* conv_transpose_op.h */, + 0C12E82E2616383A00B66C86 /* generate_proposals_op_util_nms.h */, + 0C12E82F2616383A00B66C86 /* enforce_finite_op.h */, + 0C12E8302616383A00B66C86 /* 
conv_transpose_unpool_op_base.h */, + 0C12E8312616383A00B66C86 /* gather_fused_8bit_rowwise_op.h */, + 0C12E8322616383A00B66C86 /* batch_matmul_op.h */, + 0C12E8332616383A00B66C86 /* batch_bucketize_op.h */, + 0C12E8342616383A00B66C86 /* softsign_op.h */, + 0C12E8352616383A00B66C86 /* elementwise_logical_ops.h */, + 0C12E8362616383A00B66C86 /* percentile_op.h */, + 0C12E8372616383A00B66C86 /* length_split_op.h */, + 0C12E8382616383A00B66C86 /* locally_connected_op_impl.h */, + 0C12E8392616383A00B66C86 /* rmac_regions_op.h */, + 0C12E83A2616383A00B66C86 /* hard_sigmoid_op.h */, + 0C12E83B2616383A00B66C86 /* ensure_cpu_output_op.h */, + 0C12E83C2616383A00B66C86 /* batch_box_cox_op.h */, + 0C12E83D2616383A00B66C86 /* ctc_beam_search_decoder_op.h */, + 0C12E83E2616383A00B66C86 /* flexible_top_k.h */, + 0C12E83F2616383A00B66C86 /* fully_connected_op.h */, + 0C12E8402616383A00B66C86 /* key_split_ops.h */, + 0C12E8412616383A00B66C86 /* reciprocal_op.h */, + 0C12E8422616383A00B66C86 /* roi_align_gradient_op.h */, + 0C12E8432616383A00B66C86 /* group_norm_op.h */, + 0C12E8442616383A00B66C86 /* load_save_op.h */, + 0C12E8452616383A00B66C86 /* cos_op.h */, + 0C12E8462616383A00B66C86 /* expand_op.h */, + 0C12E8472616383A00B66C86 /* elementwise_ops.h */, + 0C12E8482616383A00B66C86 /* im2col_op.h */, + 0C12E8492616383A00B66C86 /* space_batch_op.h */, + 0C12E84A2616383A00B66C86 /* relu_op.h */, + 0C12E84B2616383A00B66C86 /* while_op.h */, + 0C12E84C2616383A00B66C86 /* remove_data_blocks_op.h */, + 0C12E84D2616383A00B66C86 /* elementwise_mul_op.h */, + 0C12E84E2616383A00B66C86 /* numpy_tile_op.h */, + 0C12E84F2616383A00B66C86 /* rowmul_op.h */, + 0C12E8502616383A00B66C86 /* accumulate_op.h */, + 0C12E8512616383A00B66C86 /* sparse_lp_regularizer_op.h */, + 0C12E8522616383A00B66C86 /* bisect_percentile_op.h */, + 0C12E8532616383A00B66C86 /* tile_op.h */, + 0C12E8542616383A00B66C86 /* gelu_op.h */, + 0C12E8552616383A00B66C86 /* stats_put_ops.h */, + 0C12E8562616383A00B66C86 /* given_tensor_fill_op.h */, + 0C12E8572616383A00B66C86 /* accuracy_op.h */, + 0C12E8582616383A00B66C86 /* bbox_transform_op.h */, + 0C12E8592616383A00B66C86 /* boolean_unmask_ops.h */, + 0C12E85A2616383A00B66C86 /* glu_op.h */, + 0C12E85B2616383A00B66C86 /* resize_3d_op.h */, + 0C12E85C2616383A00B66C86 /* unsafe_coalesce.h */, + 0C12E85D2616383A00B66C86 /* conv_op.h */, + 0C12E85E2616383A00B66C86 /* conv_op_impl.h */, + 0C12E85F2616383A00B66C86 /* erf_op.h */, + 0C12E8602616383A00B66C86 /* fused_rowwise_8bit_conversion_ops.h */, + 0C12E8612616383A00B66C86 /* locally_connected_op_util.h */, + 0C12E8622616383A00B66C86 /* channel_backprop_stats_op.h */, + 0C12E8632616383A00B66C86 /* order_switch_ops.h */, + 0C12E8642616383A00B66C86 /* lengths_reducer_fused_nbit_rowwise_ops.h */, + 0C12E8652616383A00B66C86 /* lengths_reducer_fused_8bit_rowwise_ops.h */, + 0C12E8662616383A00B66C86 /* load_save_op_util.h */, + 0C12E8672616383A00B66C86 /* conv_transpose_op_impl.h */, + 0C12E8682616383A00B66C86 /* op_utils_cudnn.h */, + 0C12E8692616383A00B66C86 /* prelu_op.h */, + 0C12E86A2616383A00B66C86 /* box_with_nms_limit_op.h */, + 0C12E86B2616383A00B66C86 /* fc_inference.h */, + 0C12E86C2616383A00B66C86 /* distance_op.h */, + 0C12E86D2616383A00B66C86 /* data_couple.h */, + 0C12E86E2616383A00B66C86 /* dataset_ops.h */, + 0C12E86F2616383A00B66C86 /* merge_id_lists_op.h */, + 0C12E8702616383A00B66C86 /* generate_proposals_op_util_nms_gpu.h */, + 0C12E8712616383A00B66C86 /* async_net_barrier_op.h */, + 0C12E8722616383A00B66C86 /* deform_conv_op.h */, + 
0C12E8732616383A00B66C86 /* quantized */, + 0C12E88C2616383A00B66C86 /* sqrt_op.h */, + 0C12E88D2616383A00B66C86 /* elementwise_div_op.h */, + 0C12E88E2616383A00B66C86 /* deform_conv_op_impl.h */, + 0C12E88F2616383A00B66C86 /* feature_maps_ops.h */, + 0C12E8902616383A00B66C86 /* text_file_reader_utils.h */, + 0C12E8912616383A00B66C86 /* scale_blobs_op.h */, + 0C12E8922616383A00B66C86 /* pool_op.h */, + 0C12E8932616383A00B66C86 /* conv_transpose_op_mobile_impl.h */, + 0C12E8942616383A00B66C86 /* dense_vector_to_id_list_op.h */, + 0C12E8952616383A00B66C86 /* minmax_ops.h */, + 0C12E8962616383A00B66C86 /* lengths_tile_op.h */, + 0C12E8972616383A00B66C86 /* pool_op_util.h */, + 0C12E8982616383A00B66C86 /* no_default_engine_op.h */, + 0C12E8992616383A00B66C86 /* onnx_while_op.h */, + 0C12E89A2616383A00B66C86 /* reduce_front_back_sum_mean_ops.h */, + 0C12E89B2616383A00B66C86 /* roi_pool_op.h */, + 0C12E89C2616383A00B66C86 /* flatten_op.h */, + 0C12E89D2616383A00B66C86 /* self_binning_histogram_op.h */, + 0C12E89E2616383A00B66C86 /* normalize_l1_op.h */, + 0C12E89F2616383A00B66C86 /* pow_op.h */, + 0C12E8A02616383A00B66C86 /* exp_op.h */, + 0C12E8A12616383A00B66C86 /* heatmap_max_keypoint_op.h */, + 0C12E8A22616383A00B66C86 /* assert_op.h */, + 0C12E8A32616383A00B66C86 /* piecewise_linear_transform_op.h */, + 0C12E8A42616383A00B66C86 /* cbrt_op.h */, + 0C12E8A52616383A00B66C86 /* weighted_sample_op.h */, + 0C12E8A62616383A00B66C86 /* tanh_op.h */, + 0C12E8A72616383A00B66C86 /* softmax_op.h */, + 0C12E8A82616383A00B66C86 /* listwise_l2r_op.h */, + 0C12E8A92616383A00B66C86 /* variable_length_sequence_padding.h */, + 0C12E8AA2616383A00B66C86 /* elementwise_add_op.h */, + 0C12E8AB2616383A00B66C86 /* leaky_relu_op.h */, + 0C12E8AC2616383A00B66C86 /* elementwise_linear_op.h */, + 0C12E8AD2616383A00B66C86 /* elu_op.h */, + 0C12E8AE2616383A00B66C86 /* jsd_op.h */, + 0C12E8AF2616383A00B66C86 /* collect_and_distribute_fpn_rpn_proposals_op.h */, + 0C12E8B02616383A00B66C86 /* reduce_ops.h */, + 0C12E8B12616383A00B66C86 /* string_ops.h */, + 0C12E8B22616383A00B66C86 /* boolean_mask_ops.h */, + 0C12E8B32616383A00B66C86 /* local_response_normalization_op.h */, + 0C12E8B42616383A00B66C86 /* partition_ops.h */, + 0C12E8B52616383A00B66C86 /* sparse_dropout_with_replacement_op.h */, + 0C12E8B62616383A00B66C86 /* loss_op.h */, + 0C12E8B72616383A00B66C86 /* counter_ops.h */, + 0C12E8B82616383A00B66C86 /* h_softmax_op.h */, + 0C12E8B92616383A00B66C86 /* lengths_reducer_rowwise_8bit_ops.h */, + 0C12E8BA2616383A00B66C86 /* copy_rows_to_tensor_op.h */, + 0C12E8BB2616383A00B66C86 /* moments_op.h */, + 0C12E8BC2616383A00B66C86 /* logit_op.h */, + 0C12E8BD2616383A00B66C86 /* perplexity_op.h */, + 0C12E8BE2616383A00B66C86 /* roi_align_rotated_gradient_op.h */, + 0C12E8BF2616383A00B66C86 /* ceil_op.h */, + 0C12E8C02616383A00B66C86 /* find_op.h */, + 0C12E8C12616383A00B66C86 /* layer_norm_op.h */, + 0C12E8C22616383A00B66C86 /* negate_gradient_op.h */, + 0C12E8C32616383A00B66C86 /* resize_op.h */, + 0C12E8C42616383A00B66C86 /* lengths_reducer_ops.h */, + 0C12E8C52616383A00B66C86 /* batch_sparse_to_dense_op.h */, + 0C12E8C62616383A00B66C86 /* replace_nan_op.h */, + 0C12E8C72616383A00B66C86 /* max_pool_with_index_gpu.h */, + 0C12E8C82616383A00B66C86 /* find_duplicate_elements_op.h */, + 0C12E8C92616383A00B66C86 /* expand_squeeze_dims_op.h */, + 0C12E8CA2616383A00B66C86 /* sinusoid_position_encoding_op.h */, + 0C12E8CB2616383A00B66C86 /* pack_segments.h */, + 0C12E8CC2616383A00B66C86 /* softplus_op.h */, + 0C12E8CD2616383A00B66C86 
/* quantile_op.h */, + 0C12E8CE2616383A00B66C86 /* sinh_op.h */, + 0C12E8CF2616383A00B66C86 /* fused_rowwise_nbitfake_conversion_ops.h */, + 0C12E8D02616383A00B66C86 /* cross_entropy_op.h */, + 0C12E8D12616383A00B66C86 /* feed_blob_op.h */, + 0C12E8D22616383A00B66C86 /* slice_op.h */, + 0C12E8D32616383A00B66C86 /* rsqrt_op.h */, + 0C12E8D42616383A00B66C86 /* free_op.h */, + 0C12E8D52616383A00B66C86 /* square_root_divide_op.h */, + 0C12E8D62616383A00B66C86 /* conv_op_shared.h */, + 0C12E8D72616383A00B66C86 /* apmeter_op.h */, + 0C12E8D82616383A00B66C86 /* lstm_unit_op.h */, + 0C12E8D92616383A00B66C86 /* index_hash_ops.h */, + 0C12E8DA2616383A00B66C86 /* lengths_pad_op.h */, + 0C12E8DB2616383A00B66C86 /* elementwise_ops_utils.h */, + 0C12E8DC2616383A00B66C86 /* sparse_normalize_op.h */, + 0C12E8DD2616383A00B66C86 /* multi_class_accuracy_op.h */, + 0C12E8DE2616383A00B66C86 /* cast_op.h */, + 0C12E8DF2616383A00B66C86 /* transpose_op.h */, + 0C12E8E02616383A00B66C86 /* create_scope_op.h */, + 0C12E8E12616383A00B66C86 /* zero_gradient_op.h */, + 0C12E8E22616383A00B66C86 /* lstm_utils.h */, + 0C12E8E32616383A00B66C86 /* tt_linear_op.h */, + 0C12E8E42616383A00B66C86 /* relu_n_op.h */, + 0C12E8E52616383A00B66C86 /* generate_proposals_op.h */, + 0C12E8E62616383A00B66C86 /* hip */, + 0C12E8E82616383A00B66C86 /* lpnorm_op.h */, + 0C12E8E92616383A00B66C86 /* sequence_ops.h */, + 0C12E8EA2616383A00B66C86 /* abs_op.h */, + 0C12E8EB2616383A00B66C86 /* activation_ops_cudnn.h */, + 0C12E8EC2616383A00B66C86 /* elementwise_op_test.h */, + 0C12E8ED2616383A00B66C86 /* inference_lstm_op.h */, + 0C12E8EE2616383A00B66C86 /* concat_split_op.h */, + 0C12E8EF2616383A00B66C86 /* reduction_ops.h */, + 0C12E8F02616383A00B66C86 /* gather_op.h */, + 0C12E8F12616383A00B66C86 /* log_op.h */, + 0C12E8F22616383A00B66C86 /* conv_pool_op_base.h */, + 0C12E8F32616383A00B66C86 /* unique_ops.h */, + 0C12E8F42616383A00B66C86 /* elementwise_sub_op.h */, + 0C12E8F52616383A00B66C86 /* segment_reduction_op.h */, + 0C12E8F62616383A00B66C86 /* fused_rowwise_nbit_conversion_ops.h */, + 0C12E8F72616383A00B66C86 /* stump_func_op.h */, + 0C12E8F82616383A00B66C86 /* swish_op.h */, + 0C12E8F92616383A00B66C86 /* pack_rnn_sequence_op.h */, + 0C12E8FA2616383A00B66C86 /* softmax_with_loss_op.h */, + 0C12E8FB2616383A00B66C86 /* integral_image_op.h */, + 0C12E8FC2616383A00B66C86 /* mish_op.h */, + 0C12E8FD2616383A00B66C86 /* weighted_multi_sampling_op.h */, + 0C12E8FE2616383A00B66C86 /* bucketize_op.h */, + 0C12E8FF2616383A00B66C86 /* is_empty_op.h */, + 0C12E9002616383A00B66C86 /* mod_op.h */, + 0C12E9012616383A00B66C86 /* clip_op.h */, + 0C12E9022616383A00B66C86 /* prepend_dim_op.h */, + 0C12E9032616383A00B66C86 /* copy_op.h */, + 0C12E9042616383A00B66C86 /* rank_loss_op.h */, + 0C12E9052616383A00B66C86 /* lengths_top_k_op.h */, + 0C12E9062616383A00B66C86 /* summarize_op.h */, + 0C12E9072616383A00B66C86 /* one_hot_ops.h */, + 0C12E9082616383A00B66C86 /* cc_bmm_bg_op.h */, + 0C12E9092616383A00B66C86 /* acos_op.h */, + 0C12E90A2616383A00B66C86 /* softmax_utils.h */, + 0C12E90B2616383A00B66C86 /* tensor_protos_db_input.h */, + 0C12E90C2616383A00B66C86 /* generate_proposals_op_util_boxes.h */, + 0C12E90D2616383A00B66C86 /* conv_transpose_op_mobile.h */, + 0C12E90E2616383A00B66C86 /* arg_ops.h */, + 0C12E90F2616383A00B66C86 /* negative_op.h */, + 0C12E9102616383A00B66C86 /* operator_fallback_gpu.h */, + 0C12E9112616383A00B66C86 /* margin_ranking_criterion_op.h */, + 0C12E9122616383A00B66C86 /* matmul_op.h */, + 0C12E9132616383A00B66C86 /* 
roi_align_op.h */, + 0C12E9142616383A00B66C86 /* pad_op.h */, + 0C12E9152616383A00B66C86 /* histogram_op.h */, + 0C12E9162616383A00B66C86 /* floor_op.h */, + 0C12E9172616383A00B66C86 /* normalize_op.h */, + 0C12E9182616383A00B66C86 /* cube_op.h */, + 0C12E9192616383A00B66C86 /* reshape_op.h */, + 0C12E91A2616383A00B66C86 /* instance_norm_op.h */, + 0C12E91B2616383A00B66C86 /* ngram_ops.h */, + 0C12E91C2616383A00B66C86 /* if_op.h */, + 0C12E91D2616383A00B66C86 /* reduce_front_back_max_ops.h */, + 0C12E91E2616383A00B66C86 /* reducer_functors.h */, + 0C12E91F2616383A00B66C86 /* affine_channel_op.h */, + 0C12E9202616383A00B66C86 /* sigmoid_op.h */, + 0C12E9212616383A00B66C86 /* channel_shuffle_op.h */, + 0C12E9222616383A00B66C86 /* locally_connected_op.h */, + 0C12E9232616383A00B66C86 /* conditional_op.h */, + 0C12E9242616383A00B66C86 /* rms_norm_op.h */, + 0C12E9252616383A00B66C86 /* dropout_op.h */, + 0C12E9262616383A00B66C86 /* gather_ranges_to_dense_op.h */, + 0C12E9272616383A00B66C86 /* shape_op.h */, + 0C12E9282616383A00B66C86 /* index_ops.h */, + 0C12E9292616383A00B66C86 /* tan_op.h */, + 0C12E92A2616383A00B66C86 /* scale_op.h */, + 0C12E92B2616383A00B66C86 /* cosine_embedding_criterion_op.h */, + 0C12E92C2616383A00B66C86 /* sparse_to_dense_op.h */, + 0C12E92D2616383A00B66C86 /* quant_decode_op.h */, + 0C12E92E2616383A00B66C86 /* rnn */, + 0C12E9372616383A00B66C86 /* sparse_to_dense_mask_op.h */, + 0C12E9382616383A00B66C86 /* sin_op.h */, + 0C12E9392616383A00B66C86 /* upsample_op.h */, + 0C12E93A2616383A00B66C86 /* filler_op.h */, + 0C12E93B2616383A00B66C86 /* batch_permutation_op.h */, + 0C12E93C2616383A00B66C86 /* spatial_softmax_with_loss_op.h */, + 0C12E93D2616383A00B66C86 /* batch_moments_op.h */, + 0C12E93E2616383A00B66C86 /* alias_with_name.h */, + 0C12E93F2616383A00B66C86 /* do_op.h */, + 0C12E9402616383A00B66C86 /* prefetch_op.h */, + 0C12E9412616383A00B66C86 /* byte_weight_dequant_op.h */, + 0C12E9422616383A00B66C86 /* spatial_batch_norm_op.h */, + ); + path = operators; + sourceTree = "<group>"; + }; + 0C12E8732616383A00B66C86 /* quantized */ = { + isa = PBXGroup; + children = ( + 0C12E8742616383A00B66C86 /* int8_relu_op.h */, + 0C12E8752616383A00B66C86 /* int8_channel_shuffle_op.h */, + 0C12E8762616383A00B66C86 /* int8_concat_op.h */, + 0C12E8772616383A00B66C86 /* int8_dequantize_op.h */, + 0C12E8782616383A00B66C86 /* int8_slice_op.h */, + 0C12E8792616383A00B66C86 /* int8_quantize_op.h */, + 0C12E87A2616383A00B66C86 /* int8_flatten_op.h */, + 0C12E87B2616383A00B66C86 /* int8_max_pool_op.h */, + 0C12E87C2616383A00B66C86 /* int8_softmax_op.h */, + 0C12E87D2616383A00B66C86 /* int8_average_pool_op.h */, + 0C12E87E2616383A00B66C86 /* int8_fc_op.h */, + 0C12E87F2616383A00B66C86 /* int8_conv_op.h */, + 0C12E8802616383A00B66C86 /* int8_test_utils.h */, + 0C12E8812616383A00B66C86 /* int8_roi_align_op.h */, + 0C12E8822616383A00B66C86 /* int8_given_tensor_fill_op.h */, + 0C12E8832616383A00B66C86 /* int8_reshape_op.h */, + 0C12E8842616383A00B66C86 /* int8_utils.h */, + 0C12E8852616383A00B66C86 /* int8_resize_nearest_op.h */, + 0C12E8862616383A00B66C86 /* int8_sigmoid_op.h */, + 0C12E8872616383A00B66C86 /* int8_simd.h */, + 0C12E8882616383A00B66C86 /* int8_conv_transpose_op.h */, + 0C12E8892616383A00B66C86 /* int8_leaky_relu_op.h */, + 0C12E88A2616383A00B66C86 /* int8_add_op.h */, + 0C12E88B2616383A00B66C86 /* int8_transpose_op.h */, + ); + path = quantized; + sourceTree = "<group>"; + }; + 0C12E8E62616383A00B66C86 /* hip */ = { + isa = PBXGroup; + children = ( + 0C12E8E72616383A00B66C86 
/* activation_ops_miopen.h */, + ); + path = hip; + sourceTree = "<group>"; + }; + 0C12E92E2616383A00B66C86 /* rnn */ = { + isa = PBXGroup; + children = ( + 0C12E92F2616383A00B66C86 /* recurrent_network_blob_fetcher_op.h */, + 0C12E9302616383A00B66C86 /* recurrent_op_cudnn.h */, + 0C12E9312616383A00B66C86 /* recurrent_network_executor_gpu.h */, + 0C12E9322616383A00B66C86 /* recurrent_network_executor_incl.h */, + 0C12E9332616383A00B66C86 /* hip */, + 0C12E9352616383A00B66C86 /* recurrent_network_executor.h */, + 0C12E9362616383A00B66C86 /* recurrent_network_op.h */, + ); + path = rnn; + sourceTree = "<group>"; + }; + 0C12E9332616383A00B66C86 /* hip */ = { + isa = PBXGroup; + children = ( + 0C12E9342616383A00B66C86 /* recurrent_op_miopen.h */, + ); + path = hip; + sourceTree = "<group>"; + }; + 0C12E9432616383A00B66C86 /* onnx */ = { + isa = PBXGroup; + children = ( + 0C12E9442616383A00B66C86 /* helper.h */, + 0C12E9452616383A00B66C86 /* device.h */, + 0C12E9462616383A00B66C86 /* onnxifi_init.h */, + 0C12E9472616383A00B66C86 /* backend.h */, + 0C12E9482616383A00B66C86 /* torch_ops */, + 0C12E94C2616383A00B66C86 /* backend_rep.h */, + 0C12E94D2616383A00B66C86 /* onnx_exporter.h */, + 0C12E94E2616383A00B66C86 /* offline_tensor.h */, + 0C12E94F2616383A00B66C86 /* onnxifi_graph_info.h */, + ); + path = onnx; + sourceTree = "<group>"; + }; + 0C12E9482616383A00B66C86 /* torch_ops */ = { + isa = PBXGroup; + children = ( + 0C12E9492616383A00B66C86 /* schema.h */, + 0C12E94A2616383A00B66C86 /* constants.h */, + 0C12E94B2616383A00B66C86 /* operator_sets.h */, + ); + path = torch_ops; + sourceTree = "<group>"; + }; + 0C12E9502616383A00B66C86 /* python */ = { + isa = PBXGroup; + children = ( + 0C12E9512616383A00B66C86 /* serialized_test */, + 0C12E9542616383A00B66C86 /* pybind_state.h */, + 0C12E9552616383A00B66C86 /* pybind_state_registry.h */, + 0C12E9562616383A00B66C86 /* ideep */, + 0C12E9572616383A00B66C86 /* mint */, + 0C12E95B2616383A00B66C86 /* layers */, + 0C12E95C2616383A00B66C86 /* test */, + 0C12E95D2616383A00B66C86 /* dlpack.h */, + 0C12E95E2616383A00B66C86 /* onnx */, + 0C12E9612616383A00B66C86 /* trt */, + 0C12E9632616383A00B66C86 /* operator_test */, + 0C12E9642616383A00B66C86 /* models */, + 0C12E9662616383A00B66C86 /* docs */, + 0C12E9672616383A00B66C86 /* fakelowp */, + 0C12E9682616383A00B66C86 /* modeling */, + 0C12E9692616383A00B66C86 /* pybind_state_dlpack.h */, + 0C12E96A2616383A00B66C86 /* mkl */, + 0C12E96B2616383A00B66C86 /* examples */, + 0C12E96C2616383A00B66C86 /* benchmarks */, + 0C12E96D2616383A00B66C86 /* predictor */, + 0C12E96E2616383A00B66C86 /* helpers */, + 0C12E96F2616383A00B66C86 /* rnn */, + ); + path = python; + sourceTree = "<group>"; + }; + 0C12E9512616383A00B66C86 /* serialized_test */ = { + isa = PBXGroup; + children = ( + 0C12E9522616383A00B66C86 /* data */, + ); + path = serialized_test; + sourceTree = "<group>"; + }; + 0C12E9522616383A00B66C86 /* data */ = { + isa = PBXGroup; + children = ( + 0C12E9532616383A00B66C86 /* operator_test */, + ); + path = data; + sourceTree = "<group>"; + }; + 0C12E9532616383A00B66C86 /* operator_test */ = { + isa = PBXGroup; + children = ( + ); + path = operator_test; + sourceTree = "<group>"; + }; + 0C12E9562616383A00B66C86 /* ideep */ = { + isa = PBXGroup; + children = ( + ); + path = ideep; + sourceTree = "<group>"; + }; + 0C12E9572616383A00B66C86 /* mint */ = { + isa = PBXGroup; + children = ( + 0C12E9582616383A00B66C86 /* static */, + 0C12E95A2616383A00B66C86 /* templates */, + ); + path = mint; + sourceTree = 
"<group>"; + }; + 0C12E9582616383A00B66C86 /* static */ = { + isa = PBXGroup; + children = ( + 0C12E9592616383A00B66C86 /* css */, + ); + path = static; + sourceTree = "<group>"; + }; + 0C12E9592616383A00B66C86 /* css */ = { + isa = PBXGroup; + children = ( + ); + path = css; + sourceTree = "<group>"; + }; + 0C12E95A2616383A00B66C86 /* templates */ = { + isa = PBXGroup; + children = ( + ); + path = templates; + sourceTree = "<group>"; + }; + 0C12E95B2616383A00B66C86 /* layers */ = { + isa = PBXGroup; + children = ( + ); + path = layers; + sourceTree = "<group>"; + }; + 0C12E95C2616383A00B66C86 /* test */ = { + isa = PBXGroup; + children = ( + ); + path = test; + sourceTree = "<group>"; + }; + 0C12E95E2616383A00B66C86 /* onnx */ = { + isa = PBXGroup; + children = ( + 0C12E95F2616383A00B66C86 /* bin */, + 0C12E9602616383A00B66C86 /* tests */, + ); + path = onnx; + sourceTree = "<group>"; + }; + 0C12E95F2616383A00B66C86 /* bin */ = { + isa = PBXGroup; + children = ( + ); + path = bin; + sourceTree = "<group>"; + }; + 0C12E9602616383A00B66C86 /* tests */ = { + isa = PBXGroup; + children = ( + ); + path = tests; + sourceTree = "<group>"; + }; + 0C12E9612616383A00B66C86 /* trt */ = { + isa = PBXGroup; + children = ( + 0C12E9622616383A00B66C86 /* data */, + ); + path = trt; + sourceTree = "<group>"; + }; + 0C12E9622616383A00B66C86 /* data */ = { + isa = PBXGroup; + children = ( + ); + path = data; + sourceTree = "<group>"; + }; + 0C12E9632616383A00B66C86 /* operator_test */ = { + isa = PBXGroup; + children = ( + ); + path = operator_test; + sourceTree = "<group>"; + }; + 0C12E9642616383A00B66C86 /* models */ = { + isa = PBXGroup; + children = ( + 0C12E9652616383A00B66C86 /* seq2seq */, + ); + path = models; + sourceTree = "<group>"; + }; + 0C12E9652616383A00B66C86 /* seq2seq */ = { + isa = PBXGroup; + children = ( + ); + path = seq2seq; + sourceTree = "<group>"; + }; + 0C12E9662616383A00B66C86 /* docs */ = { + isa = PBXGroup; + children = ( + ); + path = docs; + sourceTree = "<group>"; + }; + 0C12E9672616383A00B66C86 /* fakelowp */ = { + isa = PBXGroup; + children = ( + ); + path = fakelowp; + sourceTree = "<group>"; + }; + 0C12E9682616383A00B66C86 /* modeling */ = { + isa = PBXGroup; + children = ( + ); + path = modeling; + sourceTree = "<group>"; + }; + 0C12E96A2616383A00B66C86 /* mkl */ = { + isa = PBXGroup; + children = ( + ); + path = mkl; + sourceTree = "<group>"; + }; + 0C12E96B2616383A00B66C86 /* examples */ = { + isa = PBXGroup; + children = ( + ); + path = examples; + sourceTree = "<group>"; + }; + 0C12E96C2616383A00B66C86 /* benchmarks */ = { + isa = PBXGroup; + children = ( + ); + path = benchmarks; + sourceTree = "<group>"; + }; + 0C12E96D2616383A00B66C86 /* predictor */ = { + isa = PBXGroup; + children = ( + ); + path = predictor; + sourceTree = "<group>"; + }; + 0C12E96E2616383A00B66C86 /* helpers */ = { + isa = PBXGroup; + children = ( + ); + path = helpers; + sourceTree = "<group>"; + }; + 0C12E96F2616383A00B66C86 /* rnn */ = { + isa = PBXGroup; + children = ( + ); + path = rnn; + sourceTree = "<group>"; + }; + 0C12E9702616383A00B66C86 /* distributed */ = { + isa = PBXGroup; + children = ( + 0C12E9712616383A00B66C86 /* redis_store_handler.h */, + 0C12E9722616383A00B66C86 /* file_store_handler_op.h */, + 0C12E9732616383A00B66C86 /* store_handler.h */, + 0C12E9742616383A00B66C86 /* store_ops.h */, + 0C12E9752616383A00B66C86 /* file_store_handler.h */, + 0C12E9762616383A00B66C86 /* redis_store_handler_op.h */, + ); + path = distributed; + sourceTree = "<group>"; + }; + 
0C12E9772616383A00B66C86 /* perfkernels */ = { + isa = PBXGroup; + children = ( + 0C12E9782616383A00B66C86 /* embedding_lookup.h */, + 0C12E9792616383A00B66C86 /* fused_8bit_rowwise_embedding_lookup_idx.h */, + 0C12E97A2616383A00B66C86 /* lstm_unit_cpu-impl.h */, + 0C12E97B2616383A00B66C86 /* embedding_lookup_idx.h */, + 0C12E97C2616383A00B66C86 /* adagrad.h */, + 0C12E97D2616383A00B66C86 /* lstm_unit_cpu.h */, + 0C12E97E2616383A00B66C86 /* cvtsh_ss_bugfix.h */, + 0C12E97F2616383A00B66C86 /* common.h */, + 0C12E9802616383A00B66C86 /* math.h */, + 0C12E9812616383A00B66C86 /* typed_axpy.h */, + 0C12E9822616383A00B66C86 /* fused_nbit_rowwise_conversion.h */, + 0C12E9832616383A00B66C86 /* fused_8bit_rowwise_embedding_lookup.h */, + 0C12E9842616383A00B66C86 /* lstm_unit_cpu_common.h */, + ); + path = perfkernels; + sourceTree = "<group>"; + }; + 0C12E9852616383A00B66C86 /* experiments */ = { + isa = PBXGroup; + children = ( + 0C12E9862616383A00B66C86 /* operators */, + 0C12E98F2616383A00B66C86 /* python */, + ); + path = experiments; + sourceTree = "<group>"; + }; + 0C12E9862616383A00B66C86 /* operators */ = { + isa = PBXGroup; + children = ( + 0C12E9872616383A00B66C86 /* fully_connected_op_decomposition.h */, + 0C12E9882616383A00B66C86 /* fully_connected_op_sparse.h */, + 0C12E9892616383A00B66C86 /* tt_contraction_op.h */, + 0C12E98A2616383A00B66C86 /* fully_connected_op_prune.h */, + 0C12E98B2616383A00B66C86 /* funhash_op.h */, + 0C12E98C2616383A00B66C86 /* sparse_funhash_op.h */, + 0C12E98D2616383A00B66C86 /* sparse_matrix_reshape_op.h */, + 0C12E98E2616383A00B66C86 /* tt_pad_op.h */, + ); + path = operators; + sourceTree = "<group>"; + }; + 0C12E98F2616383A00B66C86 /* python */ = { + isa = PBXGroup; + children = ( + ); + path = python; + sourceTree = "<group>"; + }; + 0C12E9902616383A00B66C86 /* cuda_rtc */ = { + isa = PBXGroup; + children = ( + 0C12E9912616383A00B66C86 /* common_rtc.h */, + ); + path = cuda_rtc; + sourceTree = "<group>"; + }; + 0C12E9922616383A00B66C86 /* serialize */ = { + isa = PBXGroup; + children = ( + 0C12E9932616383A00B66C86 /* read_adapter_interface.h */, + 0C12E9942616383A00B66C86 /* crc_alt.h */, + 0C12E9952616383A00B66C86 /* versions.h */, + 0C12E9962616383A00B66C86 /* inline_container.h */, + 0C12E9972616383A00B66C86 /* file_adapter.h */, + 0C12E9982616383A00B66C86 /* istream_adapter.h */, + ); + path = serialize; + sourceTree = "<group>"; + }; + 0C12E9992616383A00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12E99A2616383A00B66C86 /* filler.h */, + 0C12E99B2616383A00B66C86 /* math-detail.h */, + 0C12E99C2616383A00B66C86 /* signal_handler.h */, + 0C12E99D2616383A00B66C86 /* cpu_neon.h */, + 0C12E99E2616383A00B66C86 /* conversions.h */, + 0C12E99F2616383A00B66C86 /* string_utils.h */, + 0C12E9A02616383A00B66C86 /* simple_queue.h */, + 0C12E9A12616383A00B66C86 /* cpuid.h */, + 0C12E9A22616383A00B66C86 /* threadpool */, + 0C12E9A92616383A00B66C86 /* math */, + 0C12E9B02616383A00B66C86 /* fixed_divisor.h */, + 0C12E9B12616383A00B66C86 /* proto_wrap.h */, + 0C12E9B22616383A00B66C86 /* bench_utils.h */, + 0C12E9B32616383A00B66C86 /* cast.h */, + 0C12E9B42616383A00B66C86 /* hip */, + 0C12E9B52616383A00B66C86 /* murmur_hash3.h */, + 0C12E9B62616383A00B66C86 /* math.h */, + 0C12E9B72616383B00B66C86 /* eigen_utils.h */, + 0C12E9B82616383B00B66C86 /* smart_tensor_printer.h */, + 0C12E9B92616383B00B66C86 /* proto_convert.h */, + 0C12E9BA2616383B00B66C86 /* proto_utils.h */, + 0C12E9BB2616383B00B66C86 /* cblas.h */, + 0C12E9BC2616383B00B66C86 /* map_utils.h */, 
+ 0C12E9BD2616383B00B66C86 /* zmq_helper.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12E9A22616383A00B66C86 /* threadpool */ = { + isa = PBXGroup; + children = ( + 0C12E9A32616383A00B66C86 /* ThreadPool.h */, + 0C12E9A42616383A00B66C86 /* ThreadPoolCommon.h */, + 0C12E9A52616383A00B66C86 /* pthreadpool.h */, + 0C12E9A62616383A00B66C86 /* pthreadpool-cpp.h */, + 0C12E9A72616383A00B66C86 /* WorkersPool.h */, + 0C12E9A82616383A00B66C86 /* thread_pool_guard.h */, + ); + path = threadpool; + sourceTree = "<group>"; + }; + 0C12E9A92616383A00B66C86 /* math */ = { + isa = PBXGroup; + children = ( + 0C12E9AA2616383A00B66C86 /* utils.h */, + 0C12E9AB2616383A00B66C86 /* broadcast.h */, + 0C12E9AC2616383A00B66C86 /* elementwise.h */, + 0C12E9AD2616383A00B66C86 /* half_utils.h */, + 0C12E9AE2616383A00B66C86 /* reduce.h */, + 0C12E9AF2616383A00B66C86 /* transpose.h */, + ); + path = math; + sourceTree = "<group>"; + }; + 0C12E9B42616383A00B66C86 /* hip */ = { + isa = PBXGroup; + children = ( + ); + path = hip; + sourceTree = "<group>"; + }; + 0C12E9BE2616383B00B66C86 /* contrib */ = { + isa = PBXGroup; + children = ( + 0C12E9BF2616383B00B66C86 /* nnpack */, + 0C12E9C02616383B00B66C86 /* warpctc */, + 0C12E9C22616383B00B66C86 /* nccl */, + 0C12E9C42616383B00B66C86 /* ideep */, + 0C12E9C52616383B00B66C86 /* docker-ubuntu-14.04 */, + 0C12E9C62616383B00B66C86 /* playground */, + 0C12E9C82616383B00B66C86 /* gloo */, + 0C12E9D22616383B00B66C86 /* fakelowp */, + 0C12E9E42616383B00B66C86 /* script */, + 0C12E9E62616383B00B66C86 /* opencl */, + 0C12E9E92616383B00B66C86 /* prof */, + 0C12E9EB2616383B00B66C86 /* tensorrt */, + 0C12E9EF2616383B00B66C86 /* shm_mutex */, + 0C12E9F12616383B00B66C86 /* tensorboard */, + 0C12E9F22616383B00B66C86 /* aten */, + 0C12E9F62616383B00B66C86 /* pytorch */, + ); + path = contrib; + sourceTree = "<group>"; + }; + 0C12E9BF2616383B00B66C86 /* nnpack */ = { + isa = PBXGroup; + children = ( + ); + path = nnpack; + sourceTree = "<group>"; + }; + 0C12E9C02616383B00B66C86 /* warpctc */ = { + isa = PBXGroup; + children = ( + 0C12E9C12616383B00B66C86 /* ctc_op.h */, + ); + path = warpctc; + sourceTree = "<group>"; + }; + 0C12E9C22616383B00B66C86 /* nccl */ = { + isa = PBXGroup; + children = ( + 0C12E9C32616383B00B66C86 /* cuda_nccl_gpu.h */, + ); + path = nccl; + sourceTree = "<group>"; + }; + 0C12E9C42616383B00B66C86 /* ideep */ = { + isa = PBXGroup; + children = ( + ); + path = ideep; + sourceTree = "<group>"; + }; + 0C12E9C52616383B00B66C86 /* docker-ubuntu-14.04 */ = { + isa = PBXGroup; + children = ( + ); + path = "docker-ubuntu-14.04"; + sourceTree = "<group>"; + }; + 0C12E9C62616383B00B66C86 /* playground */ = { + isa = PBXGroup; + children = ( + 0C12E9C72616383B00B66C86 /* resnetdemo */, + ); + path = playground; + sourceTree = "<group>"; + }; + 0C12E9C72616383B00B66C86 /* resnetdemo */ = { + isa = PBXGroup; + children = ( + ); + path = resnetdemo; + sourceTree = "<group>"; + }; + 0C12E9C82616383B00B66C86 /* gloo */ = { + isa = PBXGroup; + children = ( + 0C12E9C92616383B00B66C86 /* allreduce_ops.h */, + 0C12E9CA2616383B00B66C86 /* allgather_ops.h */, + 0C12E9CB2616383B00B66C86 /* context.h */, + 0C12E9CC2616383B00B66C86 /* store_handler.h */, + 0C12E9CD2616383B00B66C86 /* broadcast_ops.h */, + 0C12E9CE2616383B00B66C86 /* reduce_scatter_ops.h */, + 0C12E9CF2616383B00B66C86 /* common.h */, + 0C12E9D02616383B00B66C86 /* common_world_ops.h */, + 0C12E9D12616383B00B66C86 /* barrier_ops.h */, + ); + path = gloo; + sourceTree = "<group>"; + }; + 0C12E9D22616383B00B66C86 
/* fakelowp */ = { + isa = PBXGroup; + children = ( + 0C12E9D32616383B00B66C86 /* sum_fp16_fake_op.h */, + 0C12E9D42616383B00B66C86 /* lengths_reducer_fused_4bit_rowwise_fp16_fake_op.h */, + 0C12E9D52616383B00B66C86 /* int8_dequantize_op_nnpi.h */, + 0C12E9D62616383B00B66C86 /* test */, + 0C12E9D72616383B00B66C86 /* fp16_gemm_utils.h */, + 0C12E9D82616383B00B66C86 /* fp16_fma.h */, + 0C12E9D92616383B00B66C86 /* fp16_fc_acc_op.h */, + 0C12E9DA2616383B00B66C86 /* layernorm_fp16_fake_op.h */, + 0C12E9DB2616383B00B66C86 /* unary_fp16_fake_op.h */, + 0C12E9DC2616383B00B66C86 /* int8_quantize_op_nnpi.h */, + 0C12E9DD2616383B00B66C86 /* lengths_reducer_ops.h */, + 0C12E9DE2616383B00B66C86 /* common.h */, + 0C12E9DF2616383B00B66C86 /* batch_matmul_fp16_fake_op.h */, + 0C12E9E02616383B00B66C86 /* lengths_reducer_fused_8bit_rowwise_fp16_fake_op.h */, + 0C12E9E12616383B00B66C86 /* spatial_batch_norm_fp16_fake_op.h */, + 0C12E9E22616383B00B66C86 /* quant_lut_fp16_fake_op.h */, + 0C12E9E32616383B00B66C86 /* int8_swish_op_nnpi.h */, + ); + path = fakelowp; + sourceTree = "<group>"; + }; + 0C12E9D62616383B00B66C86 /* test */ = { + isa = PBXGroup; + children = ( + ); + path = test; + sourceTree = "<group>"; + }; + 0C12E9E42616383B00B66C86 /* script */ = { + isa = PBXGroup; + children = ( + 0C12E9E52616383B00B66C86 /* examples */, + ); + path = script; + sourceTree = "<group>"; + }; + 0C12E9E52616383B00B66C86 /* examples */ = { + isa = PBXGroup; + children = ( + ); + path = examples; + sourceTree = "<group>"; + }; + 0C12E9E62616383B00B66C86 /* opencl */ = { + isa = PBXGroup; + children = ( + 0C12E9E72616383B00B66C86 /* context.h */, + 0C12E9E82616383B00B66C86 /* OpenCL */, + ); + path = opencl; + sourceTree = "<group>"; + }; + 0C12E9E82616383B00B66C86 /* OpenCL */ = { + isa = PBXGroup; + children = ( + ); + path = OpenCL; + sourceTree = "<group>"; + }; + 0C12E9E92616383B00B66C86 /* prof */ = { + isa = PBXGroup; + children = ( + 0C12E9EA2616383B00B66C86 /* prof_dag_stats_op.h */, + ); + path = prof; + sourceTree = "<group>"; + }; + 0C12E9EB2616383B00B66C86 /* tensorrt */ = { + isa = PBXGroup; + children = ( + 0C12E9EC2616383B00B66C86 /* tensorrt_tranformer.h */, + 0C12E9ED2616383B00B66C86 /* trt_utils.h */, + 0C12E9EE2616383B00B66C86 /* tensorrt_op_trt.h */, + ); + path = tensorrt; + sourceTree = "<group>"; + }; + 0C12E9EF2616383B00B66C86 /* shm_mutex */ = { + isa = PBXGroup; + children = ( + 0C12E9F02616383B00B66C86 /* shm_mutex.h */, + ); + path = shm_mutex; + sourceTree = "<group>"; + }; + 0C12E9F12616383B00B66C86 /* tensorboard */ = { + isa = PBXGroup; + children = ( + ); + path = tensorboard; + sourceTree = "<group>"; + }; + 0C12E9F22616383B00B66C86 /* aten */ = { + isa = PBXGroup; + children = ( + 0C12E9F32616383B00B66C86 /* aten_op.h */, + 0C12E9F42616383B00B66C86 /* docs */, + 0C12E9F52616383B00B66C86 /* aten_op_template.h */, + ); + path = aten; + sourceTree = "<group>"; + }; + 0C12E9F42616383B00B66C86 /* docs */ = { + isa = PBXGroup; + children = ( + ); + path = docs; + sourceTree = "<group>"; + }; + 0C12E9F62616383B00B66C86 /* pytorch */ = { + isa = PBXGroup; + children = ( + ); + path = pytorch; + sourceTree = "<group>"; + }; + 0C12E9F72616383B00B66C86 /* image */ = { + isa = PBXGroup; + children = ( + 0C12E9F82616383B00B66C86 /* image_input_op.h */, + 0C12E9F92616383B00B66C86 /* transform_gpu.h */, + ); + path = image; + sourceTree = "<group>"; + }; + 0C12E9FA2616383B00B66C86 /* quantization */ = { + isa = PBXGroup; + children = ( + 0C12E9FB2616383B00B66C86 /* server */, + ); + path = 
quantization; + sourceTree = "<group>"; + }; + 0C12E9FB2616383B00B66C86 /* server */ = { + isa = PBXGroup; + children = ( + 0C12E9FC2616383B00B66C86 /* fbgemm_fp16_pack_op.h */, + 0C12E9FD2616383B00B66C86 /* concat_dnnlowp_op.h */, + 0C12E9FE2616383B00B66C86 /* fully_connected_dnnlowp_op.h */, + 0C12E9FF2616383B00B66C86 /* int8_quant_scheme_blob_fill.h */, + 0C12EA002616383B00B66C86 /* quantize_dnnlowp_op.h */, + 0C12EA012616383B00B66C86 /* batch_matmul_dnnlowp_op.h */, + 0C12EA022616383B00B66C86 /* utility_dnnlowp_ops.h */, + 0C12EA032616383B00B66C86 /* activation_distribution_observer.h */, + 0C12EA042616383B00B66C86 /* compute_equalization_scale.h */, + 0C12EA052616383B00B66C86 /* caffe2_dnnlowp_utils.h */, + 0C12EA062616383B00B66C86 /* dnnlowp_partition.h */, + 0C12EA072616383B00B66C86 /* fully_connected_fake_lowp_op.h */, + 0C12EA082616383B00B66C86 /* op_wrapper.h */, + 0C12EA092616383B00B66C86 /* batch_permutation_dnnlowp_op.h */, + 0C12EA0A2616383B00B66C86 /* conv_relu_op.h */, + 0C12EA0B2616383B00B66C86 /* conv_pool_dnnlowp_op_base.h */, + 0C12EA0C2616383B00B66C86 /* mmio.h */, + 0C12EA0D2616383B00B66C86 /* lstm_unit_dnnlowp_op.h */, + 0C12EA0E2616383B00B66C86 /* fbgemm_pack_matrix_cache.h */, + 0C12EA0F2616383B00B66C86 /* im2col_dnnlowp.h */, + 0C12EA102616383B00B66C86 /* fbgemm_pack_op.h */, + 0C12EA112616383B00B66C86 /* resize_nearest_dnnlowp_op.h */, + 0C12EA122616383B00B66C86 /* group_norm_dnnlowp_op.h */, + 0C12EA132616383B00B66C86 /* elementwise_dnnlowp_op.h */, + 0C12EA142616383B00B66C86 /* fb_fc_packed_op.h */, + 0C12EA152616383B00B66C86 /* relu_dnnlowp_op.h */, + 0C12EA162616383B00B66C86 /* spatial_batch_norm_dnnlowp_op.h */, + 0C12EA172616383B00B66C86 /* dequantize_dnnlowp_op.h */, + 0C12EA182616383B00B66C86 /* kl_minimization.h */, + 0C12EA192616383B00B66C86 /* dynamic_histogram.h */, + 0C12EA1A2616383B00B66C86 /* tanh.h */, + 0C12EA1B2616383B00B66C86 /* fbgemm_pack_blob.h */, + 0C12EA1C2616383B00B66C86 /* resize_nearest_3d_dnnlowp_op.h */, + 0C12EA1D2616383B00B66C86 /* int8_gen_quant_params.h */, + 0C12EA1E2616383B00B66C86 /* conv_dnnlowp_op.h */, + 0C12EA1F2616383B00B66C86 /* sigmoid.h */, + 0C12EA202616383B00B66C86 /* channel_shuffle_dnnlowp_op.h */, + 0C12EA212616383B00B66C86 /* int8_gen_quant_params_min_max.h */, + 0C12EA222616383B00B66C86 /* quantization_error_minimization.h */, + 0C12EA232616383B00B66C86 /* elementwise_linear_dnnlowp_op.h */, + 0C12EA242616383B00B66C86 /* dnnlowp_op.h */, + 0C12EA252616383B00B66C86 /* l2_minimization.h */, + 0C12EA262616383B00B66C86 /* dnnlowp.h */, + 0C12EA272616383B00B66C86 /* conv_dnnlowp_acc16_op.h */, + 0C12EA282616383B00B66C86 /* transpose.h */, + 0C12EA292616383B00B66C86 /* pool_dnnlowp_op_avx2.h */, + 0C12EA2A2616383B00B66C86 /* fully_connected_dnnlowp_acc16_op.h */, + ); + path = server; + sourceTree = "<group>"; + }; + 0C12EA2B2616383B00B66C86 /* transforms */ = { + isa = PBXGroup; + children = ( + 0C12EA2C2616383B00B66C86 /* single_op_transform.h */, + 0C12EA2D2616383B00B66C86 /* common_subexpression_elimination.h */, + 0C12EA2E2616383B00B66C86 /* conv_to_nnpack_transform.h */, + 0C12EA2F2616383B00B66C86 /* pattern_net_transform.h */, + ); + path = transforms; + sourceTree = "<group>"; + }; + 0C12EA302616383B00B66C86 /* mobile */ = { + isa = PBXGroup; + children = ( + 0C12EA312616383B00B66C86 /* contrib */, + ); + path = mobile; + sourceTree = "<group>"; + }; + 0C12EA312616383B00B66C86 /* contrib */ = { + isa = PBXGroup; + children = ( + 0C12EA322616383B00B66C86 /* libopencl-stub */, + 0C12EA3D2616383B00B66C86 /* ios 
*/, + 0C12EA472616383B00B66C86 /* snpe */, + 0C12EA492616383B00B66C86 /* nnapi */, + 0C12EA4D2616383B00B66C86 /* ulp2 */, + 0C12EA502616383B00B66C86 /* libvulkan-stub */, + ); + path = contrib; + sourceTree = "<group>"; + }; + 0C12EA322616383B00B66C86 /* libopencl-stub */ = { + isa = PBXGroup; + children = ( + 0C12EA332616383B00B66C86 /* include */, + 0C12EA3C2616383B00B66C86 /* src */, + ); + path = "libopencl-stub"; + sourceTree = "<group>"; + }; + 0C12EA332616383B00B66C86 /* include */ = { + isa = PBXGroup; + children = ( + 0C12EA342616383B00B66C86 /* libopencl.h */, + 0C12EA352616383B00B66C86 /* CL */, + ); + path = include; + sourceTree = "<group>"; + }; + 0C12EA352616383B00B66C86 /* CL */ = { + isa = PBXGroup; + children = ( + 0C12EA362616383B00B66C86 /* cl_platform.h */, + 0C12EA372616383B00B66C86 /* opencl.h */, + 0C12EA382616383B00B66C86 /* cl_ext.h */, + 0C12EA392616383B00B66C86 /* cl.h */, + 0C12EA3A2616383B00B66C86 /* cl_gl.h */, + 0C12EA3B2616383B00B66C86 /* cl_gl_ext.h */, + ); + path = CL; + sourceTree = "<group>"; + }; + 0C12EA3C2616383B00B66C86 /* src */ = { + isa = PBXGroup; + children = ( + ); + path = src; + sourceTree = "<group>"; + }; + 0C12EA3D2616383B00B66C86 /* ios */ = { + isa = PBXGroup; + children = ( + 0C12EA3E2616383B00B66C86 /* ios_caffe_defines.h */, + 0C12EA3F2616383B00B66C86 /* mpscnn */, + 0C12EA452616383B00B66C86 /* ios_caffe.h */, + 0C12EA462616383B00B66C86 /* ios_caffe_predictor.h */, + ); + path = ios; + sourceTree = "<group>"; + }; + 0C12EA3F2616383B00B66C86 /* mpscnn */ = { + isa = PBXGroup; + children = ( + 0C12EA402616383B00B66C86 /* mpscnn_graph_mask.h */, + 0C12EA412616383B00B66C86 /* mpscnn.h */, + 0C12EA422616383B00B66C86 /* mpscnn_test.h */, + 0C12EA432616383B00B66C86 /* mpscnn_kernels.h */, + 0C12EA442616383B00B66C86 /* mpscnn_context.h */, + ); + path = mpscnn; + sourceTree = "<group>"; + }; + 0C12EA472616383B00B66C86 /* snpe */ = { + isa = PBXGroup; + children = ( + 0C12EA482616383B00B66C86 /* snpe_ffi.h */, + ); + path = snpe; + sourceTree = "<group>"; + }; + 0C12EA492616383B00B66C86 /* nnapi */ = { + isa = PBXGroup; + children = ( + 0C12EA4A2616383B00B66C86 /* nnapi.h */, + 0C12EA4B2616383B00B66C86 /* NeuralNetworks.h */, + 0C12EA4C2616383B00B66C86 /* dlnnapi.h */, + ); + path = nnapi; + sourceTree = "<group>"; + }; + 0C12EA4D2616383B00B66C86 /* ulp2 */ = { + isa = PBXGroup; + children = ( + 0C12EA4E2616383B00B66C86 /* ulp.h */, + 0C12EA4F2616383B00B66C86 /* ulp_neon.h */, + ); + path = ulp2; + sourceTree = "<group>"; + }; + 0C12EA502616383B00B66C86 /* libvulkan-stub */ = { + isa = PBXGroup; + children = ( + 0C12EA512616383B00B66C86 /* include */, + 0C12EA562616383B00B66C86 /* src */, + ); + path = "libvulkan-stub"; + sourceTree = "<group>"; + }; + 0C12EA512616383B00B66C86 /* include */ = { + isa = PBXGroup; + children = ( + 0C12EA522616383B00B66C86 /* libvulkan-stub.h */, + 0C12EA532616383B00B66C86 /* vulkan */, + ); + path = include; + sourceTree = "<group>"; + }; + 0C12EA532616383B00B66C86 /* vulkan */ = { + isa = PBXGroup; + children = ( + 0C12EA542616383B00B66C86 /* vulkan.h */, + 0C12EA552616383B00B66C86 /* vk_platform.h */, + ); + path = vulkan; + sourceTree = "<group>"; + }; + 0C12EA562616383B00B66C86 /* src */ = { + isa = PBXGroup; + children = ( + ); + path = src; + sourceTree = "<group>"; + }; + 0C12EA572616383B00B66C86 /* sgd */ = { + isa = PBXGroup; + children = ( + 0C12EA582616383B00B66C86 /* fp16_momentum_sgd_op.h */, + 0C12EA592616383B00B66C86 /* rmsprop_op.h */, + 0C12EA5A2616383B00B66C86 /* lars_op.h */, + 
0C12EA5B2616383B00B66C86 /* yellowfin_op.h */, + 0C12EA5C2616383B00B66C86 /* math_lp.h */, + 0C12EA5D2616383B00B66C86 /* storm_op.h */, + 0C12EA5E2616383B00B66C86 /* adagrad_op.h */, + 0C12EA5F2616383B00B66C86 /* clip_tensor_op.h */, + 0C12EA602616383B00B66C86 /* gftrl_op.h */, + 0C12EA612616383B00B66C86 /* adadelta_op.h */, + 0C12EA622616383B00B66C86 /* learning_rate_op.h */, + 0C12EA632616383B00B66C86 /* adagrad_fused.h */, + 0C12EA642616383B00B66C86 /* adam_op.h */, + 0C12EA652616383B00B66C86 /* ftrl_op.h */, + 0C12EA662616383B00B66C86 /* weight_scale_op.h */, + 0C12EA672616383B00B66C86 /* learning_rate_adaption_op.h */, + 0C12EA682616383B00B66C86 /* rowwise_counter.h */, + 0C12EA692616383B00B66C86 /* iter_op.h */, + 0C12EA6A2616383B00B66C86 /* rowwise_adagrad_fused.h */, + 0C12EA6B2616383B00B66C86 /* momentum_sgd_op.h */, + 0C12EA6C2616383B00B66C86 /* wngrad_op.h */, + 0C12EA6D2616383B00B66C86 /* decay_adagrad_op.h */, + 0C12EA6E2616383B00B66C86 /* learning_rate_functors.h */, + 0C12EA6F2616383B00B66C86 /* fp32_momentum_sgd_op.h */, + ); + path = sgd; + sourceTree = "<group>"; + }; + 0C12EA702616383B00B66C86 /* queue */ = { + isa = PBXGroup; + children = ( + 0C12EA712616383B00B66C86 /* blobs_queue.h */, + 0C12EA722616383B00B66C86 /* rebatching_queue_ops.h */, + 0C12EA732616383B00B66C86 /* queue_ops.h */, + 0C12EA742616383B00B66C86 /* rebatching_queue.h */, + 0C12EA752616383B00B66C86 /* blobs_queue_db.h */, + ); + path = queue; + sourceTree = "<group>"; + }; + 0C12EA762616383B00B66C86 /* db */ = { + isa = PBXGroup; + children = ( + 0C12EA772616383B00B66C86 /* create_db_op.h */, + ); + path = db; + sourceTree = "<group>"; + }; + 0C12EA782616383B00B66C86 /* opt */ = { + isa = PBXGroup; + children = ( + 0C12EA792616383B00B66C86 /* nql */, + 0C12EA7D2616383B00B66C86 /* device.h */, + 0C12EA7E2616383B00B66C86 /* annotations.h */, + 0C12EA7F2616383B00B66C86 /* mobile.h */, + 0C12EA802616383B00B66C86 /* onnxifi_transformer.h */, + 0C12EA812616383B00B66C86 /* converter.h */, + 0C12EA822616383B00B66C86 /* backend_transformer_base.h */, + 0C12EA832616383B00B66C86 /* fakefp16_transform.h */, + 0C12EA842616383B00B66C86 /* fusion.h */, + 0C12EA852616383B00B66C86 /* shape_info.h */, + 0C12EA862616383B00B66C86 /* optimizer.h */, + 0C12EA872616383B00B66C86 /* glow_net_transform.h */, + 0C12EA882616383B00B66C86 /* backend_cutting.h */, + 0C12EA892616383B00B66C86 /* distributed.h */, + 0C12EA8A2616383B00B66C86 /* onnxifi_op.h */, + 0C12EA8B2616383B00B66C86 /* tvm_transformer.h */, + 0C12EA8C2616383B00B66C86 /* passes.h */, + 0C12EA8D2616383B00B66C86 /* bound_shape_inferencer.h */, + 0C12EA8E2616383B00B66C86 /* custom */, + 0C12EA942616383B00B66C86 /* onnx_convert.h */, + 0C12EA952616383B00B66C86 /* optimize_ideep.h */, + ); + path = opt; + sourceTree = "<group>"; + }; + 0C12EA792616383B00B66C86 /* nql */ = { + isa = PBXGroup; + children = ( + 0C12EA7A2616383B00B66C86 /* tests */, + 0C12EA7B2616383B00B66C86 /* ast.h */, + 0C12EA7C2616383B00B66C86 /* graphmatcher.h */, + ); + path = nql; + sourceTree = "<group>"; + }; + 0C12EA7A2616383B00B66C86 /* tests */ = { + isa = PBXGroup; + children = ( + ); + path = tests; + sourceTree = "<group>"; + }; + 0C12EA8E2616383B00B66C86 /* custom */ = { + isa = PBXGroup; + children = ( + 0C12EA8F2616383B00B66C86 /* concat_elim.h */, + 0C12EA902616383B00B66C86 /* pointwise_elim.h */, + 0C12EA912616383B00B66C86 /* freeze_quantization_params.h */, + 0C12EA922616383B00B66C86 /* in_batch_broadcast.h */, + 0C12EA932616383B00B66C86 /* cc_amrc.h */, + ); + path = custom; + 
sourceTree = "<group>"; + }; + 0C12EA962616383B00B66C86 /* predictor */ = { + isa = PBXGroup; + children = ( + 0C12EA972616383B00B66C86 /* ThreadLocalPtr.h */, + 0C12EA982616383B00B66C86 /* InferenceGraph.h */, + 0C12EA992616383B00B66C86 /* predictor_utils.h */, + 0C12EA9A2616383B00B66C86 /* predictor.h */, + 0C12EA9B2616383B00B66C86 /* predictor_config.h */, + 0C12EA9C2616383B00B66C86 /* emulator */, + 0C12EAA62616383B00B66C86 /* transforms.h */, + ); + path = predictor; + sourceTree = "<group>"; + }; + 0C12EA9C2616383B00B66C86 /* emulator */ = { + isa = PBXGroup; + children = ( + 0C12EA9D2616383B00B66C86 /* data_filler.h */, + 0C12EA9E2616383B00B66C86 /* utils.h */, + 0C12EA9F2616383B00B66C86 /* net_supplier.h */, + 0C12EAA02616383B00B66C86 /* time_profiler.h */, + 0C12EAA12616383B00B66C86 /* emulator.h */, + 0C12EAA22616383B00B66C86 /* output_formatter.h */, + 0C12EAA32616383B00B66C86 /* std_output_formatter.h */, + 0C12EAA42616383B00B66C86 /* benchmark.h */, + 0C12EAA52616383B00B66C86 /* profiler.h */, + ); + path = emulator; + sourceTree = "<group>"; + }; + 0C12EAA72616383B00B66C86 /* observers */ = { + isa = PBXGroup; + children = ( + 0C12EAA82616383B00B66C86 /* operator_attaching_net_observer.h */, + 0C12EAA92616383B00B66C86 /* time_observer.h */, + 0C12EAAA2616383B00B66C86 /* runcnt_observer.h */, + 0C12EAAB2616383B00B66C86 /* profile_observer.h */, + ); + path = observers; + sourceTree = "<group>"; + }; + 0C12EAAC2616383B00B66C86 /* share */ = { + isa = PBXGroup; + children = ( + 0C12EAAD2616383B00B66C86 /* contrib */, + ); + path = share; + sourceTree = "<group>"; + }; + 0C12EAAD2616383B00B66C86 /* contrib */ = { + isa = PBXGroup; + children = ( + 0C12EAAE2616383B00B66C86 /* nnpack */, + 0C12EAAF2616383B00B66C86 /* depthwise */, + 0C12EAB02616383B00B66C86 /* zstd */, + ); + path = contrib; + sourceTree = "<group>"; + }; + 0C12EAAE2616383B00B66C86 /* nnpack */ = { + isa = PBXGroup; + children = ( + ); + path = nnpack; + sourceTree = "<group>"; + }; + 0C12EAAF2616383B00B66C86 /* depthwise */ = { + isa = PBXGroup; + children = ( + ); + path = depthwise; + sourceTree = "<group>"; + }; + 0C12EAB02616383B00B66C86 /* zstd */ = { + isa = PBXGroup; + children = ( + 0C12EAB12616383B00B66C86 /* quant_decomp_zstd_op.h */, + ); + path = zstd; + sourceTree = "<group>"; + }; + 0C12EAB32616383B00B66C86 /* torch */ = { + isa = PBXGroup; + children = ( + 0C12EAB42616383B00B66C86 /* csrc */, + 0C12EDA52616383C00B66C86 /* script.h */, + 0C12EDA62616383C00B66C86 /* library.h */, + 0C12EDA72616383C00B66C86 /* custom_class_detail.h */, + 0C12EDA82616383C00B66C86 /* custom_class.h */, + 0C12EDA92616383C00B66C86 /* extension.h */, + ); + path = torch; + sourceTree = "<group>"; + }; + 0C12EAB42616383B00B66C86 /* csrc */ = { + isa = PBXGroup; + children = ( + 0C12EAB52616383B00B66C86 /* Size.h */, + 0C12EAB62616383B00B66C86 /* utils.h */, + 0C12EAB72616383B00B66C86 /* Device.h */, + 0C12EAB82616383B00B66C86 /* onnx */, + 0C12EABB2616383B00B66C86 /* Types.h */, + 0C12EABC2616383B00B66C86 /* distributed */, + 0C12EAFD2616383B00B66C86 /* autograd */, + 0C12EB332616383B00B66C86 /* deploy */, + 0C12EB392616383B00B66C86 /* multiprocessing */, + 0C12EB3B2616383B00B66C86 /* cuda */, + 0C12EB4C2616383B00B66C86 /* serialization.h */, + 0C12EB4D2616383B00B66C86 /* Exceptions.h */, + 0C12EB4E2616383B00B66C86 /* QScheme.h */, + 0C12EB4F2616383B00B66C86 /* utils */, + 0C12EB752616383B00B66C86 /* Stream.h */, + 0C12EB762616383B00B66C86 /* StorageDefs.h */, + 0C12EB772616383B00B66C86 /* DataLoader.h */, + 
0C12EB782616383B00B66C86 /* THP.h */, + 0C12EB792616383B00B66C86 /* python_headers.h */, + 0C12EB7A2616383B00B66C86 /* Layout.h */, + 0C12EB7B2616383B00B66C86 /* DynamicTypes.h */, + 0C12EB7C2616383B00B66C86 /* copy_utils.h */, + 0C12EB7D2616383B00B66C86 /* jit */, + 0C12ECDA2616383B00B66C86 /* Storage.h */, + 0C12ECDB2616383B00B66C86 /* api */, + 0C12ED952616383C00B66C86 /* MemoryFormat.h */, + 0C12ED962616383C00B66C86 /* generic */, + 0C12ED9A2616383C00B66C86 /* tensor */, + 0C12ED9C2616383C00B66C86 /* WindowsTorchApiMacro.h */, + 0C12ED9D2616383C00B66C86 /* Dtype.h */, + 0C12ED9E2616383C00B66C86 /* Module.h */, + 0C12ED9F2616383C00B66C86 /* THP_export.h */, + 0C12EDA02616383C00B66C86 /* python_dimname.h */, + 0C12EDA12616383C00B66C86 /* CudaIPCTypes.h */, + 0C12EDA22616383C00B66C86 /* Generator.h */, + 0C12EDA32616383C00B66C86 /* TypeInfo.h */, + 0C12EDA42616383C00B66C86 /* PythonTypes.h */, + ); + path = csrc; + sourceTree = "<group>"; + }; + 0C12EAB82616383B00B66C86 /* onnx */ = { + isa = PBXGroup; + children = ( + 0C12EAB92616383B00B66C86 /* init.h */, + 0C12EABA2616383B00B66C86 /* onnx.h */, + ); + path = onnx; + sourceTree = "<group>"; + }; + 0C12EABC2616383B00B66C86 /* distributed */ = { + isa = PBXGroup; + children = ( + 0C12EABD2616383B00B66C86 /* autograd */, + 0C12EAD42616383B00B66C86 /* rpc */, + 0C12EAFA2616383B00B66C86 /* c10d */, + ); + path = distributed; + sourceTree = "<group>"; + }; + 0C12EABD2616383B00B66C86 /* autograd */ = { + isa = PBXGroup; + children = ( + 0C12EABE2616383B00B66C86 /* utils.h */, + 0C12EABF2616383B00B66C86 /* context */, + 0C12EAC22616383B00B66C86 /* rpc_messages */, + 0C12EACD2616383B00B66C86 /* python_autograd.h */, + 0C12EACE2616383B00B66C86 /* autograd.h */, + 0C12EACF2616383B00B66C86 /* functions */, + 0C12EAD22616383B00B66C86 /* engine */, + ); + path = autograd; + sourceTree = "<group>"; + }; + 0C12EABF2616383B00B66C86 /* context */ = { + isa = PBXGroup; + children = ( + 0C12EAC02616383B00B66C86 /* container.h */, + 0C12EAC12616383B00B66C86 /* context.h */, + ); + path = context; + sourceTree = "<group>"; + }; + 0C12EAC22616383B00B66C86 /* rpc_messages */ = { + isa = PBXGroup; + children = ( + 0C12EAC32616383B00B66C86 /* cleanup_autograd_context_req.h */, + 0C12EAC42616383B00B66C86 /* cleanup_autograd_context_resp.h */, + 0C12EAC52616383B00B66C86 /* rref_backward_req.h */, + 0C12EAC62616383B00B66C86 /* rpc_with_profiling_req.h */, + 0C12EAC72616383B00B66C86 /* propagate_gradients_resp.h */, + 0C12EAC82616383B00B66C86 /* propagate_gradients_req.h */, + 0C12EAC92616383B00B66C86 /* autograd_metadata.h */, + 0C12EACA2616383B00B66C86 /* rpc_with_autograd.h */, + 0C12EACB2616383B00B66C86 /* rref_backward_resp.h */, + 0C12EACC2616383B00B66C86 /* rpc_with_profiling_resp.h */, + ); + path = rpc_messages; + sourceTree = "<group>"; + }; + 0C12EACF2616383B00B66C86 /* functions */ = { + isa = PBXGroup; + children = ( + 0C12EAD02616383B00B66C86 /* sendrpc_backward.h */, + 0C12EAD12616383B00B66C86 /* recvrpc_backward.h */, + ); + path = functions; + sourceTree = "<group>"; + }; + 0C12EAD22616383B00B66C86 /* engine */ = { + isa = PBXGroup; + children = ( + 0C12EAD32616383B00B66C86 /* dist_engine.h */, + ); + path = engine; + sourceTree = "<group>"; + }; + 0C12EAD42616383B00B66C86 /* rpc */ = { + isa = PBXGroup; + children = ( + 0C12EAD52616383B00B66C86 /* metrics */, + 0C12EAD72616383B00B66C86 /* utils.h */, + 0C12EAD82616383B00B66C86 /* rref_context.h */, + 0C12EAD92616383B00B66C86 /* request_callback_impl.h */, + 0C12EADA2616383B00B66C86 /* python_resp.h 
*/, + 0C12EADB2616383B00B66C86 /* rref_impl.h */, + 0C12EADC2616383B00B66C86 /* request_callback.h */, + 0C12EADD2616383B00B66C86 /* types.h */, + 0C12EADE2616383B00B66C86 /* rref_proto.h */, + 0C12EADF2616383B00B66C86 /* py_rref.h */, + 0C12EAE02616383B00B66C86 /* rpc_agent.h */, + 0C12EAE12616383B00B66C86 /* python_functions.h */, + 0C12EAE22616383B00B66C86 /* message.h */, + 0C12EAE32616383B00B66C86 /* request_callback_no_python.h */, + 0C12EAE42616383B00B66C86 /* python_remote_call.h */, + 0C12EAE52616383B00B66C86 /* python_call.h */, + 0C12EAE62616383B00B66C86 /* tensorpipe_agent.h */, + 0C12EAE72616383B00B66C86 /* script_remote_call.h */, + 0C12EAE82616383B00B66C86 /* testing */, + 0C12EAEB2616383B00B66C86 /* macros.h */, + 0C12EAEC2616383B00B66C86 /* script_resp.h */, + 0C12EAED2616383B00B66C86 /* rpc.h */, + 0C12EAEE2616383B00B66C86 /* rpc_command_base.h */, + 0C12EAEF2616383B00B66C86 /* profiler */, + 0C12EAF22616383B00B66C86 /* script_call.h */, + 0C12EAF32616383B00B66C86 /* unpickled_python_remote_call.h */, + 0C12EAF42616383B00B66C86 /* torchscript_functions.h */, + 0C12EAF52616383B00B66C86 /* unpickled_python_call.h */, + 0C12EAF62616383B00B66C86 /* tensorpipe_utils.h */, + 0C12EAF72616383B00B66C86 /* agent_utils.h */, + 0C12EAF82616383B00B66C86 /* process_group_agent.h */, + 0C12EAF92616383B00B66C86 /* python_rpc_handler.h */, + ); + path = rpc; + sourceTree = "<group>"; + }; + 0C12EAD52616383B00B66C86 /* metrics */ = { + isa = PBXGroup; + children = ( + 0C12EAD62616383B00B66C86 /* RpcMetricsHandler.h */, + ); + path = metrics; + sourceTree = "<group>"; + }; + 0C12EAE82616383B00B66C86 /* testing */ = { + isa = PBXGroup; + children = ( + 0C12EAE92616383B00B66C86 /* testing.h */, + 0C12EAEA2616383B00B66C86 /* faulty_process_group_agent.h */, + ); + path = testing; + sourceTree = "<group>"; + }; + 0C12EAEF2616383B00B66C86 /* profiler */ = { + isa = PBXGroup; + children = ( + 0C12EAF02616383B00B66C86 /* remote_profiler_manager.h */, + 0C12EAF12616383B00B66C86 /* server_process_global_profiler.h */, + ); + path = profiler; + sourceTree = "<group>"; + }; + 0C12EAFA2616383B00B66C86 /* c10d */ = { + isa = PBXGroup; + children = ( + 0C12EAFB2616383B00B66C86 /* python_comm_hook.h */, + 0C12EAFC2616383B00B66C86 /* c10d.h */, + ); + path = c10d; + sourceTree = "<group>"; + }; + 0C12EAFD2616383B00B66C86 /* autograd */ = { + isa = PBXGroup; + children = ( + 0C12EAFE2616383B00B66C86 /* generated */, + 0C12EB022616383B00B66C86 /* python_function.h */, + 0C12EB032616383B00B66C86 /* custom_function.h */, + 0C12EB042616383B00B66C86 /* python_linalg_functions.h */, + 0C12EB052616383B00B66C86 /* record_function_ops.h */, + 0C12EB062616383B00B66C86 /* engine.h */, + 0C12EB072616383B00B66C86 /* edge.h */, + 0C12EB082616383B00B66C86 /* saved_variable.h */, + 0C12EB092616383B00B66C86 /* python_engine.h */, + 0C12EB0A2616383B00B66C86 /* python_legacy_variable.h */, + 0C12EB0B2616383B00B66C86 /* python_cpp_function.h */, + 0C12EB0C2616383B00B66C86 /* python_hook.h */, + 0C12EB0D2616383B00B66C86 /* VariableTypeUtils.h */, + 0C12EB0E2616383B00B66C86 /* python_autograd.h */, + 0C12EB0F2616383B00B66C86 /* profiler_kineto.h */, + 0C12EB102616383B00B66C86 /* variable.h */, + 0C12EB112616383B00B66C86 /* utils */, + 0C12EB172616383B00B66C86 /* python_fft_functions.h */, + 0C12EB182616383B00B66C86 /* python_variable.h */, + 0C12EB192616383B00B66C86 /* function_hook.h */, + 0C12EB1A2616383B00B66C86 /* input_metadata.h */, + 0C12EB1B2616383B00B66C86 /* grad_mode.h */, + 0C12EB1C2616383B00B66C86 /* symbolic.h */, 
+ 0C12EB1D2616383B00B66C86 /* input_buffer.h */, + 0C12EB1E2616383B00B66C86 /* profiler_legacy.h */, + 0C12EB1F2616383B00B66C86 /* autograd.h */, + 0C12EB202616383B00B66C86 /* cpp_hook.h */, + 0C12EB212616383B00B66C86 /* functions */, + 0C12EB282616383B00B66C86 /* python_special_functions.h */, + 0C12EB292616383B00B66C86 /* FunctionsManual.h */, + 0C12EB2A2616383B00B66C86 /* forward_grad.h */, + 0C12EB2B2616383B00B66C86 /* python_anomaly_mode.h */, + 0C12EB2C2616383B00B66C86 /* python_nn_functions.h */, + 0C12EB2D2616383B00B66C86 /* InferenceMode.h */, + 0C12EB2E2616383B00B66C86 /* python_variable_indexing.h */, + 0C12EB2F2616383B00B66C86 /* profiler.h */, + 0C12EB302616383B00B66C86 /* function.h */, + 0C12EB312616383B00B66C86 /* anomaly_mode.h */, + 0C12EB322616383B00B66C86 /* profiler_utils.h */, + ); + path = autograd; + sourceTree = "<group>"; + }; + 0C12EAFE2616383B00B66C86 /* generated */ = { + isa = PBXGroup; + children = ( + 0C12EAFF2616383B00B66C86 /* python_functions.h */, + 0C12EB002616383B00B66C86 /* Functions.h */, + 0C12EB012616383B00B66C86 /* variable_factories.h */, + ); + path = generated; + sourceTree = "<group>"; + }; + 0C12EB112616383B00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12EB122616383B00B66C86 /* wrap_outputs.h */, + 0C12EB132616383B00B66C86 /* python_arg_parsing.h */, + 0C12EB142616383B00B66C86 /* grad_layout_contract.h */, + 0C12EB152616383B00B66C86 /* lambda_post_hook.h */, + 0C12EB162616383B00B66C86 /* error_messages.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12EB212616383B00B66C86 /* functions */ = { + isa = PBXGroup; + children = ( + 0C12EB222616383B00B66C86 /* utils.h */, + 0C12EB232616383B00B66C86 /* pybind.h */, + 0C12EB242616383B00B66C86 /* comm.h */, + 0C12EB252616383B00B66C86 /* basic_ops.h */, + 0C12EB262616383B00B66C86 /* accumulate_grad.h */, + 0C12EB272616383B00B66C86 /* tensor.h */, + ); + path = functions; + sourceTree = "<group>"; + }; + 0C12EB332616383B00B66C86 /* deploy */ = { + isa = PBXGroup; + children = ( + 0C12EB342616383B00B66C86 /* interpreter */, + 0C12EB372616383B00B66C86 /* example */, + 0C12EB382616383B00B66C86 /* deploy.h */, + ); + path = deploy; + sourceTree = "<group>"; + }; + 0C12EB342616383B00B66C86 /* interpreter */ = { + isa = PBXGroup; + children = ( + 0C12EB352616383B00B66C86 /* interpreter_impl.h */, + 0C12EB362616383B00B66C86 /* third_party */, + ); + path = interpreter; + sourceTree = "<group>"; + }; + 0C12EB362616383B00B66C86 /* third_party */ = { + isa = PBXGroup; + children = ( + ); + path = third_party; + sourceTree = "<group>"; + }; + 0C12EB372616383B00B66C86 /* example */ = { + isa = PBXGroup; + children = ( + ); + path = example; + sourceTree = "<group>"; + }; + 0C12EB392616383B00B66C86 /* multiprocessing */ = { + isa = PBXGroup; + children = ( + 0C12EB3A2616383B00B66C86 /* init.h */, + ); + path = multiprocessing; + sourceTree = "<group>"; + }; + 0C12EB3B2616383B00B66C86 /* cuda */ = { + isa = PBXGroup; + children = ( + 0C12EB3C2616383B00B66C86 /* utils.h */, + 0C12EB3D2616383B00B66C86 /* THCP.h */, + 0C12EB3E2616383B00B66C86 /* nccl.h */, + 0C12EB3F2616383B00B66C86 /* python_nccl.h */, + 0C12EB402616383B00B66C86 /* device_set.h */, + 0C12EB412616383B00B66C86 /* Event.h */, + 0C12EB422616383B00B66C86 /* serialization.h */, + 0C12EB432616383B00B66C86 /* python_comm.h */, + 0C12EB442616383B00B66C86 /* comm.h */, + 0C12EB452616383B00B66C86 /* Stream.h */, + 0C12EB462616383B00B66C86 /* shared */, + 0C12EB472616383B00B66C86 /* undef_macros.h */, + 0C12EB482616383B00B66C86 /* 
restore_macros.h */, + 0C12EB492616383B00B66C86 /* Storage.h */, + 0C12EB4A2616383B00B66C86 /* Module.h */, + 0C12EB4B2616383B00B66C86 /* override_macros.h */, + ); + path = cuda; + sourceTree = "<group>"; + }; + 0C12EB462616383B00B66C86 /* shared */ = { + isa = PBXGroup; + children = ( + ); + path = shared; + sourceTree = "<group>"; + }; + 0C12EB4F2616383B00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12EB502616383B00B66C86 /* object_ptr.h */, + 0C12EB512616383B00B66C86 /* tensor_numpy.h */, + 0C12EB522616383B00B66C86 /* tensor_dtypes.h */, + 0C12EB532616383B00B66C86 /* python_tuples.h */, + 0C12EB542616383B00B66C86 /* python_numbers.h */, + 0C12EB552616383B00B66C86 /* python_scalars.h */, + 0C12EB562616383B00B66C86 /* pybind.h */, + 0C12EB572616383B00B66C86 /* tensor_types.h */, + 0C12EB582616383B00B66C86 /* tensor_memoryformats.h */, + 0C12EB592616383B00B66C86 /* python_arg_parser.h */, + 0C12EB5A2616383B00B66C86 /* cuda_lazy_init.h */, + 0C12EB5B2616383B00B66C86 /* tensor_new.h */, + 0C12EB5C2616383B00B66C86 /* tensor_qschemes.h */, + 0C12EB5D2616383B00B66C86 /* python_dispatch.h */, + 0C12EB5E2616383B00B66C86 /* tensor_list.h */, + 0C12EB5F2616383B00B66C86 /* invalid_arguments.h */, + 0C12EB602616383B00B66C86 /* auto_gil.h */, + 0C12EB612616383B00B66C86 /* python_strings.h */, + 0C12EB622616383B00B66C86 /* byte_order.h */, + 0C12EB632616383B00B66C86 /* pycfunction_helpers.h */, + 0C12EB642616383B00B66C86 /* cuda_enabled.h */, + 0C12EB652616383B00B66C86 /* numpy_stub.h */, + 0C12EB662616383B00B66C86 /* out_types.h */, + 0C12EB672616383B00B66C86 /* memory.h */, + 0C12EB682616383B00B66C86 /* tensor_layouts.h */, + 0C12EB692616383B00B66C86 /* structseq.h */, + 0C12EB6A2616383B00B66C86 /* throughput_benchmark.h */, + 0C12EB6B2616383B00B66C86 /* disable_torch_function.h */, + 0C12EB6C2616383B00B66C86 /* throughput_benchmark-inl.h */, + 0C12EB6D2616383B00B66C86 /* tensor_flatten.h */, + 0C12EB6E2616383B00B66C86 /* tensor_apply.h */, + 0C12EB6F2616383B00B66C86 /* init.h */, + 0C12EB702616383B00B66C86 /* python_compat.h */, + 0C12EB712616383B00B66C86 /* disallow_copy.h */, + 0C12EB722616383B00B66C86 /* six.h */, + 0C12EB732616383B00B66C86 /* python_stub.h */, + 0C12EB742616383B00B66C86 /* variadic.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12EB7D2616383B00B66C86 /* jit */ = { + isa = PBXGroup; + children = ( + 0C12EB7E2616383B00B66C86 /* generated */, + 0C12EB7F2616383B00B66C86 /* jit_opt_limit.h */, + 0C12EB802616383B00B66C86 /* frontend */, + 0C12EB9D2616383B00B66C86 /* python */, + 0C12EBAB2616383B00B66C86 /* tensorexpr */, + 0C12EBD22616383B00B66C86 /* ir */, + 0C12EBDF2616383B00B66C86 /* cuda */, + 0C12EBE12616383B00B66C86 /* serialization */, + 0C12EBF12616383B00B66C86 /* backends */, + 0C12EBF72616383B00B66C86 /* runtime */, + 0C12EC122616383B00B66C86 /* passes */, + 0C12EC752616383B00B66C86 /* docs */, + 0C12EC762616383B00B66C86 /* codegen */, + 0C12ECC22616383B00B66C86 /* testing */, + 0C12ECC52616383B00B66C86 /* jit_log.h */, + 0C12ECC62616383B00B66C86 /* mobile */, + 0C12ECD32616383B00B66C86 /* resource_guard.h */, + 0C12ECD42616383B00B66C86 /* api */, + ); + path = jit; + sourceTree = "<group>"; + }; + 0C12EB7E2616383B00B66C86 /* generated */ = { + isa = PBXGroup; + children = ( + ); + path = generated; + sourceTree = "<group>"; + }; + 0C12EB802616383B00B66C86 /* frontend */ = { + isa = PBXGroup; + children = ( + 0C12EB812616383B00B66C86 /* error_report.h */, + 0C12EB822616383B00B66C86 /* source_range.h */, + 0C12EB832616383B00B66C86 /* 
edit_distance.h */, + 0C12EB842616383B00B66C86 /* canonicalize_modified_loop.h */, + 0C12EB852616383B00B66C86 /* schema_matching.h */, + 0C12EB862616383B00B66C86 /* function_schema_parser.h */, + 0C12EB872616383B00B66C86 /* tree_views.h */, + 0C12EB882616383B00B66C86 /* ir_emitter.h */, + 0C12EB892616383B00B66C86 /* parser.h */, + 0C12EB8A2616383B00B66C86 /* strtod.h */, + 0C12EB8B2616383B00B66C86 /* tree.h */, + 0C12EB8C2616383B00B66C86 /* concrete_module_type.h */, + 0C12EB8D2616383B00B66C86 /* builtin_functions.h */, + 0C12EB8E2616383B00B66C86 /* exit_transforms.h */, + 0C12EB8F2616383B00B66C86 /* parse_string_literal.h */, + 0C12EB902616383B00B66C86 /* sugared_value.h */, + 0C12EB912616383B00B66C86 /* inline_loop_condition.h */, + 0C12EB922616383B00B66C86 /* name_mangler.h */, + 0C12EB932616383B00B66C86 /* code_template.h */, + 0C12EB942616383B00B66C86 /* tracer.h */, + 0C12EB952616383B00B66C86 /* resolver.h */, + 0C12EB962616383B00B66C86 /* script_type_parser.h */, + 0C12EB972616383B00B66C86 /* schema_type_parser.h */, + 0C12EB982616383B00B66C86 /* lexer.h */, + 0C12EB992616383B00B66C86 /* versioned_symbols.h */, + 0C12EB9A2616383B00B66C86 /* convert_to_ssa.h */, + 0C12EB9B2616383B00B66C86 /* mini_environment.h */, + 0C12EB9C2616383B00B66C86 /* parser_constants.h */, + ); + path = frontend; + sourceTree = "<group>"; + }; + 0C12EB9D2616383B00B66C86 /* python */ = { + isa = PBXGroup; + children = ( + 0C12EB9E2616383B00B66C86 /* pybind.h */, + 0C12EB9F2616383B00B66C86 /* python_ir.h */, + 0C12EBA02616383B00B66C86 /* script_init.h */, + 0C12EBA12616383B00B66C86 /* python_tree_views.h */, + 0C12EBA22616383B00B66C86 /* python_ivalue.h */, + 0C12EBA32616383B00B66C86 /* python_custom_class.h */, + 0C12EBA42616383B00B66C86 /* update_graph_executor_opt.h */, + 0C12EBA52616383B00B66C86 /* python_tracer.h */, + 0C12EBA62616383B00B66C86 /* pybind_utils.h */, + 0C12EBA72616383B00B66C86 /* init.h */, + 0C12EBA82616383B00B66C86 /* python_sugared_value.h */, + 0C12EBA92616383B00B66C86 /* python_arg_flatten.h */, + 0C12EBAA2616383B00B66C86 /* module_python.h */, + ); + path = python; + sourceTree = "<group>"; + }; + 0C12EBAB2616383B00B66C86 /* tensorexpr */ = { + isa = PBXGroup; + children = ( + 0C12EBAC2616383B00B66C86 /* ir_mutator.h */, + 0C12EBAD2616383B00B66C86 /* ir_simplifier.h */, + 0C12EBAE2616383B00B66C86 /* ir_visitor.h */, + 0C12EBAF2616383B00B66C86 /* llvm_jit.h */, + 0C12EBB02616383B00B66C86 /* tensorexpr_init.h */, + 0C12EBB12616383B00B66C86 /* types.h */, + 0C12EBB22616383B00B66C86 /* mem_dependency_checker.h */, + 0C12EBB32616383B00B66C86 /* ir.h */, + 0C12EBB42616383B00B66C86 /* exceptions.h */, + 0C12EBB52616383B00B66C86 /* cuda_codegen.h */, + 0C12EBB62616383B00B66C86 /* hash_provider.h */, + 0C12EBB72616383B00B66C86 /* ir_printer.h */, + 0C12EBB82616383B00B66C86 /* llvm_codegen.h */, + 0C12EBB92616383B00B66C86 /* expr.h */, + 0C12EBBA2616383B00B66C86 /* cuda_random.h */, + 0C12EBBB2616383B00B66C86 /* execution_counter.h */, + 0C12EBBC2616383B00B66C86 /* codegen.h */, + 0C12EBBD2616383B00B66C86 /* unique_name_manager.h */, + 0C12EBBE2616383B00B66C86 /* cpp_codegen.h */, + 0C12EBBF2616383B00B66C86 /* var_substitutor.h */, + 0C12EBC02616383B00B66C86 /* eval.h */, + 0C12EBC12616383B00B66C86 /* bounds_inference.h */, + 0C12EBC22616383B00B66C86 /* intrinsic_symbols.h */, + 0C12EBC32616383B00B66C86 /* block_codegen.h */, + 0C12EBC42616383B00B66C86 /* external_functions_registry.h */, + 0C12EBC52616383B00B66C86 /* kernel.h */, + 0C12EBC62616383B00B66C86 /* loopnest.h */, + 
0C12EBC72616383B00B66C86 /* bounds_overlap.h */, + 0C12EBC82616383B00B66C86 /* ir_verifier.h */, + 0C12EBC92616383B00B66C86 /* dim_arg.h */, + 0C12EBCA2616383B00B66C86 /* external_functions.h */, + 0C12EBCB2616383B00B66C86 /* stmt.h */, + 0C12EBCC2616383B00B66C86 /* half_support.h */, + 0C12EBCD2616383B00B66C86 /* registerizer.h */, + 0C12EBCE2616383B00B66C86 /* reduction.h */, + 0C12EBCF2616383B00B66C86 /* tensor.h */, + 0C12EBD02616383B00B66C86 /* mem_arena.h */, + 0C12EBD12616383B00B66C86 /* analysis.h */, + ); + path = tensorexpr; + sourceTree = "<group>"; + }; + 0C12EBD22616383B00B66C86 /* ir */ = { + isa = PBXGroup; + children = ( + 0C12EBD32616383B00B66C86 /* named_value.h */, + 0C12EBD42616383B00B66C86 /* irparser.h */, + 0C12EBD52616383B00B66C86 /* ir.h */, + 0C12EBD62616383B00B66C86 /* graph_node_list.h */, + 0C12EBD72616383B00B66C86 /* ir_views.h */, + 0C12EBD82616383B00B66C86 /* alias_analysis.h */, + 0C12EBD92616383B00B66C86 /* attributes.h */, + 0C12EBDA2616383B00B66C86 /* type_hashing.h */, + 0C12EBDB2616383B00B66C86 /* constants.h */, + 0C12EBDC2616383B00B66C86 /* subgraph_matcher.h */, + 0C12EBDD2616383B00B66C86 /* scope.h */, + 0C12EBDE2616383B00B66C86 /* node_hashing.h */, + ); + path = ir; + sourceTree = "<group>"; + }; + 0C12EBDF2616383B00B66C86 /* cuda */ = { + isa = PBXGroup; + children = ( + 0C12EBE02616383B00B66C86 /* cuda.h */, + ); + path = cuda; + sourceTree = "<group>"; + }; + 0C12EBE12616383B00B66C86 /* serialization */ = { + isa = PBXGroup; + children = ( + 0C12EBE22616383B00B66C86 /* import_source.h */, + 0C12EBE32616383B00B66C86 /* export.h */, + 0C12EBE42616383B00B66C86 /* import_export_helpers.h */, + 0C12EBE52616383B00B66C86 /* type_name_uniquer.h */, + 0C12EBE62616383B00B66C86 /* pickler.h */, + 0C12EBE72616383B00B66C86 /* python_print.h */, + 0C12EBE82616383B00B66C86 /* import_legacy.h */, + 0C12EBE92616383B00B66C86 /* import_export_functions.h */, + 0C12EBEA2616383B00B66C86 /* pickle.h */, + 0C12EBEB2616383B00B66C86 /* import_export_constants.h */, + 0C12EBEC2616383B00B66C86 /* source_range_serialization_impl.h */, + 0C12EBED2616383B00B66C86 /* import.h */, + 0C12EBEE2616383B00B66C86 /* unpickler.h */, + 0C12EBEF2616383B00B66C86 /* source_range_serialization.h */, + 0C12EBF02616383B00B66C86 /* onnx.h */, + ); + path = serialization; + sourceTree = "<group>"; + }; + 0C12EBF12616383B00B66C86 /* backends */ = { + isa = PBXGroup; + children = ( + 0C12EBF22616383B00B66C86 /* backend_interface.h */, + 0C12EBF32616383B00B66C86 /* backend.h */, + 0C12EBF42616383B00B66C86 /* backend_resolver.h */, + 0C12EBF52616383B00B66C86 /* backend_detail.h */, + 0C12EBF62616383B00B66C86 /* backend_init.h */, + ); + path = backends; + sourceTree = "<group>"; + }; + 0C12EBF72616383B00B66C86 /* runtime */ = { + isa = PBXGroup; + children = ( + 0C12EBF82616383B00B66C86 /* slice_indices_adjust.h */, + 0C12EBF92616383B00B66C86 /* operator.h */, + 0C12EBFA2616383B00B66C86 /* interpreter.h */, + 0C12EBFB2616383B00B66C86 /* register_ops_utils.h */, + 0C12EBFC2616383B00B66C86 /* jit_exception.h */, + 0C12EBFD2616383B00B66C86 /* exception_message.h */, + 0C12EBFE2616383B00B66C86 /* argument_spec.h */, + 0C12EBFF2616383B00B66C86 /* logging.h */, + 0C12EC002616383B00B66C86 /* profiling_graph_executor_impl.h */, + 0C12EC012616383B00B66C86 /* custom_operator.h */, + 0C12EC022616383B00B66C86 /* static */, + 0C12EC082616383B00B66C86 /* vararg_functions.h */, + 0C12EC092616383B00B66C86 /* symbolic_script.h */, + 0C12EC0A2616383B00B66C86 /* variable_tensor_list.h */, + 
0C12EC0B2616383B00B66C86 /* autodiff.h */, + 0C12EC0C2616383B00B66C86 /* print_handler.h */, + 0C12EC0D2616383B00B66C86 /* profiling_record.h */, + 0C12EC0E2616383B00B66C86 /* graph_executor.h */, + 0C12EC0F2616383B00B66C86 /* operator_options.h */, + 0C12EC102616383B00B66C86 /* instruction.h */, + 0C12EC112616383B00B66C86 /* graph_executor_impl.h */, + ); + path = runtime; + sourceTree = "<group>"; + }; + 0C12EC022616383B00B66C86 /* static */ = { + isa = PBXGroup; + children = ( + 0C12EC032616383B00B66C86 /* fusion.h */, + 0C12EC042616383B00B66C86 /* passes.h */, + 0C12EC052616383B00B66C86 /* ops.h */, + 0C12EC062616383B00B66C86 /* impl.h */, + 0C12EC072616383B00B66C86 /* init.h */, + ); + path = static; + sourceTree = "<group>"; + }; + 0C12EC122616383B00B66C86 /* passes */ = { + isa = PBXGroup; + children = ( + 0C12EC132616383B00B66C86 /* remove_expands.h */, + 0C12EC142616383B00B66C86 /* peephole_list_idioms.h */, + 0C12EC152616383B00B66C86 /* subgraph_rewrite.h */, + 0C12EC162616383B00B66C86 /* fuse_relu.h */, + 0C12EC172616383B00B66C86 /* guard_elimination.h */, + 0C12EC182616383B00B66C86 /* peephole_alias_sensitive.h */, + 0C12EC192616383B00B66C86 /* freeze_module.h */, + 0C12EC1A2616383B00B66C86 /* clear_undefinedness.h */, + 0C12EC1B2616383B00B66C86 /* peephole.h */, + 0C12EC1C2616383B00B66C86 /* remove_dropout.h */, + 0C12EC1D2616383B00B66C86 /* update_differentiable_graph_requires_grad.h */, + 0C12EC1E2616383B00B66C86 /* metal_rewrite.h */, + 0C12EC1F2616383B00B66C86 /* liveness.h */, + 0C12EC202616383B00B66C86 /* onnx */, + 0C12EC362616383B00B66C86 /* remove_mutation.h */, + 0C12EC372616383B00B66C86 /* common_subexpression_elimination.h */, + 0C12EC382616383B00B66C86 /* batch_mm.h */, + 0C12EC392616383B00B66C86 /* constant_pooling.h */, + 0C12EC3A2616383B00B66C86 /* canonicalize_graph_fuser_ops.h */, + 0C12EC3B2616383B00B66C86 /* fuse_linear.h */, + 0C12EC3C2616383B00B66C86 /* annotate_warns.h */, + 0C12EC3D2616383B00B66C86 /* specialize_autogradzero.h */, + 0C12EC3E2616383B00B66C86 /* prepack_folding.h */, + 0C12EC3F2616383B00B66C86 /* frozen_conv_folding.h */, + 0C12EC402616383B00B66C86 /* constant_propagation.h */, + 0C12EC412616383B00B66C86 /* insert_guards.h */, + 0C12EC422616383B00B66C86 /* utils */, + 0C12EC462616383B00B66C86 /* inliner.h */, + 0C12EC472616383B00B66C86 /* lower_grad_of.h */, + 0C12EC482616383B00B66C86 /* quantization */, + 0C12EC512616383B00B66C86 /* normalize_ops.h */, + 0C12EC522616383B00B66C86 /* vulkan_rewrite.h */, + 0C12EC532616383B00B66C86 /* erase_number_types.h */, + 0C12EC542616383B00B66C86 /* graph_rewrite_helper.h */, + 0C12EC552616383B00B66C86 /* graph_fuser.h */, + 0C12EC562616383B00B66C86 /* fold_conv_bn.h */, + 0C12EC572616383B00B66C86 /* remove_redundant_profiles.h */, + 0C12EC582616383B00B66C86 /* inline_forked_closures.h */, + 0C12EC592616383B00B66C86 /* tensorexpr_fuser.h */, + 0C12EC5A2616383B00B66C86 /* decompose_ops.h */, + 0C12EC5B2616383B00B66C86 /* remove_inplace_ops.h */, + 0C12EC5C2616383B00B66C86 /* inline_fork_wait.h */, + 0C12EC5D2616383B00B66C86 /* create_autodiff_subgraphs.h */, + 0C12EC5E2616383B00B66C86 /* requires_grad_analysis.h */, + 0C12EC5F2616383B00B66C86 /* dead_code_elimination.h */, + 0C12EC602616383B00B66C86 /* clear_profiling.h */, + 0C12EC612616383B00B66C86 /* create_functional_graphs.h */, + 0C12EC622616383B00B66C86 /* bailout_graph.h */, + 0C12EC632616383B00B66C86 /* lower_tuples.h */, + 0C12EC642616383B00B66C86 /* frozen_graph_optimizations.h */, + 0C12EC652616383B00B66C86 /* frozen_ops_to_mkldnn.h */, + 
0C12EC662616383B00B66C86 /* canonicalize.h */, + 0C12EC672616383B00B66C86 /* hoist_conv_packed_params.h */, + 0C12EC682616383B00B66C86 /* loop_unrolling.h */, + 0C12EC692616383B00B66C86 /* shape_analysis.h */, + 0C12EC6A2616383B00B66C86 /* fixup_trace_scope_blocks.h */, + 0C12EC6B2616383B00B66C86 /* remove_exceptions.h */, + 0C12EC6C2616383B00B66C86 /* inline_autodiff_subgraphs.h */, + 0C12EC6D2616383B00B66C86 /* inplace_check.h */, + 0C12EC6E2616383B00B66C86 /* cuda_graph_fuser.h */, + 0C12EC6F2616383B00B66C86 /* pass_manager.h */, + 0C12EC702616383B00B66C86 /* onnx.h */, + 0C12EC712616383B00B66C86 /* xnnpack_rewrite.h */, + 0C12EC722616383B00B66C86 /* lift_closures.h */, + 0C12EC732616383B00B66C86 /* frozen_conv_add_relu_fusion.h */, + 0C12EC742616383B00B66C86 /* lower_graph.h */, + ); + path = passes; + sourceTree = "<group>"; + }; + 0C12EC202616383B00B66C86 /* onnx */ = { + isa = PBXGroup; + children = ( + 0C12EC212616383B00B66C86 /* eval_peephole.h */, + 0C12EC222616383B00B66C86 /* function_substitution.h */, + 0C12EC232616383B00B66C86 /* helper.h */, + 0C12EC242616383B00B66C86 /* unpack_quantized_weights.h */, + 0C12EC252616383B00B66C86 /* preprocess_for_onnx.h */, + 0C12EC262616383B00B66C86 /* scalar_type_analysis.h */, + 0C12EC272616383B00B66C86 /* shape_type_inference.h */, + 0C12EC282616383B00B66C86 /* peephole.h */, + 0C12EC292616383B00B66C86 /* eliminate_unused_items.h */, + 0C12EC2A2616383B00B66C86 /* constant_fold.h */, + 0C12EC2B2616383B00B66C86 /* constant_map.h */, + 0C12EC2C2616383B00B66C86 /* fixup_onnx_controlflow.h */, + 0C12EC2D2616383B00B66C86 /* cast_all_constant_to_floating.h */, + 0C12EC2E2616383B00B66C86 /* fold_if_node.h */, + 0C12EC2F2616383B00B66C86 /* list_model_parameters.h */, + 0C12EC302616383B00B66C86 /* pattern_conversion */, + 0C12EC342616383B00B66C86 /* remove_inplace_ops_for_onnx.h */, + 0C12EC352616383B00B66C86 /* prepare_division_for_onnx.h */, + ); + path = onnx; + sourceTree = "<group>"; + }; + 0C12EC302616383B00B66C86 /* pattern_conversion */ = { + isa = PBXGroup; + children = ( + 0C12EC312616383B00B66C86 /* common.h */, + 0C12EC322616383B00B66C86 /* pattern_conversion.h */, + 0C12EC332616383B00B66C86 /* pattern_encapsulation.h */, + ); + path = pattern_conversion; + sourceTree = "<group>"; + }; + 0C12EC422616383B00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12EC432616383B00B66C86 /* memory_dag.h */, + 0C12EC442616383B00B66C86 /* subgraph_utils.h */, + 0C12EC452616383B00B66C86 /* check_alias_annotation.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12EC482616383B00B66C86 /* quantization */ = { + isa = PBXGroup; + children = ( + 0C12EC492616383B00B66C86 /* helper.h */, + 0C12EC4A2616383B00B66C86 /* quantization_type.h */, + 0C12EC4B2616383B00B66C86 /* insert_observers.h */, + 0C12EC4C2616383B00B66C86 /* dedup_module_uses.h */, + 0C12EC4D2616383B00B66C86 /* quantization_patterns.h */, + 0C12EC4E2616383B00B66C86 /* finalize.h */, + 0C12EC4F2616383B00B66C86 /* insert_quant_dequant.h */, + 0C12EC502616383B00B66C86 /* fusion_passes.h */, + ); + path = quantization; + sourceTree = "<group>"; + }; + 0C12EC752616383B00B66C86 /* docs */ = { + isa = PBXGroup; + children = ( + ); + path = docs; + sourceTree = "<group>"; + }; + 0C12EC762616383B00B66C86 /* codegen */ = { + isa = PBXGroup; + children = ( + 0C12EC772616383B00B66C86 /* cuda */, + 0C12ECAE2616383B00B66C86 /* fuser */, + ); + path = codegen; + sourceTree = "<group>"; + }; + 0C12EC772616383B00B66C86 /* cuda */ = { + isa = PBXGroup; + children = ( + 
0C12EC782616383B00B66C86 /* type.h */, + 0C12EC792616383B00B66C86 /* executor_kernel_arg.h */, + 0C12EC7A2616383B00B66C86 /* utils.h */, + 0C12EC7B2616383B00B66C86 /* kernel_ir_printer.h */, + 0C12EC7C2616383B00B66C86 /* tools */, + 0C12EC7D2616383B00B66C86 /* index_compute.h */, + 0C12EC7E2616383B00B66C86 /* transform_replay.h */, + 0C12EC7F2616383B00B66C86 /* parser.h */, + 0C12EC802616383B00B66C86 /* executor_utils.h */, + 0C12EC812616383B00B66C86 /* manager.h */, + 0C12EC822616383B00B66C86 /* scheduler.h */, + 0C12EC832616383B00B66C86 /* lower_unroll.h */, + 0C12EC842616383B00B66C86 /* runtime */, + 0C12EC852616383B00B66C86 /* ir_printer.h */, + 0C12EC862616383B00B66C86 /* lower_insert_syncs.h */, + 0C12EC872616383B00B66C86 /* lower2device.h */, + 0C12EC882616383B00B66C86 /* predicate_compute.h */, + 0C12EC892616383B00B66C86 /* compute_at.h */, + 0C12EC8A2616383B00B66C86 /* ir_all_nodes.h */, + 0C12EC8B2616383B00B66C86 /* mutator.h */, + 0C12EC8C2616383B00B66C86 /* docs */, + 0C12EC8F2616383B00B66C86 /* fusion.h */, + 0C12EC902616383B00B66C86 /* lower_loops.h */, + 0C12EC912616383B00B66C86 /* interface.h */, + 0C12EC922616383B00B66C86 /* arith.h */, + 0C12EC932616383B00B66C86 /* kernel_cache.h */, + 0C12EC942616383B00B66C86 /* codegen.h */, + 0C12EC952616383B00B66C86 /* ir_utils.h */, + 0C12EC962616383B00B66C86 /* lower_utils.h */, + 0C12EC972616383B00B66C86 /* lower_index.h */, + 0C12EC982616383B00B66C86 /* transform_rfactor.h */, + 0C12EC992616383B00B66C86 /* transform_iter.h */, + 0C12EC9A2616383B00B66C86 /* lower_alias_memory.h */, + 0C12EC9B2616383B00B66C86 /* executor.h */, + 0C12EC9C2616383B00B66C86 /* ir_graphviz.h */, + 0C12EC9D2616383B00B66C86 /* ir_iostream.h */, + 0C12EC9E2616383B00B66C86 /* partition.h */, + 0C12EC9F2616383B00B66C86 /* shape_inference.h */, + 0C12ECA02616383B00B66C86 /* kernel_ir_builder.h */, + 0C12ECA12616383B00B66C86 /* instrumentation.h */, + 0C12ECA22616383B00B66C86 /* kernel.h */, + 0C12ECA32616383B00B66C86 /* dispatch.h */, + 0C12ECA42616383B00B66C86 /* lower_validation.h */, + 0C12ECA52616383B00B66C86 /* ir_internal_nodes.h */, + 0C12ECA62616383B00B66C86 /* lower_thread_predicate.h */, + 0C12ECA72616383B00B66C86 /* ir_interface_nodes.h */, + 0C12ECA82616383B00B66C86 /* ir_cloner.h */, + 0C12ECA92616383B00B66C86 /* ir_base_nodes.h */, + 0C12ECAA2616383B00B66C86 /* executor_launch_params.h */, + 0C12ECAB2616383B00B66C86 /* kernel_ir.h */, + 0C12ECAC2616383B00B66C86 /* iter_visitor.h */, + 0C12ECAD2616383B00B66C86 /* expr_evaluator.h */, + ); + path = cuda; + sourceTree = "<group>"; + }; + 0C12EC7C2616383B00B66C86 /* tools */ = { + isa = PBXGroup; + children = ( + ); + path = tools; + sourceTree = "<group>"; + }; + 0C12EC842616383B00B66C86 /* runtime */ = { + isa = PBXGroup; + children = ( + ); + path = runtime; + sourceTree = "<group>"; + }; + 0C12EC8C2616383B00B66C86 /* docs */ = { + isa = PBXGroup; + children = ( + 0C12EC8D2616383B00B66C86 /* documentation.h */, + 0C12EC8E2616383B00B66C86 /* images */, + ); + path = docs; + sourceTree = "<group>"; + }; + 0C12EC8E2616383B00B66C86 /* images */ = { + isa = PBXGroup; + children = ( + ); + path = images; + sourceTree = "<group>"; + }; + 0C12ECAE2616383B00B66C86 /* fuser */ = { + isa = PBXGroup; + children = ( + 0C12ECAF2616383B00B66C86 /* tensor_info.h */, + 0C12ECB02616383B00B66C86 /* arg_spec.h */, + 0C12ECB12616383B00B66C86 /* compiler.h */, + 0C12ECB22616383B00B66C86 /* fallback.h */, + 0C12ECB32616383B00B66C86 /* cpu */, + 0C12ECB72616383B00B66C86 /* cuda */, + 0C12ECBA2616383B00B66C86 /* 
partition_desc.h */, + 0C12ECBB2616383B00B66C86 /* fused_kernel.h */, + 0C12ECBC2616383B00B66C86 /* kernel_spec.h */, + 0C12ECBD2616383B00B66C86 /* interface.h */, + 0C12ECBE2616383B00B66C86 /* kernel_cache.h */, + 0C12ECBF2616383B00B66C86 /* codegen.h */, + 0C12ECC02616383B00B66C86 /* executor.h */, + 0C12ECC12616383B00B66C86 /* tensor_desc.h */, + ); + path = fuser; + sourceTree = "<group>"; + }; + 0C12ECB32616383B00B66C86 /* cpu */ = { + isa = PBXGroup; + children = ( + 0C12ECB42616383B00B66C86 /* temp_file.h */, + 0C12ECB52616383B00B66C86 /* fused_kernel.h */, + 0C12ECB62616383B00B66C86 /* resource_strings.h */, + ); + path = cpu; + sourceTree = "<group>"; + }; + 0C12ECB72616383B00B66C86 /* cuda */ = { + isa = PBXGroup; + children = ( + 0C12ECB82616383B00B66C86 /* fused_kernel.h */, + 0C12ECB92616383B00B66C86 /* resource_strings.h */, + ); + path = cuda; + sourceTree = "<group>"; + }; + 0C12ECC22616383B00B66C86 /* testing */ = { + isa = PBXGroup; + children = ( + 0C12ECC32616383B00B66C86 /* file_check.h */, + 0C12ECC42616383B00B66C86 /* hooks_for_testing.h */, + ); + path = testing; + sourceTree = "<group>"; + }; + 0C12ECC62616383B00B66C86 /* mobile */ = { + isa = PBXGroup; + children = ( + 0C12ECC72616383B00B66C86 /* observer.h */, + 0C12ECC82616383B00B66C86 /* sequential.h */, + 0C12ECC92616383B00B66C86 /* interpreter.h */, + 0C12ECCA2616383B00B66C86 /* export_data.h */, + 0C12ECCB2616383B00B66C86 /* method.h */, + 0C12ECCC2616383B00B66C86 /* optim */, + 0C12ECCE2616383B00B66C86 /* import_data.h */, + 0C12ECCF2616383B00B66C86 /* type_parser.h */, + 0C12ECD02616383B00B66C86 /* import.h */, + 0C12ECD12616383B00B66C86 /* module.h */, + 0C12ECD22616383B00B66C86 /* function.h */, + ); + path = mobile; + sourceTree = "<group>"; + }; + 0C12ECCC2616383B00B66C86 /* optim */ = { + isa = PBXGroup; + children = ( + 0C12ECCD2616383B00B66C86 /* sgd.h */, + ); + path = optim; + sourceTree = "<group>"; + }; + 0C12ECD42616383B00B66C86 /* api */ = { + isa = PBXGroup; + children = ( + 0C12ECD52616383B00B66C86 /* function_impl.h */, + 0C12ECD62616383B00B66C86 /* method.h */, + 0C12ECD72616383B00B66C86 /* compilation_unit.h */, + 0C12ECD82616383B00B66C86 /* object.h */, + 0C12ECD92616383B00B66C86 /* module.h */, + ); + path = api; + sourceTree = "<group>"; + }; + 0C12ECDB2616383B00B66C86 /* api */ = { + isa = PBXGroup; + children = ( + 0C12ECDC2616383B00B66C86 /* include */, + 0C12ED892616383C00B66C86 /* src */, + ); + path = api; + sourceTree = "<group>"; + }; + 0C12ECDC2616383B00B66C86 /* include */ = { + isa = PBXGroup; + children = ( + 0C12ECDD2616383B00B66C86 /* torch */, + ); + path = include; + sourceTree = "<group>"; + }; + 0C12ECDD2616383B00B66C86 /* torch */ = { + isa = PBXGroup; + children = ( + 0C12ECDE2616383B00B66C86 /* fft.h */, + 0C12ECDF2616383B00B66C86 /* utils.h */, + 0C12ECE02616383B00B66C86 /* version.h */, + 0C12ECE12616383B00B66C86 /* nn */, + 0C12ED3B2616383C00B66C86 /* python */, + 0C12ED3D2616383C00B66C86 /* enum.h */, + 0C12ED3E2616383C00B66C86 /* types.h */, + 0C12ED3F2616383C00B66C86 /* all.h */, + 0C12ED402616383C00B66C86 /* data.h */, + 0C12ED412616383C00B66C86 /* arg.h */, + 0C12ED422616383C00B66C86 /* optim */, + 0C12ED4E2616383C00B66C86 /* serialize */, + 0C12ED532616383C00B66C86 /* torch.h */, + 0C12ED542616383C00B66C86 /* optim.h */, + 0C12ED552616383C00B66C86 /* jit.h */, + 0C12ED562616383C00B66C86 /* detail */, + 0C12ED592616383C00B66C86 /* nn.h */, + 0C12ED5A2616383C00B66C86 /* ordered_dict.h */, + 0C12ED5B2616383C00B66C86 /* cuda.h */, + 0C12ED5C2616383C00B66C86 /* 
autograd.h */, + 0C12ED5D2616383C00B66C86 /* linalg.h */, + 0C12ED5E2616383C00B66C86 /* special.h */, + 0C12ED5F2616383C00B66C86 /* python.h */, + 0C12ED602616383C00B66C86 /* serialize.h */, + 0C12ED612616383C00B66C86 /* data */, + 0C12ED882616383C00B66C86 /* expanding_array.h */, + ); + path = torch; + sourceTree = "<group>"; + }; + 0C12ECE12616383B00B66C86 /* nn */ = { + isa = PBXGroup; + children = ( + 0C12ECE22616383B00B66C86 /* options */, + 0C12ECF82616383C00B66C86 /* utils.h */, + 0C12ECF92616383C00B66C86 /* parallel */, + 0C12ECFB2616383C00B66C86 /* pimpl-inl.h */, + 0C12ECFC2616383C00B66C86 /* utils */, + 0C12ED002616383C00B66C86 /* options.h */, + 0C12ED012616383C00B66C86 /* functional.h */, + 0C12ED022616383C00B66C86 /* modules.h */, + 0C12ED032616383C00B66C86 /* pimpl.h */, + 0C12ED042616383C00B66C86 /* module.h */, + 0C12ED052616383C00B66C86 /* modules */, + 0C12ED282616383C00B66C86 /* init.h */, + 0C12ED292616383C00B66C86 /* cloneable.h */, + 0C12ED2A2616383C00B66C86 /* functional */, + ); + path = nn; + sourceTree = "<group>"; + }; + 0C12ECE22616383B00B66C86 /* options */ = { + isa = PBXGroup; + children = ( + 0C12ECE32616383B00B66C86 /* normalization.h */, + 0C12ECE42616383B00B66C86 /* rnn.h */, + 0C12ECE52616383B00B66C86 /* distance.h */, + 0C12ECE62616383B00B66C86 /* batchnorm.h */, + 0C12ECE72616383B00B66C86 /* linear.h */, + 0C12ECE82616383B00B66C86 /* instancenorm.h */, + 0C12ECE92616383B00B66C86 /* vision.h */, + 0C12ECEA2616383B00B66C86 /* transformercoder.h */, + 0C12ECEB2616383B00B66C86 /* dropout.h */, + 0C12ECEC2616383B00B66C86 /* upsampling.h */, + 0C12ECED2616383B00B66C86 /* embedding.h */, + 0C12ECEE2616383C00B66C86 /* fold.h */, + 0C12ECEF2616383C00B66C86 /* activation.h */, + 0C12ECF02616383C00B66C86 /* transformer.h */, + 0C12ECF12616383C00B66C86 /* pooling.h */, + 0C12ECF22616383C00B66C86 /* transformerlayer.h */, + 0C12ECF32616383C00B66C86 /* adaptive.h */, + 0C12ECF42616383C00B66C86 /* conv.h */, + 0C12ECF52616383C00B66C86 /* padding.h */, + 0C12ECF62616383C00B66C86 /* pixelshuffle.h */, + 0C12ECF72616383C00B66C86 /* loss.h */, + ); + path = options; + sourceTree = "<group>"; + }; + 0C12ECF92616383C00B66C86 /* parallel */ = { + isa = PBXGroup; + children = ( + 0C12ECFA2616383C00B66C86 /* data_parallel.h */, + ); + path = parallel; + sourceTree = "<group>"; + }; + 0C12ECFC2616383C00B66C86 /* utils */ = { + isa = PBXGroup; + children = ( + 0C12ECFD2616383C00B66C86 /* rnn.h */, + 0C12ECFE2616383C00B66C86 /* clip_grad.h */, + 0C12ECFF2616383C00B66C86 /* convert_parameters.h */, + ); + path = utils; + sourceTree = "<group>"; + }; + 0C12ED052616383C00B66C86 /* modules */ = { + isa = PBXGroup; + children = ( + 0C12ED062616383C00B66C86 /* normalization.h */, + 0C12ED072616383C00B66C86 /* utils.h */, + 0C12ED082616383C00B66C86 /* rnn.h */, + 0C12ED092616383C00B66C86 /* distance.h */, + 0C12ED0A2616383C00B66C86 /* batchnorm.h */, + 0C12ED0B2616383C00B66C86 /* linear.h */, + 0C12ED0C2616383C00B66C86 /* instancenorm.h */, + 0C12ED0D2616383C00B66C86 /* transformercoder.h */, + 0C12ED0E2616383C00B66C86 /* _functions.h */, + 0C12ED0F2616383C00B66C86 /* container */, + 0C12ED1A2616383C00B66C86 /* dropout.h */, + 0C12ED1B2616383C00B66C86 /* common.h */, + 0C12ED1C2616383C00B66C86 /* upsampling.h */, + 0C12ED1D2616383C00B66C86 /* embedding.h */, + 0C12ED1E2616383C00B66C86 /* fold.h */, + 0C12ED1F2616383C00B66C86 /* activation.h */, + 0C12ED202616383C00B66C86 /* transformer.h */, + 0C12ED212616383C00B66C86 /* pooling.h */, + 0C12ED222616383C00B66C86 /* transformerlayer.h 
*/, + 0C12ED232616383C00B66C86 /* adaptive.h */, + 0C12ED242616383C00B66C86 /* conv.h */, + 0C12ED252616383C00B66C86 /* padding.h */, + 0C12ED262616383C00B66C86 /* pixelshuffle.h */, + 0C12ED272616383C00B66C86 /* loss.h */, + ); + path = modules; + sourceTree = "<group>"; + }; + 0C12ED0F2616383C00B66C86 /* container */ = { + isa = PBXGroup; + children = ( + 0C12ED102616383C00B66C86 /* named_any.h */, + 0C12ED112616383C00B66C86 /* any_value.h */, + 0C12ED122616383C00B66C86 /* modulelist.h */, + 0C12ED132616383C00B66C86 /* moduledict.h */, + 0C12ED142616383C00B66C86 /* sequential.h */, + 0C12ED152616383C00B66C86 /* functional.h */, + 0C12ED162616383C00B66C86 /* parameterlist.h */, + 0C12ED172616383C00B66C86 /* parameterdict.h */, + 0C12ED182616383C00B66C86 /* any.h */, + 0C12ED192616383C00B66C86 /* any_module_holder.h */, + ); + path = container; + sourceTree = "<group>"; + }; + 0C12ED2A2616383C00B66C86 /* functional */ = { + isa = PBXGroup; + children = ( + 0C12ED2B2616383C00B66C86 /* normalization.h */, + 0C12ED2C2616383C00B66C86 /* distance.h */, + 0C12ED2D2616383C00B66C86 /* batchnorm.h */, + 0C12ED2E2616383C00B66C86 /* linear.h */, + 0C12ED2F2616383C00B66C86 /* instancenorm.h */, + 0C12ED302616383C00B66C86 /* vision.h */, + 0C12ED312616383C00B66C86 /* dropout.h */, + 0C12ED322616383C00B66C86 /* upsampling.h */, + 0C12ED332616383C00B66C86 /* embedding.h */, + 0C12ED342616383C00B66C86 /* fold.h */, + 0C12ED352616383C00B66C86 /* activation.h */, + 0C12ED362616383C00B66C86 /* pooling.h */, + 0C12ED372616383C00B66C86 /* conv.h */, + 0C12ED382616383C00B66C86 /* padding.h */, + 0C12ED392616383C00B66C86 /* pixelshuffle.h */, + 0C12ED3A2616383C00B66C86 /* loss.h */, + ); + path = functional; + sourceTree = "<group>"; + }; + 0C12ED3B2616383C00B66C86 /* python */ = { + isa = PBXGroup; + children = ( + 0C12ED3C2616383C00B66C86 /* init.h */, + ); + path = python; + sourceTree = "<group>"; + }; + 0C12ED422616383C00B66C86 /* optim */ = { + isa = PBXGroup; + children = ( + 0C12ED432616383C00B66C86 /* rmsprop.h */, + 0C12ED442616383C00B66C86 /* lbfgs.h */, + 0C12ED452616383C00B66C86 /* optimizer.h */, + 0C12ED462616383C00B66C86 /* adagrad.h */, + 0C12ED472616383C00B66C86 /* sgd.h */, + 0C12ED482616383C00B66C86 /* serialize.h */, + 0C12ED492616383C00B66C86 /* adamw.h */, + 0C12ED4A2616383C00B66C86 /* schedulers */, + 0C12ED4D2616383C00B66C86 /* adam.h */, + ); + path = optim; + sourceTree = "<group>"; + }; + 0C12ED4A2616383C00B66C86 /* schedulers */ = { + isa = PBXGroup; + children = ( + 0C12ED4B2616383C00B66C86 /* lr_scheduler.h */, + 0C12ED4C2616383C00B66C86 /* step_lr.h */, + ); + path = schedulers; + sourceTree = "<group>"; + }; + 0C12ED4E2616383C00B66C86 /* serialize */ = { + isa = PBXGroup; + children = ( + 0C12ED4F2616383C00B66C86 /* archive.h */, + 0C12ED502616383C00B66C86 /* input-archive.h */, + 0C12ED512616383C00B66C86 /* output-archive.h */, + 0C12ED522616383C00B66C86 /* tensor.h */, + ); + path = serialize; + sourceTree = "<group>"; + }; + 0C12ED562616383C00B66C86 /* detail */ = { + isa = PBXGroup; + children = ( + 0C12ED572616383C00B66C86 /* static.h */, + 0C12ED582616383C00B66C86 /* TensorDataContainer.h */, + ); + path = detail; + sourceTree = "<group>"; + }; + 0C12ED612616383C00B66C86 /* data */ = { + isa = PBXGroup; + children = ( + 0C12ED622616383C00B66C86 /* example.h */, + 0C12ED632616383C00B66C86 /* dataloader_options.h */, + 0C12ED642616383C00B66C86 /* datasets */, + 0C12ED6C2616383C00B66C86 /* worker_exception.h */, + 0C12ED6D2616383C00B66C86 /* dataloader.h */, + 
0C12ED6E2616383C00B66C86 /* detail */, + 0C12ED722616383C00B66C86 /* samplers.h */, + 0C12ED732616383C00B66C86 /* transforms */, + 0C12ED792616383C00B66C86 /* samplers */, + 0C12ED812616383C00B66C86 /* datasets.h */, + 0C12ED822616383C00B66C86 /* transforms.h */, + 0C12ED832616383C00B66C86 /* iterator.h */, + 0C12ED842616383C00B66C86 /* dataloader */, + ); + path = data; + sourceTree = "<group>"; + }; + 0C12ED642616383C00B66C86 /* datasets */ = { + isa = PBXGroup; + children = ( + 0C12ED652616383C00B66C86 /* mnist.h */, + 0C12ED662616383C00B66C86 /* shared.h */, + 0C12ED672616383C00B66C86 /* map.h */, + 0C12ED682616383C00B66C86 /* chunk.h */, + 0C12ED692616383C00B66C86 /* stateful.h */, + 0C12ED6A2616383C00B66C86 /* tensor.h */, + 0C12ED6B2616383C00B66C86 /* base.h */, + ); + path = datasets; + sourceTree = "<group>"; + }; + 0C12ED6E2616383C00B66C86 /* detail */ = { + isa = PBXGroup; + children = ( + 0C12ED6F2616383C00B66C86 /* data_shuttle.h */, + 0C12ED702616383C00B66C86 /* sequencers.h */, + 0C12ED712616383C00B66C86 /* queue.h */, + ); + path = detail; + sourceTree = "<group>"; + }; + 0C12ED732616383C00B66C86 /* transforms */ = { + isa = PBXGroup; + children = ( + 0C12ED742616383C00B66C86 /* lambda.h */, + 0C12ED752616383C00B66C86 /* stack.h */, + 0C12ED762616383C00B66C86 /* collate.h */, + 0C12ED772616383C00B66C86 /* tensor.h */, + 0C12ED782616383C00B66C86 /* base.h */, + ); + path = transforms; + sourceTree = "<group>"; + }; + 0C12ED792616383C00B66C86 /* samplers */ = { + isa = PBXGroup; + children = ( + 0C12ED7A2616383C00B66C86 /* sequential.h */, + 0C12ED7B2616383C00B66C86 /* custom_batch_request.h */, + 0C12ED7C2616383C00B66C86 /* stream.h */, + 0C12ED7D2616383C00B66C86 /* distributed.h */, + 0C12ED7E2616383C00B66C86 /* serialize.h */, + 0C12ED7F2616383C00B66C86 /* random.h */, + 0C12ED802616383C00B66C86 /* base.h */, + ); + path = samplers; + sourceTree = "<group>"; + }; + 0C12ED842616383C00B66C86 /* dataloader */ = { + isa = PBXGroup; + children = ( + 0C12ED852616383C00B66C86 /* stateless.h */, + 0C12ED862616383C00B66C86 /* stateful.h */, + 0C12ED872616383C00B66C86 /* base.h */, + ); + path = dataloader; + sourceTree = "<group>"; + }; + 0C12ED892616383C00B66C86 /* src */ = { + isa = PBXGroup; + children = ( + 0C12ED8A2616383C00B66C86 /* nn */, + 0C12ED8E2616383C00B66C86 /* python */, + 0C12ED8F2616383C00B66C86 /* optim */, + 0C12ED912616383C00B66C86 /* serialize */, + 0C12ED922616383C00B66C86 /* data */, + ); + path = src; + sourceTree = "<group>"; + }; + 0C12ED8A2616383C00B66C86 /* nn */ = { + isa = PBXGroup; + children = ( + 0C12ED8B2616383C00B66C86 /* options */, + 0C12ED8C2616383C00B66C86 /* modules */, + ); + path = nn; + sourceTree = "<group>"; + }; + 0C12ED8B2616383C00B66C86 /* options */ = { + isa = PBXGroup; + children = ( + ); + path = options; + sourceTree = "<group>"; + }; + 0C12ED8C2616383C00B66C86 /* modules */ = { + isa = PBXGroup; + children = ( + 0C12ED8D2616383C00B66C86 /* container */, + ); + path = modules; + sourceTree = "<group>"; + }; + 0C12ED8D2616383C00B66C86 /* container */ = { + isa = PBXGroup; + children = ( + ); + path = container; + sourceTree = "<group>"; + }; + 0C12ED8E2616383C00B66C86 /* python */ = { + isa = PBXGroup; + children = ( + ); + path = python; + sourceTree = "<group>"; + }; + 0C12ED8F2616383C00B66C86 /* optim */ = { + isa = PBXGroup; + children = ( + 0C12ED902616383C00B66C86 /* schedulers */, + ); + path = optim; + sourceTree = "<group>"; + }; + 0C12ED902616383C00B66C86 /* schedulers */ = { + isa = PBXGroup; + children = ( + ); + path 
= schedulers; + sourceTree = "<group>"; + }; + 0C12ED912616383C00B66C86 /* serialize */ = { + isa = PBXGroup; + children = ( + ); + path = serialize; + sourceTree = "<group>"; + }; + 0C12ED922616383C00B66C86 /* data */ = { + isa = PBXGroup; + children = ( + 0C12ED932616383C00B66C86 /* datasets */, + 0C12ED942616383C00B66C86 /* samplers */, + ); + path = data; + sourceTree = "<group>"; + }; + 0C12ED932616383C00B66C86 /* datasets */ = { + isa = PBXGroup; + children = ( + ); + path = datasets; + sourceTree = "<group>"; + }; + 0C12ED942616383C00B66C86 /* samplers */ = { + isa = PBXGroup; + children = ( + ); + path = samplers; + sourceTree = "<group>"; + }; + 0C12ED962616383C00B66C86 /* generic */ = { + isa = PBXGroup; + children = ( + 0C12ED972616383C00B66C86 /* utils.h */, + 0C12ED982616383C00B66C86 /* serialization.h */, + 0C12ED992616383C00B66C86 /* Storage.h */, + ); + path = generic; + sourceTree = "<group>"; + }; + 0C12ED9A2616383C00B66C86 /* tensor */ = { + isa = PBXGroup; + children = ( + 0C12ED9B2616383C00B66C86 /* python_tensor.h */, + ); + path = tensor; + sourceTree = "<group>"; + }; + 0C12EDAF2616383C00B66C86 /* ATen */ = { + isa = PBXGroup; + children = ( + 0C12EDB02616383C00B66C86 /* Formatting.h */, + 0C12EDB12616383C00B66C86 /* CPUFunctions.h */, + 0C12EDB22616383C00B66C86 /* MetaFunctions.h */, + 0C12EDB32616383C00B66C86 /* Utils.h */, + 0C12EDB42616383C00B66C86 /* CUDAGeneratorImpl.h */, + 0C12EDB52616383C00B66C86 /* TensorOptions.h */, + 0C12EDB62616383C00B66C86 /* TensorUtils.h */, + 0C12EDB72616383C00B66C86 /* MemoryOverlap.h */, + 0C12EDB82616383C00B66C86 /* InitialTensorOptions.h */, + 0C12EDB92616383C00B66C86 /* Version.h */, + 0C12EDBA2616383C00B66C86 /* DLConvertor.h */, + 0C12EDBB2616383C00B66C86 /* Device.h */, + 0C12EDBC2616383C00B66C86 /* core */, + 0C12EE0A2616383C00B66C86 /* VmapMode.h */, + 0C12EE0B2616383C00B66C86 /* BatchedFallback.h */, + 0C12EE0C2616383C00B66C86 /* dlpack.h */, + 0C12EE0D2616383C00B66C86 /* Config.h */, + 0C12EE0E2616383C00B66C86 /* SparseTensorUtils.h */, + 0C12EE0F2616383C00B66C86 /* Backtrace.h */, + 0C12EE102616383C00B66C86 /* cpu */, + 0C12EE222616383C00B66C86 /* TracerMode.h */, + 0C12EE232616383C00B66C86 /* Backend.h */, + 0C12EE242616383C00B66C86 /* RegistrationDeclarations.h */, + 0C12EE252616383C00B66C86 /* CompositeImplicitAutogradFunctions.h */, + 0C12EE262616383C00B66C86 /* PTThreadPool.h */, + 0C12EE272616383C00B66C86 /* OpaqueTensorImpl.h */, + 0C12EE282616383C00B66C86 /* LegacyTHFunctionsCPU.h */, + 0C12EE292616383C00B66C86 /* quantized */, + 0C12EE2C2616383C00B66C86 /* record_function.h */, + 0C12EE2D2616383C00B66C86 /* WrapDimUtils.h */, + 0C12EE2E2616383C00B66C86 /* RedispatchFunctions.h */, + 0C12EE2F2616383C00B66C86 /* Context.h */, + 0C12EE302616383C00B66C86 /* div_rtn.h */, + 0C12EE312616383C00B66C86 /* ExpandUtils.h */, + 0C12EE322616383C00B66C86 /* TypeDefault.h */, + 0C12EE332616383C00B66C86 /* CPUFixedAllocator.h */, + 0C12EE342616383C00B66C86 /* NamedTensor.h */, + 0C12EE352616383C00B66C86 /* Scalar.h */, + 0C12EE362616383C00B66C86 /* ParallelNativeTBB.h */, + 0C12EE372616383C00B66C86 /* ArrayRef.h */, + 0C12EE382616383C00B66C86 /* SequenceNumber.h */, + 0C12EE392616383C00B66C86 /* MatrixRef.h */, + 0C12EE3A2616383C00B66C86 /* CompositeExplicitAutogradFunctions.h */, + 0C12EE3B2616383C00B66C86 /* NumericUtils.h */, + 0C12EE3C2616383C00B66C86 /* ATen.h */, + 0C12EE3D2616383C00B66C86 /* TensorNames.h */, + 0C12EE3E2616383C00B66C86 /* TensorMeta.h */, + 0C12EE3F2616383C00B66C86 /* TensorIndexing.h */, + 
0C12EE402616383C00B66C86 /* Layout.h */, + 0C12EE412616383C00B66C86 /* SparseTensorImpl.h */, + 0C12EE422616383C00B66C86 /* detail */, + 0C12EE462616383C00B66C86 /* WrapDimUtilsMulti.h */, + 0C12EE472616383C00B66C86 /* TensorOperators.h */, + 0C12EE482616383C00B66C86 /* ScalarType.h */, + 0C12EE492616383C00B66C86 /* cpp_custom_type_hack.h */, + 0C12EE4A2616383C00B66C86 /* VmapTransforms.h */, + 0C12EE4B2616383C00B66C86 /* Storage.h */, + 0C12EE4C2616383C00B66C86 /* DeviceGuard.h */, + 0C12EE4D2616383C00B66C86 /* ParallelNative.h */, + 0C12EE4E2616383C00B66C86 /* Dispatch.h */, + 0C12EE4F2616383C00B66C86 /* CPUGeneratorImpl.h */, + 0C12EE502616383C00B66C86 /* Functions.h */, + 0C12EE512616383C00B66C86 /* ParallelOpenMP.h */, + 0C12EE522616383C00B66C86 /* BatchedTensorImpl.h */, + 0C12EE532616383C00B66C86 /* CPUApplyUtils.h */, + 0C12EE542616383C00B66C86 /* ThreadLocalState.h */, + 0C12EE552616383C00B66C86 /* ScalarOps.h */, + 0C12EE562616383C00B66C86 /* NativeFunctions.h */, + 0C12EE572616383C00B66C86 /* DynamicLibrary.h */, + 0C12EE582616383C00B66C86 /* TensorGeometry.h */, + 0C12EE592616383C00B66C86 /* TensorIterator.h */, + 0C12EE5A2616383C00B66C86 /* NamedTensorUtils.h */, + 0C12EE5B2616383C00B66C86 /* Dimname.h */, + 0C12EE5C2616383C00B66C86 /* autocast_mode.h */, + 0C12EE5D2616383C00B66C86 /* Parallel.h */, + 0C12EE5E2616383C00B66C86 /* DimVector.h */, + 0C12EE5F2616383C00B66C86 /* InferSize.h */, + 0C12EE602616383C00B66C86 /* SmallVector.h */, + 0C12EE612616383C00B66C86 /* Tensor.h */, + 0C12EE622616383C00B66C86 /* Generator.h */, + 0C12EE632616383C00B66C86 /* AccumulateType.h */, + 0C12EE642616383C00B66C86 /* TensorAccessor.h */, + 0C12EE652616383C00B66C86 /* LegacyTHFunctionsCUDA.h */, + ); + path = ATen; + sourceTree = "<group>"; + }; + 0C12EDBC2616383C00B66C86 /* core */ = { + isa = PBXGroup; + children = ( + 0C12EDBD2616383C00B66C86 /* Dict_inl.h */, + 0C12EDBE2616383C00B66C86 /* Formatting.h */, + 0C12EDBF2616383C00B66C86 /* TensorBody.h */, + 0C12EDC02616383C00B66C86 /* op_registration */, + 0C12EDC52616383C00B66C86 /* jit_type_base.h */, + 0C12EDC62616383C00B66C86 /* typeid.h */, + 0C12EDC72616383C00B66C86 /* rref_interface.h */, + 0C12EDC82616383C00B66C86 /* Range.h */, + 0C12EDC92616383C00B66C86 /* interned_strings_class.h */, + 0C12EDCA2616383C00B66C86 /* operator_name.h */, + 0C12EDCB2616383C00B66C86 /* DeprecatedTypePropertiesRegistry.h */, + 0C12EDCC2616383C00B66C86 /* Backtrace.h */, + 0C12EDCD2616383C00B66C86 /* TransformationHelper.h */, + 0C12EDCE2616383C00B66C86 /* blob.h */, + 0C12EDCF2616383C00B66C86 /* function_schema.h */, + 0C12EDD02616383C00B66C86 /* dispatch */, + 0C12EDD82616383C00B66C86 /* MT19937RNGEngine.h */, + 0C12EDD92616383C00B66C86 /* ivalue_to.h */, + 0C12EDDA2616383C00B66C86 /* aten_interned_strings.h */, + 0C12EDDB2616383C00B66C86 /* LegacyTypeDispatch.h */, + 0C12EDDC2616383C00B66C86 /* function_schema_inl.h */, + 0C12EDDD2616383C00B66C86 /* qualified_name.h */, + 0C12EDDE2616383C00B66C86 /* UndefinedTensorImpl.h */, + 0C12EDDF2616383C00B66C86 /* NamedTensor.h */, + 0C12EDE02616383C00B66C86 /* Scalar.h */, + 0C12EDE12616383C00B66C86 /* functional.h */, + 0C12EDE22616383C00B66C86 /* DeprecatedTypeProperties.h */, + 0C12EDE32616383C00B66C86 /* interned_strings.h */, + 0C12EDE42616383C00B66C86 /* List.h */, + 0C12EDE52616383C00B66C86 /* ATenOpList.h */, + 0C12EDE62616383C00B66C86 /* Dict.h */, + 0C12EDE72616383C00B66C86 /* grad_mode.h */, + 0C12EDE82616383C00B66C86 /* DistributionsHelper.h */, + 0C12EDE92616383C00B66C86 /* Macros.h */, + 
0C12EDEA2616383C00B66C86 /* VariableHooksInterface.h */, + 0C12EDEB2616383C00B66C86 /* ScalarType.h */, + 0C12EDEC2616383C00B66C86 /* Array.h */, + 0C12EDED2616383C00B66C86 /* stack.h */, + 0C12EDEE2616383C00B66C86 /* ATenGeneral.h */, + 0C12EDEF2616383C00B66C86 /* UnsafeFromTH.h */, + 0C12EDF02616383C00B66C86 /* QuantizerBase.h */, + 0C12EDF12616383C00B66C86 /* alias_info.h */, + 0C12EDF22616383C00B66C86 /* List_inl.h */, + 0C12EDF32616383C00B66C86 /* jit_type.h */, + 0C12EDF42616383C00B66C86 /* ivalue.h */, + 0C12EDF52616383C00B66C86 /* Dimname.h */, + 0C12EDF62616383C00B66C86 /* Vitals.h */, + 0C12EDF72616383C00B66C86 /* boxing */, + 0C12EE002616383C00B66C86 /* builtin_function.h */, + 0C12EE012616383C00B66C86 /* DimVector.h */, + 0C12EE022616383C00B66C86 /* Reduction.h */, + 0C12EE032616383C00B66C86 /* Tensor.h */, + 0C12EE042616383C00B66C86 /* function.h */, + 0C12EE052616383C00B66C86 /* Generator.h */, + 0C12EE062616383C00B66C86 /* PhiloxRNGEngine.h */, + 0C12EE072616383C00B66C86 /* TensorAccessor.h */, + 0C12EE082616383C00B66C86 /* ivalue_inl.h */, + 0C12EE092616383C00B66C86 /* Variadic.h */, + ); + path = core; + sourceTree = "<group>"; + }; + 0C12EDC02616383C00B66C86 /* op_registration */ = { + isa = PBXGroup; + children = ( + 0C12EDC12616383C00B66C86 /* adaption.h */, + 0C12EDC22616383C00B66C86 /* op_allowlist.h */, + 0C12EDC32616383C00B66C86 /* op_registration.h */, + 0C12EDC42616383C00B66C86 /* infer_schema.h */, + ); + path = op_registration; + sourceTree = "<group>"; + }; + 0C12EDD02616383C00B66C86 /* dispatch */ = { + isa = PBXGroup; + children = ( + 0C12EDD12616383C00B66C86 /* OperatorOptions.h */, + 0C12EDD22616383C00B66C86 /* RegistrationHandleRAII.h */, + 0C12EDD32616383C00B66C86 /* ObservedOperators.h */, + 0C12EDD42616383C00B66C86 /* DispatchKeyExtractor.h */, + 0C12EDD52616383C00B66C86 /* Dispatcher.h */, + 0C12EDD62616383C00B66C86 /* CppSignature.h */, + 0C12EDD72616383C00B66C86 /* OperatorEntry.h */, + ); + path = dispatch; + sourceTree = "<group>"; + }; + 0C12EDF72616383C00B66C86 /* boxing */ = { + isa = PBXGroup; + children = ( + 0C12EDF82616383C00B66C86 /* impl */, + 0C12EDFE2616383C00B66C86 /* KernelFunction.h */, + 0C12EDFF2616383C00B66C86 /* KernelFunction_impl.h */, + ); + path = boxing; + sourceTree = "<group>"; + }; + 0C12EDF82616383C00B66C86 /* impl */ = { + isa = PBXGroup; + children = ( + 0C12EDF92616383C00B66C86 /* make_boxed_from_unboxed_functor.h */, + 0C12EDFA2616383C00B66C86 /* boxing.h */, + 0C12EDFB2616383C00B66C86 /* test_helpers.h */, + 0C12EDFC2616383C00B66C86 /* WrapFunctionIntoFunctor.h */, + 0C12EDFD2616383C00B66C86 /* WrapFunctionIntoRuntimeFunctor.h */, + ); + path = impl; + sourceTree = "<group>"; + }; + 0C12EE102616383C00B66C86 /* cpu */ = { + isa = PBXGroup; + children = ( + 0C12EE112616383C00B66C86 /* vec256 */, + 0C12EE202616383C00B66C86 /* FlushDenormal.h */, + 0C12EE212616383C00B66C86 /* vml.h */, + ); + path = cpu; + sourceTree = "<group>"; + }; + 0C12EE112616383C00B66C86 /* vec256 */ = { + isa = PBXGroup; + children = ( + 0C12EE122616383C00B66C86 /* vec256_bfloat16.h */, + 0C12EE132616383C00B66C86 /* vec256_float_neon.h */, + 0C12EE142616383C00B66C86 /* missing_vst1_neon.h */, + 0C12EE152616383C00B66C86 /* vec256_qint.h */, + 0C12EE162616383C00B66C86 /* intrinsics.h */, + 0C12EE172616383C00B66C86 /* functional.h */, + 0C12EE182616383C00B66C86 /* vec256_complex_float.h */, + 0C12EE192616383C00B66C86 /* vec256_double.h */, + 0C12EE1A2616383C00B66C86 /* vec256_base.h */, + 0C12EE1B2616383C00B66C86 /* vec256_float.h */, + 
0C12EE1C2616383C00B66C86 /* missing_vld1_neon.h */, + 0C12EE1D2616383C00B66C86 /* vec256.h */, + 0C12EE1E2616383C00B66C86 /* vec256_int.h */, + 0C12EE1F2616383C00B66C86 /* vec256_complex_double.h */, + ); + path = vec256; + sourceTree = "<group>"; + }; + 0C12EE292616383C00B66C86 /* quantized */ = { + isa = PBXGroup; + children = ( + 0C12EE2A2616383C00B66C86 /* QTensorImpl.h */, + 0C12EE2B2616383C00B66C86 /* Quantizer.h */, + ); + path = quantized; + sourceTree = "<group>"; + }; + 0C12EE422616383C00B66C86 /* detail */ = { + isa = PBXGroup; + children = ( + 0C12EE432616383C00B66C86 /* CUDAHooksInterface.h */, + 0C12EE442616383C00B66C86 /* FunctionTraits.h */, + 0C12EE452616383C00B66C86 /* HIPHooksInterface.h */, + ); + path = detail; + sourceTree = "<group>"; + }; + 0C12EE662616383C00B66C86 /* c10 */ = { + isa = PBXGroup; + children = ( + 0C12EE672616383C00B66C86 /* benchmark */, + 0C12EE682616383C00B66C86 /* core */, + 0C12EE912616383C00B66C86 /* test */, + 0C12EE982616383C00B66C86 /* util */, + 0C12EEDA2616383C00B66C86 /* cuda */, + 0C12EEE82616383C00B66C86 /* macros */, + 0C12EEEC2616383C00B66C86 /* mobile */, + 0C12EEEF2616383C00B66C86 /* hip */, + ); + path = c10; + sourceTree = "<group>"; + }; + 0C12EE672616383C00B66C86 /* benchmark */ = { + isa = PBXGroup; + children = ( + ); + path = benchmark; + sourceTree = "<group>"; + }; + 0C12EE682616383C00B66C86 /* core */ = { + isa = PBXGroup; + children = ( + 0C12EE692616383C00B66C86 /* impl */, + 0C12EE722616383C00B66C86 /* QEngine.h */, + 0C12EE732616383C00B66C86 /* TensorOptions.h */, + 0C12EE742616383C00B66C86 /* Device.h */, + 0C12EE752616383C00B66C86 /* CPUAllocator.h */, + 0C12EE762616383C00B66C86 /* DefaultDtype.h */, + 0C12EE772616383C00B66C86 /* DefaultTensorOptions.h */, + 0C12EE782616383C00B66C86 /* Event.h */, + 0C12EE792616383C00B66C86 /* Backend.h */, + 0C12EE7A2616383C00B66C86 /* CompileTimeFunctionPointer.h */, + 0C12EE7B2616383C00B66C86 /* WrapDimMinimal.h */, + 0C12EE7C2616383C00B66C86 /* QScheme.h */, + 0C12EE7D2616383C00B66C86 /* Stream.h */, + 0C12EE7E2616383C00B66C86 /* UndefinedTensorImpl.h */, + 0C12EE7F2616383C00B66C86 /* Scalar.h */, + 0C12EE802616383C00B66C86 /* thread_pool.h */, + 0C12EE812616383C00B66C86 /* CopyBytes.h */, + 0C12EE822616383C00B66C86 /* StreamGuard.h */, + 0C12EE832616383C00B66C86 /* Layout.h */, + 0C12EE842616383C00B66C86 /* GeneratorImpl.h */, + 0C12EE852616383C00B66C86 /* DispatchKeySet.h */, + 0C12EE862616383C00B66C86 /* Allocator.h */, + 0C12EE872616383C00B66C86 /* TensorImpl.h */, + 0C12EE882616383C00B66C86 /* ScalarType.h */, + 0C12EE892616383C00B66C86 /* Storage.h */, + 0C12EE8A2616383C00B66C86 /* DeviceType.h */, + 0C12EE8B2616383C00B66C86 /* DeviceGuard.h */, + 0C12EE8C2616383C00B66C86 /* StorageImpl.h */, + 0C12EE8D2616383C00B66C86 /* MemoryFormat.h */, + 0C12EE8E2616383C00B66C86 /* DispatchKey.h */, + 0C12EE8F2616383C00B66C86 /* ScalarTypeToTypeMeta.h */, + 0C12EE902616383C00B66C86 /* InferenceMode.h */, + ); + path = core; + sourceTree = "<group>"; + }; + 0C12EE692616383C00B66C86 /* impl */ = { + isa = PBXGroup; + children = ( + 0C12EE6A2616383C00B66C86 /* InlineStreamGuard.h */, + 0C12EE6B2616383C00B66C86 /* SizesAndStrides.h */, + 0C12EE6C2616383C00B66C86 /* InlineDeviceGuard.h */, + 0C12EE6D2616383C00B66C86 /* LocalDispatchKeySet.h */, + 0C12EE6E2616383C00B66C86 /* VirtualGuardImpl.h */, + 0C12EE6F2616383C00B66C86 /* InlineEvent.h */, + 0C12EE702616383C00B66C86 /* DeviceGuardImplInterface.h */, + 0C12EE712616383C00B66C86 /* FakeGuardImpl.h */, + ); + path = impl; + sourceTree = 
"<group>"; + }; + 0C12EE912616383C00B66C86 /* test */ = { + isa = PBXGroup; + children = ( + 0C12EE922616383C00B66C86 /* core */, + 0C12EE942616383C00B66C86 /* util */, + ); + path = test; + sourceTree = "<group>"; + }; + 0C12EE922616383C00B66C86 /* core */ = { + isa = PBXGroup; + children = ( + 0C12EE932616383C00B66C86 /* impl */, + ); + path = core; + sourceTree = "<group>"; + }; + 0C12EE932616383C00B66C86 /* impl */ = { + isa = PBXGroup; + children = ( + ); + path = impl; + sourceTree = "<group>"; + }; + 0C12EE942616383C00B66C86 /* util */ = { + isa = PBXGroup; + children = ( + 0C12EE952616383C00B66C86 /* complex_test_common.h */, + 0C12EE962616383C00B66C86 /* complex_math_test_common.h */, + 0C12EE972616383C00B66C86 /* Macros.h */, + ); + path = util; + sourceTree = "<group>"; + }; + 0C12EE982616383C00B66C86 /* util */ = { + isa = PBXGroup; + children = ( + 0C12EE992616383C00B66C86 /* Type.h */, + 0C12EE9A2616383C00B66C86 /* order_preserving_flat_hash_map.h */, + 0C12EE9B2616383C00B66C86 /* reverse_iterator.h */, + 0C12EE9C2616383C00B66C86 /* quint4x2.h */, + 0C12EE9D2616383C00B66C86 /* Half.h */, + 0C12EE9E2616383C00B66C86 /* flat_hash_map.h */, + 0C12EE9F2616383C00B66C86 /* llvmMathExtras.h */, + 0C12EEA02616383C00B66C86 /* math_compat.h */, + 0C12EEA12616383C00B66C86 /* Bitset.h */, + 0C12EEA22616383C00B66C86 /* typeid.h */, + 0C12EEA32616383C00B66C86 /* intrusive_ptr.h */, + 0C12EEA42616383C00B66C86 /* string_utils.h */, + 0C12EEA52616383C00B66C86 /* win32-headers.h */, + 0C12EEA62616383C00B66C86 /* AlignOf.h */, + 0C12EEA72616383C00B66C86 /* numa.h */, + 0C12EEA82616383C00B66C86 /* qint32.h */, + 0C12EEA92616383C00B66C86 /* MaybeOwned.h */, + 0C12EEAA2616383C00B66C86 /* Half-inl.h */, + 0C12EEAB2616383C00B66C86 /* TypeTraits.h */, + 0C12EEAC2616383C00B66C86 /* FunctionRef.h */, + 0C12EEAD2616383C00B66C86 /* Backtrace.h */, + 0C12EEAE2616383C00B66C86 /* BFloat16-inl.h */, + 0C12EEAF2616383C00B66C86 /* in_place.h */, + 0C12EEB02616383C00B66C86 /* ConstexprCrc.h */, + 0C12EEB12616383C00B66C86 /* IdWrapper.h */, + 0C12EEB22616383C00B66C86 /* Flags.h */, + 0C12EEB32616383C00B66C86 /* overloaded.h */, + 0C12EEB42616383C00B66C86 /* quint8.h */, + 0C12EEB52616383C00B66C86 /* StringUtil.h */, + 0C12EEB62616383C00B66C86 /* Logging.h */, + 0C12EEB72616383C00B66C86 /* MathConstants.h */, + 0C12EEB82616383C00B66C86 /* Registry.h */, + 0C12EEB92616383C00B66C86 /* Optional.h */, + 0C12EEBA2616383C00B66C86 /* tempfile.h */, + 0C12EEBB2616383C00B66C86 /* ArrayRef.h */, + 0C12EEBC2616383C00B66C86 /* thread_name.h */, + 0C12EEBD2616383C00B66C86 /* Unicode.h */, + 0C12EEBE2616383C00B66C86 /* TypeCast.h */, + 0C12EEBF2616383C00B66C86 /* sparse_bitset.h */, + 0C12EEC02616383C00B66C86 /* BFloat16.h */, + 0C12EEC12616383C00B66C86 /* TypeList.h */, + 0C12EEC22616383C00B66C86 /* TypeIndex.h */, + 0C12EEC32616383C00B66C86 /* Array.h */, + 0C12EEC42616383C00B66C86 /* logging_is_google_glog.h */, + 0C12EEC52616383C00B66C86 /* Metaprogramming.h */, + 0C12EEC62616383C00B66C86 /* either.h */, + 0C12EEC72616383C00B66C86 /* BFloat16-math.h */, + 0C12EEC82616383C00B66C86 /* Deprecated.h */, + 0C12EEC92616383C00B66C86 /* irange.h */, + 0C12EECA2616383C00B66C86 /* LeftRight.h */, + 0C12EECB2616383C00B66C86 /* qint8.h */, + 0C12EECC2616383C00B66C86 /* complex_math.h */, + 0C12EECD2616383C00B66C86 /* logging_is_not_google_glog.h */, + 0C12EECE2616383C00B66C86 /* Exception.h */, + 0C12EECF2616383C00B66C86 /* UniqueVoidPtr.h */, + 0C12EED02616383C00B66C86 /* ThreadLocalDebugInfo.h */, + 0C12EED12616383C00B66C86 /* 
accumulate.h */, + 0C12EED22616383C00B66C86 /* C++17.h */, + 0C12EED32616383C00B66C86 /* SmallVector.h */, + 0C12EED42616383C00B66C86 /* hash.h */, + 0C12EED52616383C00B66C86 /* python_stub.h */, + 0C12EED62616383C00B66C86 /* complex.h */, + 0C12EED72616383C00B66C86 /* string_view.h */, + 0C12EED82616383C00B66C86 /* variant.h */, + 0C12EED92616383C00B66C86 /* complex_utils.h */, + ); + path = util; + sourceTree = "<group>"; + }; + 0C12EEDA2616383C00B66C86 /* cuda */ = { + isa = PBXGroup; + children = ( + 0C12EEDB2616383C00B66C86 /* impl */, + 0C12EEDE2616383C00B66C86 /* CUDAMathCompat.h */, + 0C12EEDF2616383C00B66C86 /* test */, + 0C12EEE12616383C00B66C86 /* CUDAStream.h */, + 0C12EEE22616383C00B66C86 /* CUDAGuard.h */, + 0C12EEE32616383C00B66C86 /* CUDAGraphsC10Utils.h */, + 0C12EEE42616383C00B66C86 /* CUDAMacros.h */, + 0C12EEE52616383C00B66C86 /* CUDAFunctions.h */, + 0C12EEE62616383C00B66C86 /* CUDAException.h */, + 0C12EEE72616383C00B66C86 /* CUDACachingAllocator.h */, + ); + path = cuda; + sourceTree = "<group>"; + }; + 0C12EEDB2616383C00B66C86 /* impl */ = { + isa = PBXGroup; + children = ( + 0C12EEDC2616383C00B66C86 /* CUDATest.h */, + 0C12EEDD2616383C00B66C86 /* CUDAGuardImpl.h */, + ); + path = impl; + sourceTree = "<group>"; + }; + 0C12EEDF2616383C00B66C86 /* test */ = { + isa = PBXGroup; + children = ( + 0C12EEE02616383C00B66C86 /* impl */, + ); + path = test; + sourceTree = "<group>"; + }; + 0C12EEE02616383C00B66C86 /* impl */ = { + isa = PBXGroup; + children = ( + ); + path = impl; + sourceTree = "<group>"; + }; + 0C12EEE82616383C00B66C86 /* macros */ = { + isa = PBXGroup; + children = ( + 0C12EEE92616383C00B66C86 /* cmake_macros.h */, + 0C12EEEA2616383C00B66C86 /* Export.h */, + 0C12EEEB2616383C00B66C86 /* Macros.h */, + ); + path = macros; + sourceTree = "<group>"; + }; + 0C12EEEC2616383C00B66C86 /* mobile */ = { + isa = PBXGroup; + children = ( + 0C12EEED2616383C00B66C86 /* CPUCachingAllocator.h */, + 0C12EEEE2616383C00B66C86 /* CPUProfilingAllocator.h */, + ); + path = mobile; + sourceTree = "<group>"; + }; + 0C12EEEF2616383C00B66C86 /* hip */ = { + isa = PBXGroup; + children = ( + ); + path = hip; + sourceTree = "<group>"; + }; + 0C12EEF22616383C00B66C86 /* fp16 */ = { + isa = PBXGroup; + children = ( + 0C12EEF32616383C00B66C86 /* avx.py */, + 0C12EEF42616383C00B66C86 /* __init__.py */, + 0C12EEF52616383C00B66C86 /* fp16.h */, + 0C12EEF62616383C00B66C86 /* avx2.py */, + 0C12EEF72616383C00B66C86 /* psimd.h */, + 0C12EEF82616383C00B66C86 /* bitcasts.h */, + ); + path = fp16; + sourceTree = "<group>"; + }; + 0C12EEF92616383C00B66C86 /* THCUNN */ = { + isa = PBXGroup; + children = ( + 0C12EEFA2616383C00B66C86 /* generic */, + ); + path = THCUNN; + sourceTree = "<group>"; + }; + 0C12EEFA2616383C00B66C86 /* generic */ = { + isa = PBXGroup; + children = ( + 0C12EEFB2616383C00B66C86 /* THCUNN.h */, + ); + path = generic; + sourceTree = "<group>"; + }; + 0C12EEFC2616383C00B66C86 /* TH */ = { + isa = PBXGroup; + children = ( + 0C12EEFD2616383C00B66C86 /* THTensorDimApply.h */, + 0C12EEFE2616383C00B66C86 /* THBlas.h */, + 0C12EEFF2616383C00B66C86 /* THGenerateQUInt8Type.h */, + 0C12EF002616383C00B66C86 /* THGenerateQInt8Type.h */, + 0C12EF012616383C00B66C86 /* THGenerateComplexTypes.h */, + 0C12EF022616383C00B66C86 /* THGenerateFloatType.h */, + 0C12EF032616383C00B66C86 /* THGenerateQInt32Type.h */, + 0C12EF042616383C00B66C86 /* THGenerateDoubleType.h */, + 0C12EF052616383C00B66C86 /* THGenerateShortType.h */, + 0C12EF062616383C00B66C86 /* THGenerateIntTypes.h */, + 
0C12EF072616383C00B66C86 /* THGenerateLongType.h */, + 0C12EF082616383C00B66C86 /* THGenerateComplexFloatType.h */, + 0C12EF092616383C00B66C86 /* THAllocator.h */, + 0C12EF0A2616383C00B66C86 /* THGenerateCharType.h */, + 0C12EF0B2616383C00B66C86 /* THStorage.h */, + 0C12EF0C2616383C00B66C86 /* THHalf.h */, + 0C12EF0D2616383C00B66C86 /* THGenerateHalfType.h */, + 0C12EF0E2616383C00B66C86 /* THGenerateIntType.h */, + 0C12EF0F2616383C00B66C86 /* THVector.h */, + 0C12EF102616383C00B66C86 /* THGeneral.h */, + 0C12EF112616383C00B66C86 /* THGenerateBoolType.h */, + 0C12EF122616383C00B66C86 /* THLapack.h */, + 0C12EF132616383C00B66C86 /* THGenerateComplexDoubleType.h */, + 0C12EF142616383C00B66C86 /* THGenerateBFloat16Type.h */, + 0C12EF152616383C00B66C86 /* THGenerateQTypes.h */, + 0C12EF162616383C00B66C86 /* THGenerateFloatTypes.h */, + 0C12EF172616383C00B66C86 /* generic */, + 0C12EF292616383C00B66C86 /* THTensor.h */, + 0C12EF2A2616383C00B66C86 /* TH.h */, + 0C12EF2B2616383C00B66C86 /* THTensorApply.h */, + 0C12EF2C2616383C00B66C86 /* THStorageFunctions.hpp */, + 0C12EF2D2616383C00B66C86 /* THGenerateAllTypes.h */, + 0C12EF2E2616383C00B66C86 /* THTensor.hpp */, + 0C12EF2F2616383C00B66C86 /* THGenerateByteType.h */, + 0C12EF302616383C00B66C86 /* THStorageFunctions.h */, + 0C12EF312616383C00B66C86 /* THGenerateQUInt4x2Type.h */, + ); + path = TH; + sourceTree = "<group>"; + }; + 0C12EF172616383C00B66C86 /* generic */ = { + isa = PBXGroup; + children = ( + 0C12EF182616383C00B66C86 /* THBlas.h */, + 0C12EF192616383C00B66C86 /* THTensor.cpp */, + 0C12EF1A2616383C00B66C86 /* THTensorMath.cpp */, + 0C12EF1B2616383C00B66C86 /* THTensorMath.h */, + 0C12EF1C2616383C00B66C86 /* THStorageCopy.cpp */, + 0C12EF1D2616383C00B66C86 /* THTensorFastGetSet.hpp */, + 0C12EF1E2616383C00B66C86 /* THStorage.h */, + 0C12EF1F2616383C00B66C86 /* THTensorLapack.h */, + 0C12EF202616383C00B66C86 /* THVector.h */, + 0C12EF212616383C00B66C86 /* THLapack.cpp */, + 0C12EF222616383C00B66C86 /* THStorageCopy.h */, + 0C12EF232616383C00B66C86 /* THLapack.h */, + 0C12EF242616383C00B66C86 /* THStorage.cpp */, + 0C12EF252616383C00B66C86 /* THTensor.h */, + 0C12EF262616383C00B66C86 /* THBlas.cpp */, + 0C12EF272616383C00B66C86 /* THTensorLapack.cpp */, + 0C12EF282616383C00B66C86 /* THTensor.hpp */, + ); + path = generic; + sourceTree = "<group>"; + }; + 0C12EF322616383C00B66C86 /* lib */ = { + isa = PBXGroup; + children = ( + 0C12EF332616383C00B66C86 /* libtorch_cpu.a */, + 0C12EF342616383C00B66C86 /* libtorch.a */, + 0C12EF352616383C00B66C86 /* libcpuinfo.a */, + 0C12EF362616383C00B66C86 /* libXNNPACK.a */, + 0C12EF372616383C00B66C86 /* libtorchvision_ops.a */, + 0C12EF382616383C00B66C86 /* libpthreadpool.a */, + 0C12EF392616383C00B66C86 /* libc10.a */, + 0C12EF3A2616383C00B66C86 /* libeigen_blas.a */, + 0C12EF3B2616383C00B66C86 /* libclog.a */, + 0C12EF3C2616383C00B66C86 /* libpytorch_qnnpack.a */, + ); + path = lib; + sourceTree = "<group>"; + }; + 0C12EF6F26163A4C00B66C86 /* Frameworks */ = { + isa = PBXGroup; + children = ( + ); + name = Frameworks; + sourceTree = "<group>"; + }; + 0CEB0AB226151A8800F1F7D5 = { + isa = PBXGroup; + children = ( + 0C12E7872616383A00B66C86 /* install */, + 0CEB0ABD26151A8800F1F7D5 /* VisionTestApp */, + 0CEB0ABC26151A8800F1F7D5 /* Products */, + 0C12EF6F26163A4C00B66C86 /* Frameworks */, + ); + sourceTree = "<group>"; + }; + 0CEB0ABC26151A8800F1F7D5 /* Products */ = { + isa = PBXGroup; + children = ( + 0CEB0ABB26151A8800F1F7D5 /* VisionTestApp.app */, + ); + name = Products; + sourceTree = "<group>"; 
+ }; + 0CEB0ABD26151A8800F1F7D5 /* VisionTestApp */ = { + isa = PBXGroup; + children = ( + 0CEB0B3826152ED900F1F7D5 /* ModelRunner.h */, + 0CEB0B3926152ED900F1F7D5 /* ModelRunner.mm */, + 0CEB0ABE26151A8800F1F7D5 /* AppDelegate.h */, + 0CEB0ABF26151A8800F1F7D5 /* AppDelegate.m */, + 0CEB0AC426151A8800F1F7D5 /* ViewController.h */, + 0CEB0AC526151A8800F1F7D5 /* ViewController.mm */, + 0CEB0AC726151A8800F1F7D5 /* Main.storyboard */, + 0CEB0ACA26151A8900F1F7D5 /* Assets.xcassets */, + 0CEB0ACC26151A8900F1F7D5 /* LaunchScreen.storyboard */, + 0CEB0ACF26151A8900F1F7D5 /* Info.plist */, + 0CEB0AD026151A8900F1F7D5 /* main.m */, + 0C12EF7526163B7600B66C86 /* frcnn_mnetv3.pt */, + ); + path = VisionTestApp; + sourceTree = "<group>"; + }; +/* End PBXGroup section */ + +/* Begin PBXNativeTarget section */ + 0CEB0ABA26151A8800F1F7D5 /* VisionTestApp */ = { + isa = PBXNativeTarget; + buildConfigurationList = 0CEB0AEA26151A8900F1F7D5 /* Build configuration list for PBXNativeTarget "VisionTestApp" */; + buildPhases = ( + 0CEB0AB726151A8800F1F7D5 /* Sources */, + 0CEB0AB826151A8800F1F7D5 /* Frameworks */, + 0CEB0AB926151A8800F1F7D5 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = VisionTestApp; + productName = VisionTestApp; + productReference = 0CEB0ABB26151A8800F1F7D5 /* VisionTestApp.app */; + productType = "com.apple.product-type.application"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 0CEB0AB326151A8800F1F7D5 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 1240; + TargetAttributes = { + 0CEB0ABA26151A8800F1F7D5 = { + CreatedOnToolsVersion = 12.4; + }; + }; + }; + buildConfigurationList = 0CEB0AB626151A8800F1F7D5 /* Build configuration list for PBXProject "VisionTestApp" */; + compatibilityVersion = "Xcode 9.3"; + developmentRegion = en; + hasScannedForEncodings = 0; + knownRegions = ( + en, + Base, + ); + mainGroup = 0CEB0AB226151A8800F1F7D5; + productRefGroup = 0CEB0ABC26151A8800F1F7D5 /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 0CEB0ABA26151A8800F1F7D5 /* VisionTestApp */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 0CEB0AB926151A8800F1F7D5 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 0C12EF3D2616383D00B66C86 /* avx.py in Resources */, + 0CEB0ACE26151A8900F1F7D5 /* LaunchScreen.storyboard in Resources */, + 0C12EF3F2616383D00B66C86 /* avx2.py in Resources */, + 0C12EF7626163B7600B66C86 /* frcnn_mnetv3.pt in Resources */, + 0CEB0ACB26151A8900F1F7D5 /* Assets.xcassets in Resources */, + 0C12EF3E2616383D00B66C86 /* __init__.py in Resources */, + 0CEB0AC926151A8800F1F7D5 /* Main.storyboard in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 0CEB0AB726151A8800F1F7D5 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 0CEB0AC626151A8800F1F7D5 /* ViewController.mm in Sources */, + 0CEB0AC026151A8800F1F7D5 /* AppDelegate.m in Sources */, + 0C12EF412616383D00B66C86 /* THTensorMath.cpp in Sources */, + 0C12EF422616383D00B66C86 /* THStorageCopy.cpp in Sources */, + 0C12EF462616383D00B66C86 /* THTensorLapack.cpp in Sources */, + 0CEB0AD126151A8900F1F7D5 /* main.m in Sources */, + 0C12EF432616383D00B66C86 /* THLapack.cpp in Sources */, + 0C12EF402616383D00B66C86 /* THTensor.cpp in Sources */, + 0C12EF442616383D00B66C86 /* THStorage.cpp in Sources 
*/, + 0CEB0B3A26152ED900F1F7D5 /* ModelRunner.mm in Sources */, + 0C12EF452616383D00B66C86 /* THBlas.cpp in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXVariantGroup section */ + 0CEB0AC726151A8800F1F7D5 /* Main.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 0CEB0AC826151A8800F1F7D5 /* Base */, + ); + name = Main.storyboard; + sourceTree = "<group>"; + }; + 0CEB0ACC26151A8900F1F7D5 /* LaunchScreen.storyboard */ = { + isa = PBXVariantGroup; + children = ( + 0CEB0ACD26151A8900F1F7D5 /* Base */, + ); + name = LaunchScreen.storyboard; + sourceTree = "<group>"; + }; +/* End PBXVariantGroup section */ + +/* Begin XCBuildConfiguration section */ + 0CEB0AE826151A8900F1F7D5 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; + CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = dwarf; + ENABLE_BITCODE = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + ENABLE_TESTABILITY = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_DYNAMIC_NO_PIC = NO; + GCC_NO_COMMON_BLOCKS = YES; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_PREPROCESSOR_DEFINITIONS = ( + "DEBUG=1", + "$(inherited)", + ); + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ""; + IPHONEOS_DEPLOYMENT_TARGET = 13.0; + LIBRARY_SEARCH_PATHS = ""; + MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE; + MTL_FAST_MATH = YES; + ONLY_ACTIVE_ARCH = YES; + OTHER_LDFLAGS = ""; + SDKROOT = iphoneos; + }; + name = Debug; + }; + 0CEB0AE926151A8900F1F7D5 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ALWAYS_SEARCH_USER_PATHS = NO; + CLANG_ANALYZER_NONNULL = YES; + CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE; + CLANG_CXX_LANGUAGE_STANDARD = "gnu++14"; + CLANG_CXX_LIBRARY = "libc++"; + CLANG_ENABLE_MODULES = YES; + CLANG_ENABLE_OBJC_ARC = YES; + CLANG_ENABLE_OBJC_WEAK = YES; + CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES; + CLANG_WARN_BOOL_CONVERSION = YES; + CLANG_WARN_COMMA = YES; + CLANG_WARN_CONSTANT_CONVERSION = YES; + CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES; + CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR; + CLANG_WARN_DOCUMENTATION_COMMENTS = YES; 
+ CLANG_WARN_EMPTY_BODY = YES; + CLANG_WARN_ENUM_CONVERSION = YES; + CLANG_WARN_INFINITE_RECURSION = YES; + CLANG_WARN_INT_CONVERSION = YES; + CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES; + CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES; + CLANG_WARN_OBJC_LITERAL_CONVERSION = YES; + CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR; + CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES; + CLANG_WARN_RANGE_LOOP_ANALYSIS = YES; + CLANG_WARN_STRICT_PROTOTYPES = YES; + CLANG_WARN_SUSPICIOUS_MOVE = YES; + CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE; + CLANG_WARN_UNREACHABLE_CODE = YES; + CLANG_WARN__DUPLICATE_METHOD_MATCH = YES; + COPY_PHASE_STRIP = NO; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + ENABLE_BITCODE = NO; + ENABLE_NS_ASSERTIONS = NO; + ENABLE_STRICT_OBJC_MSGSEND = YES; + GCC_C_LANGUAGE_STANDARD = gnu11; + GCC_NO_COMMON_BLOCKS = YES; + GCC_WARN_64_TO_32_BIT_CONVERSION = YES; + GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR; + GCC_WARN_UNDECLARED_SELECTOR = YES; + GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE; + GCC_WARN_UNUSED_FUNCTION = YES; + GCC_WARN_UNUSED_VARIABLE = YES; + HEADER_SEARCH_PATHS = ""; + IPHONEOS_DEPLOYMENT_TARGET = 13.0; + LIBRARY_SEARCH_PATHS = ""; + MTL_ENABLE_DEBUG_INFO = NO; + MTL_FAST_MATH = YES; + OTHER_LDFLAGS = ""; + SDKROOT = iphoneos; + VALIDATE_PRODUCT = YES; + }; + name = Release; + }; + 0CEB0AEB26151A8900F1F7D5 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + ENABLE_BITCODE = NO; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/install/include", + ); + INFOPLIST_FILE = VisionTestApp/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/VisionTestApp", + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/install/lib", + ); + OTHER_LDFLAGS = ( + "$(inherited)", + "-ObjC", + "-l\"XNNPACK\"", + "-l\"c++\"", + "-l\"c10\"", + "-l\"clog\"", + "-l\"cpuinfo\"", + "-l\"eigen_blas\"", + "-l\"pthreadpool\"", + "-l\"pytorch_qnnpack\"", + "-l\"stdc++\"", + "-l\"torch\"", + "-l\"torch_cpu\"", + "-l\"torchvision_ops\"", + "-force_load", + "$(PROJECT_DIR)/install/lib/libtorch.a", + "-force_load", + "$(PROJECT_DIR)/install/lib/libtorch_cpu.a", + "-force_load", + "$(PROJECT_DIR)/install/lib/libtorchvision_ops.a", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.pytorch.ios.VisionTestApp.VisionTestApp; + PRODUCT_NAME = "$(TARGET_NAME)"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Debug; + }; + 0CEB0AEC26151A8900F1F7D5 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + ASSETCATALOG_COMPILER_APPICON_NAME = AppIcon; + ASSETCATALOG_COMPILER_GLOBAL_ACCENT_COLOR_NAME = AccentColor; + CODE_SIGN_STYLE = Automatic; + ENABLE_BITCODE = NO; + HEADER_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/install/include", + ); + INFOPLIST_FILE = VisionTestApp/Info.plist; + LD_RUNPATH_SEARCH_PATHS = ( + "$(inherited)", + "@executable_path/Frameworks", + ); + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/VisionTestApp", + "$(PROJECT_DIR)", + "$(PROJECT_DIR)/install/lib", + ); + OTHER_LDFLAGS = ( + "$(inherited)", + "-ObjC", + "-l\"XNNPACK\"", + "-l\"c++\"", + "-l\"c10\"", + "-l\"clog\"", + "-l\"cpuinfo\"", + "-l\"eigen_blas\"", + "-l\"pthreadpool\"", + "-l\"pytorch_qnnpack\"", + "-l\"stdc++\"", + "-l\"torch\"", + "-l\"torch_cpu\"", + "-l\"torchvision_ops\"", + "-force_load", + 
"$(PROJECT_DIR)/install/lib/libtorch.a", + "-force_load", + "$(PROJECT_DIR)/install/lib/libtorch_cpu.a", + "-force_load", + "$(PROJECT_DIR)/install/lib/libtorchvision_ops.a", + ); + PRODUCT_BUNDLE_IDENTIFIER = com.pytorch.ios.VisionTestApp.VisionTestApp; + PRODUCT_NAME = "$(TARGET_NAME)"; + TARGETED_DEVICE_FAMILY = "1,2"; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 0CEB0AB626151A8800F1F7D5 /* Build configuration list for PBXProject "VisionTestApp" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 0CEB0AE826151A8900F1F7D5 /* Debug */, + 0CEB0AE926151A8900F1F7D5 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 0CEB0AEA26151A8900F1F7D5 /* Build configuration list for PBXNativeTarget "VisionTestApp" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 0CEB0AEB26151A8900F1F7D5 /* Debug */, + 0CEB0AEC26151A8900F1F7D5 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 0CEB0AB326151A8800F1F7D5 /* Project object */; +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata new file mode 100644 index 0000000000000000000000000000000000000000..919434a6254f0e9651f402737811be6634a03e9c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/contents.xcworkspacedata @@ -0,0 +1,7 @@ +<?xml version="1.0" encoding="UTF-8"?> +<Workspace + version = "1.0"> + <FileRef + location = "self:"> + </FileRef> +</Workspace> diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist new file mode 100644 index 0000000000000000000000000000000000000000..18d981003d68d0546c4804ac2ff47dd97c6e7921 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp.xcodeproj/project.xcworkspace/xcshareddata/IDEWorkspaceChecks.plist @@ -0,0 +1,8 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>IDEDidComputeMac32BitWarning</key> + <true/> +</dict> +</plist> diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.h b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.h new file mode 100644 index 0000000000000000000000000000000000000000..0dde86886e66cd44ba74061cba82f3982a4a3733 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.h @@ -0,0 +1,7 @@ +#import <UIKit/UIKit.h> + +@interface AppDelegate : UIResponder <UIApplicationDelegate> + +@property(strong, nonatomic) UIWindow *window; + +@end diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.m b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.m new file mode 100644 index 0000000000000000000000000000000000000000..a20d3987c804ada904cd41bfcb567863de0a060b --- 
/dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/AppDelegate.m @@ -0,0 +1,44 @@ + +#import "AppDelegate.h" + +@interface AppDelegate () + +@end + +@implementation AppDelegate + + +- (BOOL)application:(UIApplication *)application didFinishLaunchingWithOptions:(NSDictionary *)launchOptions { + // Override point for customization after application launch. + return YES; +} + + +- (void)applicationWillResignActive:(UIApplication *)application { + // Sent when the application is about to move from active to inactive state. This can occur for certain types of temporary interruptions (such as an incoming phone call or SMS message) or when the user quits the application and it begins the transition to the background state. + // Use this method to pause ongoing tasks, disable timers, and invalidate graphics rendering callbacks. Games should use this method to pause the game. +} + + +- (void)applicationDidEnterBackground:(UIApplication *)application { + // Use this method to release shared resources, save user data, invalidate timers, and store enough application state information to restore your application to its current state in case it is terminated later. + // If your application supports background execution, this method is called instead of applicationWillTerminate: when the user quits. +} + + +- (void)applicationWillEnterForeground:(UIApplication *)application { + // Called as part of the transition from the background to the active state; here you can undo many of the changes made on entering the background. +} + + +- (void)applicationDidBecomeActive:(UIApplication *)application { + // Restart any tasks that were paused (or not yet started) while the application was inactive. If the application was previously in the background, optionally refresh the user interface. +} + + +- (void)applicationWillTerminate:(UIApplication *)application { + // Called when the application is about to terminate. Save data if appropriate. See also applicationDidEnterBackground:. 
+} + + +@end diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AccentColor.colorset/Contents.json b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AccentColor.colorset/Contents.json new file mode 100644 index 0000000000000000000000000000000000000000..eb8789700816459c1e1480e0b34781d9fb78a1ca --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AccentColor.colorset/Contents.json @@ -0,0 +1,11 @@ +{ + "colors" : [ + { + "idiom" : "universal" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AppIcon.appiconset/Contents.json b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AppIcon.appiconset/Contents.json new file mode 100644 index 0000000000000000000000000000000000000000..9221b9bb1a35f5de270a41afa01305478221ae32 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/AppIcon.appiconset/Contents.json @@ -0,0 +1,98 @@ +{ + "images" : [ + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "20x20" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "29x29" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "40x40" + }, + { + "idiom" : "iphone", + "scale" : "2x", + "size" : "60x60" + }, + { + "idiom" : "iphone", + "scale" : "3x", + "size" : "60x60" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "20x20" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "20x20" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "29x29" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "29x29" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "40x40" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "40x40" + }, + { + "idiom" : "ipad", + "scale" : "1x", + "size" : "76x76" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "76x76" + }, + { + "idiom" : "ipad", + "scale" : "2x", + "size" : "83.5x83.5" + }, + { + "idiom" : "ios-marketing", + "scale" : "1x", + "size" : "1024x1024" + } + ], + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/Contents.json b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/Contents.json new file mode 100644 index 0000000000000000000000000000000000000000..73c00596a7fca3f3d4bdd64053b69d86745f9e10 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Assets.xcassets/Contents.json @@ -0,0 +1,6 @@ +{ + "info" : { + "author" : "xcode", + "version" : 1 + } +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/LaunchScreen.storyboard b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/LaunchScreen.storyboard new file mode 100644 index 0000000000000000000000000000000000000000..0b64f641701c6bef5891c901d90b4108a3d87578 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/LaunchScreen.storyboard @@ -0,0 +1,33 @@ +<?xml version="1.0" encoding="UTF-8"?> +<document 
type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="17701" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" launchScreen="YES" useTraitCollections="YES" useSafeAreas="YES" colorMatched="YES" initialViewController="01J-lp-oVM"> + <device id="retina6_1" orientation="portrait" appearance="light"/> + <dependencies> + <deployment identifier="iOS"/> + <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="17703"/> + <capability name="Safe area layout guides" minToolsVersion="9.0"/> + <capability name="System colors in document resources" minToolsVersion="11.0"/> + <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/> + </dependencies> + <scenes> + <!--View Controller--> + <scene sceneID="EHf-IW-A2E"> + <objects> + <viewController id="01J-lp-oVM" sceneMemberID="viewController"> + <view key="view" contentMode="scaleToFill" id="Ze5-6b-2t3"> + <rect key="frame" x="0.0" y="0.0" width="414" height="896"/> + <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/> + <viewLayoutGuide key="safeArea" id="6Tk-OE-BBY"/> + <color key="backgroundColor" systemColor="systemBackgroundColor"/> + </view> + </viewController> + <placeholder placeholderIdentifier="IBFirstResponder" id="iYj-Kq-Ea1" userLabel="First Responder" sceneMemberID="firstResponder"/> + </objects> + <point key="canvasLocation" x="53" y="375"/> + </scene> + </scenes> + <resources> + <systemColor name="systemBackgroundColor"> + <color white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/> + </systemColor> + </resources> +</document> diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/Main.storyboard b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/Main.storyboard new file mode 100644 index 0000000000000000000000000000000000000000..b20f277b049cfd330dbc30c24bc6166342dbe191 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Base.lproj/Main.storyboard @@ -0,0 +1,79 @@ +<?xml version="1.0" encoding="UTF-8"?> +<document type="com.apple.InterfaceBuilder3.CocoaTouch.Storyboard.XIB" version="3.0" toolsVersion="17701" targetRuntime="iOS.CocoaTouch" propertyAccessControl="none" useAutolayout="YES" useTraitCollections="YES" useSafeAreas="YES" colorMatched="YES" initialViewController="C8u-kp-ZGJ"> + <device id="retina6_1" orientation="portrait" appearance="light"/> + <dependencies> + <deployment identifier="iOS"/> + <plugIn identifier="com.apple.InterfaceBuilder.IBCocoaTouchPlugin" version="17703"/> + <capability name="Safe area layout guides" minToolsVersion="9.0"/> + <capability name="System colors in document resources" minToolsVersion="11.0"/> + <capability name="documents saved in the Xcode 8 format" minToolsVersion="8.0"/> + </dependencies> + <scenes> + <!--Test Vision Ops--> + <scene sceneID="tne-QT-ifu"> + <objects> + <viewController id="BYZ-38-t0r" customClass="ViewController" sceneMemberID="viewController"> + <view key="view" contentMode="scaleToFill" id="8bC-Xf-vdC"> + <rect key="frame" x="0.0" y="0.0" width="414" height="896"/> + <autoresizingMask key="autoresizingMask" widthSizable="YES" heightSizable="YES"/> + <subviews> + <textView clipsSubviews="YES" multipleTouchEnabled="YES" contentMode="scaleToFill" textAlignment="natural" translatesAutoresizingMaskIntoConstraints="NO" id="gaT-0L-TqB"> + <rect key="frame" x="20" y="88" width="374" height="774"/> + 
<color key="backgroundColor" systemColor="systemBackgroundColor"/> + <color key="textColor" systemColor="labelColor"/> + <fontDescription key="fontDescription" type="system" pointSize="14"/> + <textInputTraits key="textInputTraits" autocapitalizationType="sentences"/> + </textView> + </subviews> + <viewLayoutGuide key="safeArea" id="6Tk-OE-BBY"/> + <color key="backgroundColor" systemColor="systemBackgroundColor"/> + <constraints> + <constraint firstItem="gaT-0L-TqB" firstAttribute="leading" secondItem="6Tk-OE-BBY" secondAttribute="leading" constant="20" id="3b1-x4-VD4"/> + <constraint firstItem="gaT-0L-TqB" firstAttribute="bottom" secondItem="6Tk-OE-BBY" secondAttribute="bottom" id="URh-hA-LJV"/> + <constraint firstItem="6Tk-OE-BBY" firstAttribute="trailing" secondItem="gaT-0L-TqB" secondAttribute="trailing" constant="20" id="avy-0f-meB"/> + <constraint firstItem="gaT-0L-TqB" firstAttribute="top" secondItem="6Tk-OE-BBY" secondAttribute="top" id="d1Q-UA-AUb"/> + </constraints> + </view> + <navigationItem key="navigationItem" title="Test Vision Ops" id="QV8-E1-9z9"> + <barButtonItem key="rightBarButtonItem" title="Redo" id="ZQ5-yr-k4l"> + <connections> + <action selector="rerun:" destination="BYZ-38-t0r" id="F5t-Nr-XmE"/> + </connections> + </barButtonItem> + </navigationItem> + <connections> + <outlet property="textView" destination="gaT-0L-TqB" id="lMF-Rf-ics"/> + </connections> + </viewController> + <placeholder placeholderIdentifier="IBFirstResponder" id="dkx-z0-nzr" sceneMemberID="firstResponder"/> + </objects> + <point key="canvasLocation" x="1047.8260869565217" y="137.94642857142856"/> + </scene> + <!--Navigation Controller--> + <scene sceneID="LxF-da-Ea2"> + <objects> + <navigationController automaticallyAdjustsScrollViewInsets="NO" id="C8u-kp-ZGJ" sceneMemberID="viewController"> + <toolbarItems/> + <navigationBar key="navigationBar" contentMode="scaleToFill" id="SkD-La-Hwl"> + <rect key="frame" x="0.0" y="44" width="414" height="44"/> + <autoresizingMask key="autoresizingMask"/> + </navigationBar> + <nil name="viewControllers"/> + <connections> + <segue destination="BYZ-38-t0r" kind="relationship" relationship="rootViewController" id="s4b-GK-ujM"/> + </connections> + </navigationController> + <placeholder placeholderIdentifier="IBFirstResponder" id="tCY-46-VMM" userLabel="First Responder" customClass="UIResponder" sceneMemberID="firstResponder"/> + </objects> + <point key="canvasLocation" x="137.68115942028987" y="137.94642857142856"/> + </scene> + </scenes> + <resources> + <systemColor name="labelColor"> + <color white="0.0" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/> + </systemColor> + <systemColor name="systemBackgroundColor"> + <color white="1" alpha="1" colorSpace="custom" customColorSpace="genericGamma22GrayColorSpace"/> + </systemColor> + </resources> +</document> diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Info.plist b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Info.plist new file mode 100644 index 0000000000000000000000000000000000000000..5bae3d0ded59eb2e782633419776fafedc0b1f7e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/Info.plist @@ -0,0 +1,45 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleDevelopmentRegion</key> + <string>$(DEVELOPMENT_LANGUAGE)</string> + 
<key>CFBundleExecutable</key> + <string>$(EXECUTABLE_NAME)</string> + <key>CFBundleIdentifier</key> + <string>$(PRODUCT_BUNDLE_IDENTIFIER)</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>$(PRODUCT_NAME)</string> + <key>CFBundlePackageType</key> + <string>$(PRODUCT_BUNDLE_PACKAGE_TYPE)</string> + <key>CFBundleShortVersionString</key> + <string>1.0</string> + <key>CFBundleVersion</key> + <string>1</string> + <key>LSRequiresIPhoneOS</key> + <true/> + <key>UIApplicationSupportsIndirectInputEvents</key> + <true/> + <key>UILaunchStoryboardName</key> + <string>LaunchScreen</string> + <key>UIMainStoryboardFile</key> + <string>Main</string> + <key>UIRequiredDeviceCapabilities</key> + <array> + <string>armv7</string> + </array> + <key>UISupportedInterfaceOrientations</key> + <array> + <string>UIInterfaceOrientationPortrait</string> + </array> + <key>UISupportedInterfaceOrientations~ipad</key> + <array> + <string>UIInterfaceOrientationPortrait</string> + <string>UIInterfaceOrientationPortraitUpsideDown</string> + <string>UIInterfaceOrientationLandscapeLeft</string> + <string>UIInterfaceOrientationLandscapeRight</string> + </array> +</dict> +</plist> diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.h b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.h new file mode 100644 index 0000000000000000000000000000000000000000..f71c80c981c1177eb8740793f700c1753c49f6e3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.h @@ -0,0 +1,13 @@ + +#import <Foundation/Foundation.h> + +NS_ASSUME_NONNULL_BEGIN + +@interface ModelRunner : NSObject + ++ (NSString* )run; ++ (BOOL)setUp; + +@end + +NS_ASSUME_NONNULL_END diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.mm b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.mm new file mode 100644 index 0000000000000000000000000000000000000000..dea3822df26a324001ce394f71185b659281c2f9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ModelRunner.mm @@ -0,0 +1,73 @@ + +#import "ModelRunner.h" +#include <string> +#include <vector> +#include "ATen/ATen.h" +#include "caffe2/core/timer.h" +#include "caffe2/utils/string_utils.h" +#include "torch/csrc/autograd/grad_mode.h" +#include "torch/csrc/jit/serialization/import.h" +#include "torch/script.h" + +static NSString *model_name = @"frcnn_mnetv3"; +static NSString *model_suffix = @"pt"; +static NSString *model_path = nil; +static int warmup = 5; +static int iter = 20; + +@implementation ModelRunner + ++ (NSString *)run { + std::vector<std::string> logs; +#define UI_LOG(fmt, ...) 
\ + { \ + NSString* log = [NSString stringWithFormat:fmt, __VA_ARGS__]; \ + NSLog(@"%@", log); \ + logs.push_back(log.UTF8String); \ + } + + auto module = torch::jit::load(std::string(model_path.UTF8String)); + module.eval(); + + std::vector<c10::IValue> inputs; + auto img_tensor = torch::ones({3, 224, 224}, at::ScalarType::Float); + inputs.push_back(c10::List<at::Tensor>(img_tensor)); + torch::autograd::AutoGradMode guard(false); + at::InferenceMode nonVarTypeModeGuard(true); + + UI_LOG(@"Running warmup runs...", nil); + for (int i = 0; i < warmup; ++i) { + module.forward(inputs); + } + UI_LOG(@"Warmup runs finished.\nMain runs...", nil); + caffe2::Timer timer; + auto millis = timer.MilliSeconds(); + for (int i = 0; i < iter; ++i) { + module.forward(inputs); + } + millis = timer.MilliSeconds(); + UI_LOG(@"Main run finished. \nMilliseconds per iter: %.3f", millis / iter, nil); + UI_LOG(@"Iters per second: : %.3f", 1000.0 * iter / millis, nil); + UI_LOG(@"Done.", nil); + + std::cout << module.forward(inputs) << std::endl; + + NSString* log_text = @""; + for (auto& msg : logs) { + log_text = [log_text stringByAppendingString:[NSString stringWithUTF8String:msg.c_str()]]; + log_text = [log_text stringByAppendingString:@"\n"]; + } + return log_text; +} + ++ (BOOL)setUp { + model_path = [[NSBundle mainBundle] pathForResource:model_name ofType:model_suffix]; + if (![[NSFileManager defaultManager] fileExistsAtPath:model_path]) { + NSLog(@"Invalid model path!"); + model_path = nil; + return NO; + } + return YES; +} + +@end diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.h b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.h new file mode 100644 index 0000000000000000000000000000000000000000..7df67432f9212c3778bcca984c55e003dbcbccd9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.h @@ -0,0 +1,8 @@ + +#import <UIKit/UIKit.h> + +@interface ViewController : UIViewController + + +@end + diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.mm b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.mm new file mode 100644 index 0000000000000000000000000000000000000000..900005d39900b56e930550a8337edd4feb153353 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/ViewController.mm @@ -0,0 +1,44 @@ + +#import "ViewController.h" +#include <torch/script.h> +#import "ModelRunner.h" + +@interface ViewController () +@property (weak, nonatomic) IBOutlet UITextView *textView; +@end + +static NSString const *config_error_msg = @"Wrong model configurations... 
Please fix and click \"Redo\""; + +@implementation ViewController + +- (void)viewDidLoad { + [super viewDidLoad]; + if ([ModelRunner setUp]) { + [self testModel]; + } else { + self.textView.text = [config_error_msg copy]; + } +} + + +- (IBAction)rerun:(id)sender { + self.textView.text = @""; + if (![ModelRunner setUp]) { + self.textView.text = [config_error_msg copy]; + return; + } + dispatch_async(dispatch_get_main_queue(), ^{ + [self testModel]; + }); +} + +- (void)testModel { + dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{ + NSString *text = [ModelRunner run]; + dispatch_async(dispatch_get_main_queue(), ^{ + self.textView.text = [self.textView.text stringByAppendingString:text]; + }); + }); +} + +@end diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/main.m b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/main.m new file mode 100644 index 0000000000000000000000000000000000000000..1a8b57c33bc9044cd649ba29b6aab989ee6d2e0c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/VisionTestApp/main.m @@ -0,0 +1,18 @@ +// +// main.m +// VisionTestApp +// +// Created by Yuchen Huang on 3/31/21. +// + +#import <UIKit/UIKit.h> +#import "AppDelegate.h" + +int main(int argc, char * argv[]) { + NSString * appDelegateClassName; + @autoreleasepool { + // Setup code that might create autoreleased objects goes here. + appDelegateClassName = NSStringFromClass([AppDelegate class]); + } + return UIApplicationMain(argc, argv, nil, appDelegateClassName); +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/clean.sh b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/clean.sh new file mode 100644 index 0000000000000000000000000000000000000000..20bedc784d9b1337c5109088f5b32218ee69b3d1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/clean.sh @@ -0,0 +1,8 @@ +#!/bin/bash +set -ex -o pipefail + +TEST_APP_PATH=$(dirname $(realpath $0)) +cd ${TEST_APP_PATH} + +rm -rf ./install +rm ./VisionTestApp/*.pt diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/make_assets.py b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/make_assets.py new file mode 100644 index 0000000000000000000000000000000000000000..122094b354717f57f1c124f9bbc3d307e18e1171 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/make_assets.py @@ -0,0 +1,17 @@ +import torch +import torchvision +from torch.utils.mobile_optimizer import optimize_for_mobile + +print(torch.__version__) + +model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn( + pretrained=True, + box_score_thresh=0.7, + rpn_post_nms_top_n_test=100, + rpn_score_thresh=0.4, + rpn_pre_nms_top_n_test=150) + +model.eval() +script_model = torch.jit.script(model) +opt_script_model = optimize_for_mobile(script_model) +opt_script_model.save("VisionTestApp/frcnn_mnetv3.pt") diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/setup.sh b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/setup.sh new file mode 100644 index 0000000000000000000000000000000000000000..3b3520d70526038df68f835ce2a184e850cac66a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/VisionTestApp/setup.sh @@ -0,0 +1,33 @@ +#!/bin/bash +set -ex -o pipefail + +echo "" +echo "DIR: $(pwd)" + +TEST_APP_PATH=$(dirname $(realpath $0)) +cd ${TEST_APP_PATH} + +PYTORCH_IOS_NIGHTLY_NAME=libtorch_ios_nightly_build.zip 
+VISION_IOS_NIGHTLY_NAME=libtorchvision_ops_ios_nightly_build.zip + +echo "Downloading torch libs and vision libs..." +wget https://ossci-ios-build.s3.amazonaws.com/${PYTORCH_IOS_NIGHTLY_NAME} +wget https://ossci-ios-build.s3.amazonaws.com/${VISION_IOS_NIGHTLY_NAME} + +mkdir -p ./library/torch +mkdir -p ./library/vision + +echo "Unziping torch libs and vision libs..." +unzip -d ./library/torch ./${PYTORCH_IOS_NIGHTLY_NAME} +unzip -d ./library/vision ./${VISION_IOS_NIGHTLY_NAME} + +cp ./library/vision/install/lib/*.a ./library/torch/install/lib +cp -r ./library/torch/install . + +rm -rf ./library +rm -rf ./*.zip + +echo "Generating the vision model..." +python ./make_assets.py + +echo "Finished project setups." diff --git a/pretrained_model/pytorch_vision_v0.10.0/ios/build_ios.sh b/pretrained_model/pytorch_vision_v0.10.0/ios/build_ios.sh new file mode 100644 index 0000000000000000000000000000000000000000..81ac2f2a2187f648390ab3d88f318d1535506228 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/ios/build_ios.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +set -ex -o pipefail +echo "" +echo "DIR: $(pwd)" +VISION_IOS_ROOT=$(dirname $(realpath $0)) + +if ! [ -n "${LIBTORCH_HEADER_ROOT:-}" ]; then + echo "Missing parameter: LIBTORCH_HEADER_ROOT" + exit 1 +fi + +if [ -n "${IOS_ARCH:-}" ]; then + if [ "${IOS_ARCH:-}" == "arm64" ]; then + IOS_PLATFORM="OS" + elif [ "${IOS_ARCH:-}" == "x86_64" ]; then + IOS_PLATFORM="SIMULATOR" + fi +fi + +mkdir -p ${VISION_IOS_ROOT}/lib +mkdir -p ${VISION_IOS_ROOT}/build +cd ${VISION_IOS_ROOT}/build +cmake -DLIBTORCH_HEADER_ROOT=${LIBTORCH_HEADER_ROOT} \ + -DCMAKE_TOOLCHAIN_FILE=${VISION_IOS_ROOT}/../cmake/iOS.cmake \ + -DIOS_ARCH=${IOS_ARCH} \ + -DIOS_PLATFORM=${IOS_PLATFORM} \ + .. +make +rm -rf ${VISION_IOS_ROOT}/build diff --git a/pretrained_model/pytorch_vision_v0.10.0/mypy.ini b/pretrained_model/pytorch_vision_v0.10.0/mypy.ini new file mode 100644 index 0000000000000000000000000000000000000000..040b52dfda45248c51a8dab717e3eeb95360cfc2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/mypy.ini @@ -0,0 +1,69 @@ +[mypy] + +files = torchvision +show_error_codes = True +pretty = True + +[mypy-torchvision.io._video_opt.*] + +ignore_errors = True + +[mypy-torchvision.io.*] + +ignore_errors = True + +[mypy-torchvision.models.densenet.*] + +ignore_errors=True + +[mypy-torchvision.models.detection.*] + +ignore_errors = True + +[mypy-torchvision.models.quantization.*] + +ignore_errors = True + +[mypy-torchvision.ops.*] + +ignore_errors = True + +[mypy-torchvision.transforms.*] + +ignore_errors = True + +[mypy-PIL.*] + +ignore_missing_imports = True + +[mypy-numpy.*] + +ignore_missing_imports = True + +[mypy-scipy.*] + +ignore_missing_imports = True + +[mypy-pycocotools.*] + +ignore_missing_imports = True + +[mypy-lmdb.*] + +ignore_missing_imports = True + +[mypy-pandas.*] + +ignore_missing_imports = True + +[mypy-accimage.*] + +ignore_missing_imports = True + +[mypy-av.*] + +ignore_missing_imports = True + +[mypy-defusedxml.*] + +ignore_missing_imports = True diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/README.md b/pretrained_model/pytorch_vision_v0.10.0/packaging/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7d3c5f7831bdf831a7aa4c6434c2e32ade722111 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/README.md @@ -0,0 +1,90 @@ +# Building torchvision packages for release + +## Anaconda packages + +### Linux + +```bash +nvidia-docker run -it --ipc=host --rm -v $(pwd):/remote soumith/conda-cuda 
bash +pushd remote/conda + +./build_vision.sh 9.0 +./build_vision.sh 10.0 +./build_vision.sh cpu + +# copy packages over to /remote +# exit docker +# anaconda upload -u pytorch torchvision*.bz2 +``` + +### OSX + +```bash +# create a fresh anaconda environment / install and activate it +conda install -y conda-build anaconda-client +./build_vision.sh cpu + +# copy packages over to /remote +# exit docker +# anaconda upload -u pytorch torchvision*.bz2 +``` + +### Windows + +```bash +# Open `Git Bash` and change dir to `conda` +./build_vision.sh 9.0 +./build_vision.sh 10.0 +./build_vision.sh cpu + +# copy packages to a output directory +# anaconda upload -u pytorch torchvision*.bz2 +``` + +## Wheels + +### Linux + +pushd wheel + +```bash +nvidia-docker run -it --ipc=host --rm -v $(pwd):/remote soumith/manylinux-cuda90:latest bash +cd remote +./linux_manywheel.sh cu90 + +rm -rf /usr/local/cuda* +./linux_manywheel.sh cpu +``` + +```bash +nvidia-docker run -it --ipc=host --rm -v $(pwd):/remote soumith/manylinux-cuda100:latest bash +cd remote +./linux_manywheel.sh cu100 +``` + +wheels are in the folders `cpu`, `cu90`, `cu100`. + +You can upload the `cu90` wheels to twine with `twine upload *.whl`. +Which wheels we upload depends on which wheels PyTorch uploads as default, and right now, it's `cu90`. + +### OSX + +```bash +pushd wheel +./osx_wheel.sh +``` + +### Windows + +```cmd +set PYTORCH_REPO=pytorch + +pushd windows +call build_vision.bat 90 0.3.0 1 +call build_vision.bat 100 0.3.0 1 +call build_vision.bat cpu 0.3.0 1 +``` + +wheels are in the current folder. + +You can upload them to twine with `twine upload *.whl` diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/build_cmake.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_cmake.sh new file mode 100644 index 0000000000000000000000000000000000000000..0945f576ee2b5ee47ca8c730a080362403ee0347 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_cmake.sh @@ -0,0 +1,106 @@ +#!/bin/bash +set -ex + +PARALLELISM=8 +if [ -n "$MAX_JOBS" ]; then + PARALLELISM=$MAX_JOBS +fi + +if [[ "$(uname)" != Darwin && "$OSTYPE" != "msys" ]]; then + eval "$(./conda/bin/conda shell.bash hook)" + conda activate ./env +fi + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +. "$script_dir/pkg_helpers.bash" + +export BUILD_TYPE=conda +setup_env 0.10.0 +export SOURCE_ROOT_DIR="$PWD" +setup_conda_pytorch_constraint +setup_conda_cudatoolkit_plain_constraint + +if [[ "$OSTYPE" == "msys" ]]; then + conda install -yq conda-build cmake pillow>=5.3.0 future + pip install dataclasses +fi + +setup_visual_studio_constraint +setup_junit_results_folder + +conda install -yq pytorch=$PYTORCH_VERSION $CONDA_CUDATOOLKIT_CONSTRAINT $CONDA_CPUONLY_FEATURE -c "pytorch-${UPLOAD_CHANNEL}" +TORCH_PATH=$(dirname $(python -c "import torch; print(torch.__file__)")) + +if [[ "$(uname)" == Darwin || "$OSTYPE" == "msys" ]]; then + conda install -yq libpng jpeg +else + yum install -y libpng-devel libjpeg-turbo-devel +fi + +mkdir cpp_build +pushd cpp_build + +# Generate libtorchvision files +cmake .. 
-DTorch_DIR=$TORCH_PATH/share/cmake/Torch -DWITH_CUDA=$CMAKE_USE_CUDA + +# Compile and install libtorchvision +if [[ "$OSTYPE" == "msys" ]]; then + "$script_dir/windows/internal/vc_env_helper.bat" "$script_dir/windows/internal/build_cmake.bat" $PARALLELISM + CONDA_PATH=$(dirname $(which python)) + cp -r "C:/Program Files (x86)/torchvision/include/torchvision" $CONDA_PATH/include +else + make -j$PARALLELISM + make install + + if [[ "$(uname)" == Darwin ]]; then + CONDA_PATH=$(dirname $(dirname $(which python))) + cp -r /usr/local/include/torchvision $CONDA_PATH/include/ + export C_INCLUDE_PATH=/usr/local/include + export CPLUS_INCLUDE_PATH=/usr/local/include + fi +fi + +popd + +# Install torchvision locally +python setup.py develop + +# Trace, compile and run project that uses Faster-RCNN +pushd test/tracing/frcnn +mkdir build + +# Trace model +python trace_model.py +cp fasterrcnn_resnet50_fpn.pt build + +cd build +cmake .. -DTorch_DIR=$TORCH_PATH/share/cmake/Torch -DWITH_CUDA=$CMAKE_USE_CUDA +if [[ "$OSTYPE" == "msys" ]]; then + "$script_dir/windows/internal/vc_env_helper.bat" "$script_dir/windows/internal/build_frcnn.bat" $PARALLELISM + mv fasterrcnn_resnet50_fpn.pt Release + cd Release + export PATH=$(cygpath "C:/Program Files (x86)/torchvision/bin"):$(cygpath $TORCH_PATH)/lib:$PATH +else + make -j$PARALLELISM +fi + +# Run traced program +./test_frcnn_tracing + +# Compile and run the CPP example +popd +cd examples/cpp/hello_world + +mkdir build +cd build +cmake .. -DTorch_DIR=$TORCH_PATH/share/cmake/Torch + +if [[ "$OSTYPE" == "msys" ]]; then + "$script_dir/windows/internal/vc_env_helper.bat" "$script_dir/windows/internal/build_cpp_example.bat" $PARALLELISM + cd Release +else + make -j$PARALLELISM +fi + +# Run CPP example +./hello-world diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/build_conda.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_conda.sh new file mode 100644 index 0000000000000000000000000000000000000000..5f2239aae7ef546f5ef47c026dc2e4ecffb65365 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_conda.sh @@ -0,0 +1,14 @@ +#!/bin/bash +set -ex + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +. "$script_dir/pkg_helpers.bash" + +export BUILD_TYPE=conda +setup_env 0.10.0 +export SOURCE_ROOT_DIR="$PWD" +setup_conda_pytorch_constraint +setup_conda_cudatoolkit_constraint +setup_visual_studio_constraint +setup_junit_results_folder +conda build $CONDA_CHANNEL_FLAGS -c defaults -c conda-forge --no-anaconda-upload --python "$PYTHON_VERSION" packaging/torchvision diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/build_wheel.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_wheel.sh new file mode 100644 index 0000000000000000000000000000000000000000..05dc23a43ab185fff3cb2de8a4e44d0dbf064810 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/build_wheel.sh @@ -0,0 +1,59 @@ +#!/bin/bash +set -ex + +script_dir="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )" +. 
"$script_dir/pkg_helpers.bash" + +export BUILD_TYPE=wheel +setup_env 0.10.0 +setup_wheel_python +pip_install numpy pyyaml future ninja +setup_pip_pytorch_version +python setup.py clean + +# Copy binaries to be included in the wheel distribution +if [[ "$(uname)" == Darwin || "$OSTYPE" == "msys" ]]; then + python_exec="$(which python)" + bin_path=$(dirname $python_exec) + env_path=$(dirname $bin_path) + if [[ "$(uname)" == Darwin ]]; then + # Install delocate to relocate the required binaries + pip_install delocate + else + cp "$bin_path/Library/bin/libpng16.dll" torchvision + cp "$bin_path/Library/bin/libjpeg.dll" torchvision + fi +else + # Install auditwheel to get some inspection utilities + pip_install auditwheel + + # Point to custom libraries + export LD_LIBRARY_PATH=$(pwd)/ext_libraries/lib:$LD_LIBRARY_PATH + export TORCHVISION_INCLUDE=$(pwd)/ext_libraries/include + export TORCHVISION_LIBRARY=$(pwd)/ext_libraries/lib +fi + +download_copy_ffmpeg + +if [[ "$OSTYPE" == "msys" ]]; then + IS_WHEEL=1 "$script_dir/windows/internal/vc_env_helper.bat" python setup.py bdist_wheel +else + IS_WHEEL=1 python setup.py bdist_wheel +fi + + +if [[ "$(uname)" == Darwin ]]; then + pushd dist/ + python_exec="$(which python)" + bin_path=$(dirname $python_exec) + env_path=$(dirname $bin_path) + for whl in *.whl; do + DYLD_FALLBACK_LIBRARY_PATH="$env_path/lib/:$DYLD_FALLBACK_LIBRARY_PATH" delocate-wheel -v $whl + done +else + if [[ "$OSTYPE" == "msys" ]]; then + "$script_dir/windows/internal/vc_env_helper.bat" python $script_dir/wheel/relocate.py + else + LD_LIBRARY_PATH="/usr/local/lib:$LD_LIBRARY_PATH" python $script_dir/wheel/relocate.py + fi +fi diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/pkg_helpers.bash b/pretrained_model/pytorch_vision_v0.10.0/packaging/pkg_helpers.bash new file mode 100644 index 0000000000000000000000000000000000000000..826fb525e3abbbed7a08bcdcd4dab2f1a69e17d9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/pkg_helpers.bash @@ -0,0 +1,405 @@ +# A set of useful bash functions for common functionality we need to do in +# many build scripts + + +# Setup CUDA environment variables, based on CU_VERSION +# +# Inputs: +# CU_VERSION (cpu, cu92, cu100) +# NO_CUDA_PACKAGE (bool) +# BUILD_TYPE (conda, wheel) +# +# Outputs: +# VERSION_SUFFIX (e.g., "") +# PYTORCH_VERSION_SUFFIX (e.g., +cpu) +# WHEEL_DIR (e.g., cu100/) +# CUDA_HOME (e.g., /usr/local/cuda-9.2, respected by torch.utils.cpp_extension) +# FORCE_CUDA (respected by torchvision setup.py) +# NVCC_FLAGS (respected by torchvision setup.py) +# +# Precondition: CUDA versions are installed in their conventional locations in +# /usr/local/cuda-* +# +# NOTE: Why VERSION_SUFFIX versus PYTORCH_VERSION_SUFFIX? If you're building +# a package with CUDA on a platform we support CUDA on, VERSION_SUFFIX == +# PYTORCH_VERSION_SUFFIX and everyone is happy. However, if you are building a +# package with only CPU bits (e.g., torchaudio), then VERSION_SUFFIX is always +# empty, but PYTORCH_VERSION_SUFFIX is +cpu (because that's how you get a CPU +# version of a Python package. But that doesn't apply if you're on OS X, +# since the default CU_VERSION on OS X is cpu. +setup_cuda() { + + # First, compute version suffixes. 
By default, assume no version suffixes + export VERSION_SUFFIX="" + export PYTORCH_VERSION_SUFFIX="" + export WHEEL_DIR="" + # Wheel builds need suffixes (but not if they're on OS X, which never has suffix) + if [[ "$BUILD_TYPE" == "wheel" ]] && [[ "$(uname)" != Darwin ]]; then + # The default CUDA has no suffix + if [[ "$CU_VERSION" != "cu102" ]]; then + export PYTORCH_VERSION_SUFFIX="+$CU_VERSION" + fi + # Match the suffix scheme of pytorch, unless this package does not have + # CUDA builds (in which case, use default) + if [[ -z "$NO_CUDA_PACKAGE" ]]; then + export VERSION_SUFFIX="$PYTORCH_VERSION_SUFFIX" + export WHEEL_DIR="$CU_VERSION/" + fi + fi + + # Now work out the CUDA settings + case "$CU_VERSION" in + cu112) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.2" + else + export CUDA_HOME=/usr/local/cuda-11.2/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6" + ;; + cu111) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.1" + else + export CUDA_HOME=/usr/local/cuda-11.1/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0;8.6" + ;; + cu110) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v11.0" + else + export CUDA_HOME=/usr/local/cuda-11.0/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5;8.0" + ;; + cu102) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.2" + else + export CUDA_HOME=/usr/local/cuda-10.2/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5" + ;; + cu101) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.1" + else + export CUDA_HOME=/usr/local/cuda-10.1/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5" + ;; + cu100) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v10.0" + else + export CUDA_HOME=/usr/local/cuda-10.0/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0;7.5" + ;; + cu92) + if [[ "$OSTYPE" == "msys" ]]; then + export CUDA_HOME="C:\\Program Files\\NVIDIA GPU Computing Toolkit\\CUDA\\v9.2" + else + export CUDA_HOME=/usr/local/cuda-9.2/ + fi + export FORCE_CUDA=1 + export TORCH_CUDA_ARCH_LIST="3.5;5.0+PTX;6.0;7.0" + ;; + cpu) + ;; + rocm*) + export FORCE_CUDA=1 + ;; + *) + echo "Unrecognized CU_VERSION=$CU_VERSION" + exit 1 + ;; + esac +} + +# Populate build version if necessary, and add version suffix +# +# Inputs: +# BUILD_VERSION (e.g., 0.2.0 or empty) +# VERSION_SUFFIX (e.g., +cpu) +# +# Outputs: +# BUILD_VERSION (e.g., 0.2.0.dev20190807+cpu) +# +# Fill BUILD_VERSION if it doesn't exist already with a nightly string +# Usage: setup_build_version 0.2.0 +setup_build_version() { + if [[ -z "$BUILD_VERSION" ]]; then + export BUILD_VERSION="$1.dev$(date "+%Y%m%d")$VERSION_SUFFIX" + else + export BUILD_VERSION="$BUILD_VERSION$VERSION_SUFFIX" + fi + + # Set build version based on tag if on tag + if [[ -n "${CIRCLE_TAG}" ]]; then + # Strip tag + export BUILD_VERSION="$(echo "${CIRCLE_TAG}" | sed -e 's/^v//' -e 's/-.*$//')${VERSION_SUFFIX}" + fi +} + +# Set some useful variables for OS X, if applicable +setup_macos() { + if [[ "$(uname)" == Darwin ]]; then + export 
MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ + fi +} + + +# Top-level entry point for things every package will need to do +# +# Usage: setup_env 0.2.0 +setup_env() { + setup_cuda + setup_build_version "$1" + setup_macos +} + +# Function to retry functions that sometimes timeout or have flaky failures +retry () { + $* || (sleep 1 && $*) || (sleep 2 && $*) || (sleep 4 && $*) || (sleep 8 && $*) +} + +# Inputs: +# PYTHON_VERSION (2.7, 3.5, 3.6, 3.7) +# UNICODE_ABI (bool) +# +# Outputs: +# PATH modified to put correct Python version in PATH +# +# Precondition: If Linux, you are in a soumith/manylinux-cuda* Docker image +setup_wheel_python() { + if [[ "$(uname)" == Darwin || "$OSTYPE" == "msys" ]]; then + eval "$(conda shell.bash hook)" + conda env remove -n "env$PYTHON_VERSION" || true + if [[ "$PYTHON_VERSION" == 3.9 ]]; then + export CONDA_CHANNEL_FLAGS="${CONDA_CHANNEL_FLAGS} -c=conda-forge" + fi + conda create ${CONDA_CHANNEL_FLAGS} -yn "env$PYTHON_VERSION" python="$PYTHON_VERSION" + conda activate "env$PYTHON_VERSION" + # Install libpng from Anaconda (defaults) + conda install ${CONDA_CHANNEL_FLAGS} -c conda-forge libpng "jpeg<=9b" -y + else + # Install native CentOS libJPEG, LAME, freetype and GnuTLS + yum install -y libjpeg-turbo-devel lame freetype gnutls + case "$PYTHON_VERSION" in + 2.7) + if [[ -n "$UNICODE_ABI" ]]; then + python_abi=cp27-cp27mu + else + python_abi=cp27-cp27m + fi + ;; + 3.5) python_abi=cp35-cp35m ;; + 3.6) python_abi=cp36-cp36m ;; + 3.7) python_abi=cp37-cp37m ;; + 3.8) python_abi=cp38-cp38 ;; + 3.9) python_abi=cp39-cp39 ;; + *) + echo "Unrecognized PYTHON_VERSION=$PYTHON_VERSION" + exit 1 + ;; + esac + # Download all the dependencies required to compile image and video_reader + # extensions + + mkdir -p ext_libraries + pushd ext_libraries + popd + export PATH="/opt/python/$python_abi/bin:$(pwd)/ext_libraries/bin:$PATH" + fi +} + +# Install with pip a bit more robustly than the default +pip_install() { + retry pip install --progress-bar off "$@" +} + +# Install torch with pip, respecting PYTORCH_VERSION, and record the installed +# version into PYTORCH_VERSION, if applicable +setup_pip_pytorch_version() { + if [[ -z "$PYTORCH_VERSION" ]]; then + # Install latest prerelease version of torch, per our nightlies, consistent + # with the requested cuda version + pip_install --pre torch -f "https://download.pytorch.org/whl/nightly/${WHEEL_DIR}torch_nightly.html" + if [[ "$CUDA_VERSION" == "cpu" ]]; then + # CUDA and CPU are ABI compatible on the CPU-only parts, so strip + # in this case + export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//' | sed 's/+.\+//')" + else + export PYTORCH_VERSION="$(pip show torch | grep ^Version: | sed 's/Version: *//')" + fi + else + pip_install "torch==$PYTORCH_VERSION$PYTORCH_VERSION_SUFFIX" \ + -f "https://download.pytorch.org/whl/${CU_VERSION}/torch_stable.html" \ + -f "https://download.pytorch.org/whl/${UPLOAD_CHANNEL}/${CU_VERSION}/torch_${UPLOAD_CHANNEL}.html" + fi +} + +# Fill PYTORCH_VERSION with the latest conda nightly version, and +# CONDA_CHANNEL_FLAGS with appropriate flags to retrieve these versions +# +# You MUST have populated PYTORCH_VERSION_SUFFIX before hand. 
+setup_conda_pytorch_constraint() { + if [[ -z "$PYTORCH_VERSION" ]]; then + export CONDA_CHANNEL_FLAGS="-c pytorch-nightly -c pytorch" + export PYTORCH_VERSION="$(conda search --json 'pytorch[channel=pytorch-nightly]' | \ + python -c "import os, sys, json, re; cuver = os.environ.get('CU_VERSION'); \ + cuver_1 = cuver.replace('cu', 'cuda') if cuver != 'cpu' else cuver; \ + cuver_2 = (cuver[:-1] + '.' + cuver[-1]).replace('cu', 'cuda') if cuver != 'cpu' else cuver; \ + print(re.sub(r'\\+.*$', '', \ + [x['version'] for x in json.load(sys.stdin)['pytorch'] \ + if (x['platform'] == 'darwin' or cuver_1 in x['fn'] or cuver_2 in x['fn']) \ + and 'py' + os.environ['PYTHON_VERSION'] in x['fn']][-1]))")" + if [[ -z "$PYTORCH_VERSION" ]]; then + echo "PyTorch version auto detection failed" + echo "No package found for CU_VERSION=$CU_VERSION and PYTHON_VERSION=$PYTHON_VERSION" + exit 1 + fi + else + export CONDA_CHANNEL_FLAGS="-c pytorch -c pytorch-${UPLOAD_CHANNEL}" + fi + if [[ "$CU_VERSION" == cpu ]]; then + export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==$PYTORCH_VERSION${PYTORCH_VERSION_SUFFIX}" + export CONDA_PYTORCH_CONSTRAINT="- pytorch==$PYTORCH_VERSION" + else + export CONDA_PYTORCH_BUILD_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}" + export CONDA_PYTORCH_CONSTRAINT="- pytorch==${PYTORCH_VERSION}${PYTORCH_VERSION_SUFFIX}" + fi + if [[ "$OSTYPE" == msys && "$CU_VERSION" == cu92 ]]; then + export CONDA_CHANNEL_FLAGS="${CONDA_CHANNEL_FLAGS} -c defaults -c numba/label/dev" + fi + if [[ "$PYTHON_VERSION" == 3.9 ]]; then + export CONDA_CHANNEL_FLAGS="${CONDA_CHANNEL_FLAGS} -c=conda-forge" + fi +} + +# Translate CUDA_VERSION into CUDA_CUDATOOLKIT_CONSTRAINT +setup_conda_cudatoolkit_constraint() { + export CONDA_CPUONLY_FEATURE="" + if [[ "$(uname)" == Darwin ]]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="" + else + case "$CU_VERSION" in + cu112) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.2,<11.3 # [not osx]" + ;; + cu111) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.1,<11.2 # [not osx]" + ;; + cu110) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=11.0,<11.1 # [not osx]" + ;; + cu102) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.2,<10.3 # [not osx]" + ;; + cu101) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.1,<10.2 # [not osx]" + ;; + cu100) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=10.0,<10.1 # [not osx]" + ;; + cu92) + export CONDA_CUDATOOLKIT_CONSTRAINT="- cudatoolkit >=9.2,<9.3 # [not osx]" + ;; + cpu) + export CONDA_CUDATOOLKIT_CONSTRAINT="" + export CONDA_CPUONLY_FEATURE="- cpuonly" + ;; + *) + echo "Unrecognized CU_VERSION=$CU_VERSION" + exit 1 + ;; + esac + fi +} + +setup_conda_cudatoolkit_plain_constraint() { + export CONDA_CPUONLY_FEATURE="" + export CMAKE_USE_CUDA=1 + if [[ "$(uname)" == Darwin ]]; then + export CONDA_CUDATOOLKIT_CONSTRAINT="" + export CMAKE_USE_CUDA=0 + else + case "$CU_VERSION" in + cu112) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=11.2" + ;; + cu111) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=11.1" + ;; + cu102) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=10.2" + ;; + cu101) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=10.1" + ;; + cu100) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=10.0" + ;; + cu92) + export CONDA_CUDATOOLKIT_CONSTRAINT="cudatoolkit=9.2" + ;; + cpu) + export CONDA_CUDATOOLKIT_CONSTRAINT="" + export CONDA_CPUONLY_FEATURE="cpuonly" + export CMAKE_USE_CUDA=0 + ;; + *) + echo "Unrecognized CU_VERSION=$CU_VERSION" + exit 
1 + ;; + esac + fi +} + +# Build the proper compiler package before building the final package +setup_visual_studio_constraint() { + if [[ "$OSTYPE" == "msys" ]]; then + export VSTOOLCHAIN_PACKAGE=vs$VC_YEAR + conda build $CONDA_CHANNEL_FLAGS --no-anaconda-upload packaging/$VSTOOLCHAIN_PACKAGE + cp packaging/$VSTOOLCHAIN_PACKAGE/conda_build_config.yaml packaging/torchvision/conda_build_config.yaml + fi +} + +setup_junit_results_folder() { + if [[ "$CI" == "true" ]]; then + export CONDA_PYTORCH_BUILD_RESULTS_DIRECTORY="${SOURCE_ROOT_DIR}/build_results/results.xml" + fi +} + + +download_copy_ffmpeg() { + if [[ "$OSTYPE" == "msys" ]]; then + # conda install -yq ffmpeg=4.2 -c pytorch + # curl -L -q https://anaconda.org/pytorch/ffmpeg/4.3/download/win-64/ffmpeg-4.3-ha925a31_0.tar.bz2 --output ffmpeg-4.3-ha925a31_0.tar.bz2 + # bzip2 --decompress --stdout ffmpeg-4.3-ha925a31_0.tar.bz2 | tar -x --file=- + # cp Library/bin/*.dll ../torchvision + echo "FFmpeg is disabled currently on Windows" + else + if [[ "$(uname)" == Darwin ]]; then + conda install -yq ffmpeg=4.2 -c pytorch + conda install -yq wget + else + # pushd ext_libraries + # wget -q https://anaconda.org/pytorch/ffmpeg/4.2/download/linux-64/ffmpeg-4.2-hf484d3e_0.tar.bz2 + # tar -xjvf ffmpeg-4.2-hf484d3e_0.tar.bz2 + # rm -rf ffmpeg-4.2-hf484d3e_0.tar.bz2 + # ldconfig + # which ffmpeg + # popd + echo "FFmpeg is disabled currently on Linux" + fi + fi +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/conda_build_config.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/conda_build_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..257515c8b707fd7d6061f2ef47ea5396db2ead9f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/conda_build_config.yaml @@ -0,0 +1,26 @@ +channel_sources: + - pytorch-nightly,pytorch,defaults +blas_impl: + - mkl # [x86_64] +c_compiler: + - vs2017 # [win] +cxx_compiler: + - vs2017 # [win] +python: + - 3.5 + - 3.6 +# This differs from target_platform in that it determines what subdir the compiler +# will target, not what subdir the compiler package will be itself. +# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 +# code on win-64 miniconda. 
+cross_compiler_target_platform: + - win-64 # [win] +target_platform: + - win-64 # [win] +vc: + - 14 +zip_keys: + - # [win] + - vc # [win] + - c_compiler # [win] + - cxx_compiler # [win] diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/meta.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/meta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c9b6d04fdc793019eca612a9b2c3119d24e6643f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/torchvision/meta.yaml @@ -0,0 +1,65 @@ +package: + name: torchvision + version: "{{ environ.get('BUILD_VERSION') }}" + +source: + path: "{{ environ.get('SOURCE_ROOT_DIR') }}" + +requirements: + build: + - {{ compiler('c') }} # [win] + - libpng + # NOTE: Pinned to fix issues with size_t on Windows + - jpeg <=9b + # NOTE: The only ffmpeg version that we build is actually 4.2 + - ffmpeg >=4.2 # [not win] + + host: + - python + - setuptools + {{ environ.get('CONDA_PYTORCH_BUILD_CONSTRAINT') }} + {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + {{ environ.get('CONDA_CPUONLY_FEATURE') }} + + run: + - python + - libpng + - ffmpeg >=4.2 # [not win] + # NOTE: Pinned to fix issues with size_t on Windows + - jpeg <=9b + - pillow >=5.3.0 + {{ environ.get('CONDA_PYTORCH_CONSTRAINT') }} + {{ environ.get('CONDA_CUDATOOLKIT_CONSTRAINT') }} + +build: + string: py{{py}}_{{ environ['CU_VERSION'] }} + script: python setup.py install --single-version-externally-managed --record=record.txt + script_env: + - CUDA_HOME + - FORCE_CUDA + - BUILD_VERSION + - TORCH_CUDA_ARCH_LIST + features: + {{ environ.get('CONDA_CPUONLY_FEATURE') }} + +test: + imports: + - torchvision + - torchvision.datasets + - torchvision.transforms + source_files: + - test + requires: + - pytest + - scipy + - av + # NOTE: Pinned to fix issues with size_t on Windows + - jpeg <=9b + - ca-certificates + + +about: + home: https://github.com/pytorch/vision + license: BSD + license_file: LICENSE + summary: 'image and video datasets and models for torch deep learning' diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/activate.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/activate.bat new file mode 100644 index 0000000000000000000000000000000000000000..ccecfc25442f0563990588edfb0e9f949a4b8af4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/activate.bat @@ -0,0 +1,44 @@ +:: Set env vars that tell distutils to use the compiler that we put on path +SET DISTUTILS_USE_SDK=1 +SET MSSdk=1 + +SET "VS_VERSION=15.0" +SET "VS_MAJOR=15" +SET "VS_YEAR=2017" + +set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out" +set "MSYS2_ENV_CONV_EXCL=CL" + +:: For Python 3.5+, ensure that we link with the dynamic runtime. See +:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info +set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll" + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VSINSTALLDIR=%%i\" + goto :vswhere + ) +) + +:vswhere + +:: Shorten PATH to avoid the `input line too long` error. +SET MyPath=%PATH% + +setlocal EnableDelayedExpansion + +SET TempPath="%MyPath:;=";"%" +SET var= +FOR %%a IN (%TempPath%) DO ( + IF EXIST %%~sa ( + SET "var=!var!;%%~sa" + ) +) + +set "TempPath=!var:~1!" 
+endlocal & set "PATH=%TempPath%" + +:: Shorten current directory too +FOR %%A IN (.) DO CD "%%~sA" + +:: other things added by install_activate.bat at package build time diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/conda_build_config.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/conda_build_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5188bb0ebecf72aefb1c2e779458998216e4d479 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/conda_build_config.yaml @@ -0,0 +1,24 @@ +blas_impl: + - mkl # [x86_64] +c_compiler: + - vs2017 # [win] +cxx_compiler: + - vs2017 # [win] +python: + - 3.5 + - 3.6 +# This differs from target_platform in that it determines what subdir the compiler +# will target, not what subdir the compiler package will be itself. +# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 +# code on win-64 miniconda. +cross_compiler_target_platform: + - win-64 # [win] +target_platform: + - win-64 # [win] +vc: + - 14 +zip_keys: + - # [win] + - vc # [win] + - c_compiler # [win] + - cxx_compiler # [win] diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_activate.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_activate.bat new file mode 100644 index 0000000000000000000000000000000000000000..de0e6ff3c5209233153adad34654c2f2b800aba2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_activate.bat @@ -0,0 +1,30 @@ +set YEAR=2017 +set VER=15 + +mkdir "%PREFIX%\etc\conda\activate.d" +COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + +IF "%cross_compiler_target_platform%" == "win-64" ( + set "target_platform=amd64" + echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + IF "%VSDEVCMD_ARGS%" == "" ( + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) ELSE ( + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) else ( + set "target_platform=x86" + echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd + ) + diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_runtime.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_runtime.bat new file mode 100644 index 
0000000000000000000000000000000000000000..5163c16cf24d49092b6a4aa5cfb1d18a19cc1549 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/install_runtime.bat @@ -0,0 +1,49 @@ +set VC_PATH=x86 +if "%ARCH%"=="64" ( + set VC_PATH=x64 +) + +set MSC_VER=2017 + +rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015 +rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO ( +rem set SP=%%A +rem ) + +rem if not "%SP%" == "%PKG_VERSION%" ( +rem echo "Version detected from registry: %SP%" +rem echo "does not match version of package being built (%PKG_VERSION%)" +rem echo "Do you have current updates for VS 2015 installed?" +rem exit 1 +rem ) + + +REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below! +robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E +robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E +if %ERRORLEVEL% GEQ 8 exit 1 + +REM ========== This one comes from visual studio 2017 +set "VC_VER=141" + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [15^,16^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" + goto :eof + ) +) + +@setlocal +call "%VS15VARSALL%" x64 + +set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%" + +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +@endlocal diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/meta.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/meta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f569525ee176da433857aa6ae5a565350320549 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2017/meta.yaml @@ -0,0 +1,24 @@ +{% set vcver="14.1" %} +{% set vcfeature="14" %} +{% set vsyear="2017" %} +{% set fullver="15.4.27004.2010" %} + +package: + name: vs{{ vsyear }} + version: {{ fullver }} + +build: + skip: True [not win] + script_env: + - VSDEVCMD_ARGS # [win] + +outputs: + - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }} + script: install_activate.bat + track_features: + # VS 2017 is binary-compatible with VS 2015/vc14. Tools are "v141". 
+ strong: + - vc{{ vcfeature }} + about: + summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler + license: BSD 3-clause diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/activate.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/activate.bat new file mode 100644 index 0000000000000000000000000000000000000000..6f607ba7518e2346e16489195fcdbd111320996c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/activate.bat @@ -0,0 +1,44 @@ +:: Set env vars that tell distutils to use the compiler that we put on path +SET DISTUTILS_USE_SDK=1 +SET MSSdk=1 + +SET "VS_VERSION=16.0" +SET "VS_MAJOR=16" +SET "VS_YEAR=2019" + +set "MSYS2_ARG_CONV_EXCL=/AI;/AL;/OUT;/out" +set "MSYS2_ENV_CONV_EXCL=CL" + +:: For Python 3.5+, ensure that we link with the dynamic runtime. See +:: http://stevedower.id.au/blog/building-for-python-3-5-part-two/ for more info +set "PY_VCRUNTIME_REDIST=%PREFIX%\\bin\\vcruntime140.dll" + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VSINSTALLDIR=%%i\" + goto :vswhere + ) +) + +:vswhere + +:: Shorten PATH to avoid the `input line too long` error. +SET MyPath=%PATH% + +setlocal EnableDelayedExpansion + +SET TempPath="%MyPath:;=";"%" +SET var= +FOR %%a IN (%TempPath%) DO ( + IF EXIST %%~sa ( + SET "var=!var!;%%~sa" + ) +) + +set "TempPath=!var:~1!" +endlocal & set "PATH=%TempPath%" + +:: Shorten current directory too +FOR %%A IN (.) DO CD "%%~sA" + +:: other things added by install_activate.bat at package build time diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/conda_build_config.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/conda_build_config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..358052ec012940bb56778d167bcd69302d255846 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/conda_build_config.yaml @@ -0,0 +1,24 @@ +blas_impl: + - mkl # [x86_64] +c_compiler: + - vs2019 # [win] +cxx_compiler: + - vs2019 # [win] +python: + - 3.5 + - 3.6 +# This differs from target_platform in that it determines what subdir the compiler +# will target, not what subdir the compiler package will be itself. +# For example, we need a win-64 vs2008_win-32 package, so that we compile win-32 +# code on win-64 miniconda. 
+cross_compiler_target_platform: + - win-64 # [win] +target_platform: + - win-64 # [win] +vc: + - 14 +zip_keys: + - # [win] + - vc # [win] + - c_compiler # [win] + - cxx_compiler # [win] diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_activate.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_activate.bat new file mode 100644 index 0000000000000000000000000000000000000000..3c38253aa5dea3bdfc9f8cf4027e721376512154 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_activate.bat @@ -0,0 +1,30 @@ +set YEAR=2019 +set VER=16 + +mkdir "%PREFIX%\etc\conda\activate.d" +COPY "%RECIPE_DIR%\activate.bat" "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + +IF "%cross_compiler_target_platform%" == "win-64" ( + set "target_platform=amd64" + echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR% Win64" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + IF "%VSDEVCMD_ARGS%" == "" ( + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) ELSE ( + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvarsall.bat" x86_amd64 %VSDEVCMD_ARGS% >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) + echo popd >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + ) else ( + set "target_platform=x86" + echo SET "CMAKE_GENERATOR=Visual Studio %VER% %YEAR%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo pushd "%%VSINSTALLDIR%%" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo CALL "VC\Auxiliary\Build\vcvars32.bat" >> "%PREFIX%\etc\conda\activate.d\vs%YEAR%_compiler_vars.bat" + echo popd + ) + diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_runtime.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_runtime.bat new file mode 100644 index 0000000000000000000000000000000000000000..e09a5ccfb0f42cc6de2a2f960d31faf2511ae094 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/install_runtime.bat @@ -0,0 +1,49 @@ +set VC_PATH=x86 +if "%ARCH%"=="64" ( + set VC_PATH=x64 +) + +set MSC_VER=2019 + +rem :: This should always be present for VC installed with VS. Not sure about VC installed with Visual C++ Build Tools 2015 +rem FOR /F "usebackq tokens=3*" %%A IN (`REG QUERY "HKEY_LOCAL_MACHINE\Software\Microsoft\DevDiv\VC\Servicing\14.0\IDE.x64" /v UpdateVersion`) DO ( +rem set SP=%%A +rem ) + +rem if not "%SP%" == "%PKG_VERSION%" ( +rem echo "Version detected from registry: %SP%" +rem echo "does not match version of package being built (%PKG_VERSION%)" +rem echo "Do you have current updates for VS 2015 installed?" +rem exit 1 +rem ) + + +REM ========== REQUIRES Win 10 SDK be installed, or files otherwise copied to location below! 
+robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%LIBRARY_BIN%" *.dll /E +robocopy "C:\Program Files (x86)\Windows Kits\10\Redist\ucrt\DLLs\%VC_PATH%" "%PREFIX%" *.dll /E +if %ERRORLEVEL% GEQ 8 exit 1 + +REM ========== This one comes from visual studio 2019 +set "VC_VER=142" + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [16^,17^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" + goto :eof + ) +) + +@setlocal +call "%VS15VARSALL%" x64 + +set "REDIST_ROOT=%VCToolsRedistDir%%VC_PATH%" + +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%LIBRARY_BIN%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.CRT" "%PREFIX%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%LIBRARY_BIN%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +robocopy "%REDIST_ROOT%\Microsoft.VC%VC_VER%.OpenMP" "%PREFIX%" *.dll /E +if %ERRORLEVEL% LSS 8 exit 0 +@endlocal diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/meta.yaml b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/meta.yaml new file mode 100644 index 0000000000000000000000000000000000000000..94a0ed4db3eb4bdf2dc59b9144bcdf4ade0b75d5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/vs2019/meta.yaml @@ -0,0 +1,24 @@ +{% set vcver="14.2" %} +{% set vcfeature="14" %} +{% set vsyear="2019" %} +{% set fullver="15.4.27004.2010" %} + +package: + name: vs{{ vsyear }} + version: {{ fullver }} + +build: + skip: True [not win] + script_env: + - VSDEVCMD_ARGS # [win] + +outputs: + - name: vs{{ vsyear }}_{{ cross_compiler_target_platform }} + script: install_activate.bat + track_features: + # VS 2019 is binary-compatible with VS 2017/vc 14.1 and 2015/vc14. Tools are "v142". + strong: + - vc{{ vcfeature }} + about: + summary: Activation and version verification of MSVC {{ vcver }} (VS {{ vsyear }}) compiler + license: BSD 3-clause diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/linux_manywheel.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/linux_manywheel.sh new file mode 100644 index 0000000000000000000000000000000000000000..19e7d1a7500613cb38794be173b1482bdcfd4318 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/linux_manywheel.sh @@ -0,0 +1,62 @@ +#!/bin/bash +set -ex + +if [ "$#" -ne 1 ]; then + echo "Illegal number of parameters. 
Pass cuda version" + echo "CUDA version should be cu92, cu100 or cpu" + exit 1 +fi +export CUVER="$1" # cu[0-9]* cpu + +if [[ "$CUVER" == "cu102" ]]; then + cu_suffix="" +else + cu_suffix="+$CUVER" +fi + +export TORCHVISION_BUILD_VERSION="0.4.0.dev$(date "+%Y%m%d")${cu_suffix}" +export TORCHVISION_BUILD_NUMBER="1" +export TORCHVISION_LOCAL_VERSION_LABEL="$CUVER" +export OUT_DIR="/remote/$CUVER" + +pushd /opt/python +DESIRED_PYTHON=(*/) +popd +for desired_py in "${DESIRED_PYTHON[@]}"; do + python_installations+=("/opt/python/$desired_py") +done + +OLD_PATH=$PATH +cd /tmp +rm -rf vision +git clone https://github.com/pytorch/vision + +cd /tmp/vision + +for PYDIR in "${python_installations[@]}"; do + export PATH=$PYDIR/bin:$OLD_PATH + pip install --upgrade pip + pip install numpy pyyaml future + + pip uninstall -y torch || true + pip uninstall -y torch_nightly || true + + export TORCHVISION_PYTORCH_DEPENDENCY_NAME=torch_nightly + pip install torch_nightly -f https://download.pytorch.org/whl/nightly/$CUVER/torch_nightly.html + # CPU/CUDA variants of PyTorch have ABI compatible PyTorch for + # the CPU only bits. Therefore, we + # strip off the local package qualifier, but ONLY if we're + # doing a CPU build. + if [[ "$CUVER" == "cpu" ]]; then + export TORCHVISION_PYTORCH_DEPENDENCY_VERSION="$(pip show torch_nightly | grep ^Version: | sed 's/Version: \+//' | sed 's/+.\+//')" + else + export TORCHVISION_PYTORCH_DEPENDENCY_VERSION="$(pip show torch_nightly | grep ^Version: | sed 's/Version: \+//')" + fi + echo "Building against ${TORCHVISION_PYTORCH_DEPENDENCY_VERSION}" + + pip install ninja + python setup.py clean + python setup.py bdist_wheel + mkdir -p $OUT_DIR + cp dist/*.whl $OUT_DIR/ +done diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/osx_wheel.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/osx_wheel.sh new file mode 100644 index 0000000000000000000000000000000000000000..900485d319954b6ec585c69da31edae7e39ad4d8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/osx_wheel.sh @@ -0,0 +1,52 @@ +if [[ ":$PATH:" == *"conda"* ]]; then + echo "existing anaconda install in PATH, remove it and run script" + exit 1 +fi +# download and activate anaconda +rm -rf ~/minconda_wheel_env_tmp +wget -q https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh && \ + chmod +x Miniconda3-latest-MacOSX-x86_64.sh && \ + ./Miniconda3-latest-MacOSX-x86_64.sh -b -p ~/minconda_wheel_env_tmp && \ + rm Miniconda3-latest-MacOSX-x86_64.sh + +. 
~/minconda_wheel_env_tmp/bin/activate + + +export TORCHVISION_BUILD_VERSION="0.4.0.dev$(date "+%Y%m%d")" +export TORCHVISION_BUILD_NUMBER="1" +export OUT_DIR=~/torchvision_wheels + +export MACOSX_DEPLOYMENT_TARGET=10.9 CC=clang CXX=clang++ + +pushd /tmp +rm -rf vision +git clone https://github.com/pytorch/vision +pushd vision + +desired_pythons=( "2.7" "3.5" "3.6" "3.7" ) +# for each python +for desired_python in "${desired_pythons[@]}" +do + # create and activate python env + env_name="env$desired_python" + conda create -yn $env_name python="$desired_python" + conda activate $env_name + + pip uninstall -y torch || true + pip uninstall -y torch_nightly || true + + export TORCHVISION_PYTORCH_DEPENDENCY_NAME=torch_nightly + pip install torch_nightly -f https://download.pytorch.org/whl/nightly/cpu/torch_nightly.html + export TORCHVISION_PYTORCH_DEPENDENCY_VERSION="$(pip show torch_nightly | grep ^Version: | sed 's/Version: *//')" + echo "Building against ${TORCHAUDIO_PYTORCH_DEPENDENCY_VERSION}" + + # install torchvision dependencies + pip install ninja scipy pytest + + python setup.py clean + python setup.py bdist_wheel + mkdir -p $OUT_DIR + cp dist/*.whl $OUT_DIR/ +done +popd +popd diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/relocate.py b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/relocate.py new file mode 100644 index 0000000000000000000000000000000000000000..dd2c5d2a4ce7bbfec673c17397d9f600b6f7389c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/wheel/relocate.py @@ -0,0 +1,417 @@ +# -*- coding: utf-8 -*- + +"""Helper script to package wheels and relocate binaries.""" + +# Standard library imports +import os +import io +import sys +import glob +import shutil +import zipfile +import hashlib +import platform +import subprocess +import os.path as osp +from base64 import urlsafe_b64encode + +# Third party imports +if sys.platform == 'linux': + from auditwheel.lddtree import lddtree +from wheel.bdist_wheel import get_abi_tag + + +ALLOWLIST = { + 'libgcc_s.so.1', 'libstdc++.so.6', 'libm.so.6', + 'libdl.so.2', 'librt.so.1', 'libc.so.6', + 'libnsl.so.1', 'libutil.so.1', 'libpthread.so.0', + 'libresolv.so.2', 'libX11.so.6', 'libXext.so.6', + 'libXrender.so.1', 'libICE.so.6', 'libSM.so.6', + 'libGL.so.1', 'libgobject-2.0.so.0', 'libgthread-2.0.so.0', + 'libglib-2.0.so.0', 'ld-linux-x86-64.so.2', 'ld-2.17.so' +} + +WINDOWS_ALLOWLIST = { + 'MSVCP140.dll', 'KERNEL32.dll', + 'VCRUNTIME140_1.dll', 'VCRUNTIME140.dll', + 'api-ms-win-crt-heap-l1-1-0.dll', + 'api-ms-win-crt-runtime-l1-1-0.dll', + 'api-ms-win-crt-stdio-l1-1-0.dll', + 'api-ms-win-crt-filesystem-l1-1-0.dll', + 'api-ms-win-crt-string-l1-1-0.dll', + 'api-ms-win-crt-environment-l1-1-0.dll', + 'api-ms-win-crt-math-l1-1-0.dll', + 'api-ms-win-crt-convert-l1-1-0.dll' +} + + +HERE = osp.dirname(osp.abspath(__file__)) +PACKAGE_ROOT = osp.dirname(osp.dirname(HERE)) +PLATFORM_ARCH = platform.machine() +PYTHON_VERSION = sys.version_info + + +def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): + """Yield pieces of data from a file-like object until EOF.""" + while True: + chunk = file.read(size) + if not chunk: + break + yield chunk + + +def rehash(path, blocksize=1 << 20): + """Return (hash, length) for path using hashlib.sha256()""" + h = hashlib.sha256() + length = 0 + with open(path, 'rb') as f: + for block in read_chunks(f, size=blocksize): + length += len(block) + h.update(block) + digest = 'sha256=' + urlsafe_b64encode( + h.digest() + ).decode('latin1').rstrip('=') + # unicode/str python2 
issues + return (digest, str(length)) # type: ignore + + +def unzip_file(file, dest): + """Decompress zip `file` into directory `dest`.""" + with zipfile.ZipFile(file, 'r') as zip_ref: + zip_ref.extractall(dest) + + +def is_program_installed(basename): + """ + Return program absolute path if installed in PATH. + Otherwise, return None + On macOS systems, a .app is considered installed if + it exists. + """ + if (sys.platform == 'darwin' and basename.endswith('.app') and + osp.exists(basename)): + return basename + + for path in os.environ["PATH"].split(os.pathsep): + abspath = osp.join(path, basename) + if osp.isfile(abspath): + return abspath + + +def find_program(basename): + """ + Find program in PATH and return absolute path + Try adding .exe or .bat to basename on Windows platforms + (return None if not found) + """ + names = [basename] + if os.name == 'nt': + # Windows platforms + extensions = ('.exe', '.bat', '.cmd', '.dll') + if not basename.endswith(extensions): + names = [basename + ext for ext in extensions] + [basename] + for name in names: + path = is_program_installed(name) + if path: + return path + + +def patch_new_path(library_path, new_dir): + library = osp.basename(library_path) + name, *rest = library.split('.') + rest = '.'.join(rest) + hash_id = hashlib.sha256(library_path.encode('utf-8')).hexdigest()[:8] + new_name = '.'.join([name, hash_id, rest]) + return osp.join(new_dir, new_name) + + +def find_dll_dependencies(dumpbin, binary): + out = subprocess.run([dumpbin, "/dependents", binary], + stdout=subprocess.PIPE) + out = out.stdout.strip().decode('utf-8') + start_index = out.find('dependencies:') + len('dependencies:') + end_index = out.find('Summary') + dlls = out[start_index:end_index].strip() + dlls = dlls.split(os.linesep) + dlls = [dll.strip() for dll in dlls] + return dlls + + +def relocate_elf_library(patchelf, output_dir, output_library, binary): + """ + Relocate an ELF shared library to be packaged on a wheel. + + Given a shared library, find the transitive closure of its dependencies, + rename and copy them into the wheel while updating their respective rpaths. 
+ """ + + print('Relocating {0}'.format(binary)) + binary_path = osp.join(output_library, binary) + + ld_tree = lddtree(binary_path) + tree_libs = ld_tree['libs'] + + binary_queue = [(n, binary) for n in ld_tree['needed']] + binary_paths = {binary: binary_path} + binary_dependencies = {} + + while binary_queue != []: + library, parent = binary_queue.pop(0) + library_info = tree_libs[library] + print(library) + + if library_info['path'] is None: + print('Omitting {0}'.format(library)) + continue + + if library in ALLOWLIST: + # Omit glibc/gcc/system libraries + print('Omitting {0}'.format(library)) + continue + + parent_dependencies = binary_dependencies.get(parent, []) + parent_dependencies.append(library) + binary_dependencies[parent] = parent_dependencies + + if library in binary_paths: + continue + + binary_paths[library] = library_info['path'] + binary_queue += [(n, library) for n in library_info['needed']] + + print('Copying dependencies to wheel directory') + new_libraries_path = osp.join(output_dir, 'torchvision.libs') + os.makedirs(new_libraries_path) + + new_names = {binary: binary_path} + + for library in binary_paths: + if library != binary: + library_path = binary_paths[library] + new_library_path = patch_new_path(library_path, new_libraries_path) + print('{0} -> {1}'.format(library, new_library_path)) + shutil.copyfile(library_path, new_library_path) + new_names[library] = new_library_path + + print('Updating dependency names by new files') + for library in binary_paths: + if library != binary: + if library not in binary_dependencies: + continue + library_dependencies = binary_dependencies[library] + new_library_name = new_names[library] + for dep in library_dependencies: + new_dep = osp.basename(new_names[dep]) + print('{0}: {1} -> {2}'.format(library, dep, new_dep)) + subprocess.check_output( + [ + patchelf, + '--replace-needed', + dep, + new_dep, + new_library_name + ], + cwd=new_libraries_path) + + print('Updating library rpath') + subprocess.check_output( + [ + patchelf, + '--set-rpath', + "$ORIGIN", + new_library_name + ], + cwd=new_libraries_path) + + subprocess.check_output( + [ + patchelf, + '--print-rpath', + new_library_name + ], + cwd=new_libraries_path) + + print("Update library dependencies") + library_dependencies = binary_dependencies[binary] + for dep in library_dependencies: + new_dep = osp.basename(new_names[dep]) + print('{0}: {1} -> {2}'.format(binary, dep, new_dep)) + subprocess.check_output( + [ + patchelf, + '--replace-needed', + dep, + new_dep, + binary + ], + cwd=output_library) + + print('Update library rpath') + subprocess.check_output( + [ + patchelf, + '--set-rpath', + "$ORIGIN:$ORIGIN/../torchvision.libs", + binary_path + ], + cwd=output_library + ) + + +def relocate_dll_library(dumpbin, output_dir, output_library, binary): + """ + Relocate a DLL/PE shared library to be packaged on a wheel. + + Given a shared library, find the transitive closure of its dependencies, + rename and copy them into the wheel. 
+ """ + print('Relocating {0}'.format(binary)) + binary_path = osp.join(output_library, binary) + + library_dlls = find_dll_dependencies(dumpbin, binary_path) + binary_queue = [(dll, binary) for dll in library_dlls] + binary_paths = {binary: binary_path} + binary_dependencies = {} + + while binary_queue != []: + library, parent = binary_queue.pop(0) + if library in WINDOWS_ALLOWLIST or library.startswith('api-ms-win'): + print('Omitting {0}'.format(library)) + continue + + library_path = find_program(library) + if library_path is None: + print('{0} not found'.format(library)) + continue + + if osp.basename(osp.dirname(library_path)) == 'system32': + continue + + print('{0}: {1}'.format(library, library_path)) + parent_dependencies = binary_dependencies.get(parent, []) + parent_dependencies.append(library) + binary_dependencies[parent] = parent_dependencies + + if library in binary_paths: + continue + + binary_paths[library] = library_path + downstream_dlls = find_dll_dependencies(dumpbin, library_path) + binary_queue += [(n, library) for n in downstream_dlls] + + print('Copying dependencies to wheel directory') + package_dir = osp.join(output_dir, 'torchvision') + for library in binary_paths: + if library != binary: + library_path = binary_paths[library] + new_library_path = osp.join(package_dir, library) + print('{0} -> {1}'.format(library, new_library_path)) + shutil.copyfile(library_path, new_library_path) + + +def compress_wheel(output_dir, wheel, wheel_dir, wheel_name): + """Create RECORD file and compress wheel distribution.""" + print('Update RECORD file in wheel') + dist_info = glob.glob(osp.join(output_dir, '*.dist-info'))[0] + record_file = osp.join(dist_info, 'RECORD') + + with open(record_file, 'w') as f: + for root, _, files in os.walk(output_dir): + for this_file in files: + full_file = osp.join(root, this_file) + rel_file = osp.relpath(full_file, output_dir) + if full_file == record_file: + f.write('{0},,\n'.format(rel_file)) + else: + digest, size = rehash(full_file) + f.write('{0},{1},{2}\n'.format(rel_file, digest, size)) + + print('Compressing wheel') + base_wheel_name = osp.join(wheel_dir, wheel_name) + shutil.make_archive(base_wheel_name, 'zip', output_dir) + os.remove(wheel) + shutil.move('{0}.zip'.format(base_wheel_name), wheel) + shutil.rmtree(output_dir) + + +def patch_linux(): + # Get patchelf location + patchelf = find_program('patchelf') + if patchelf is None: + raise FileNotFoundError('Patchelf was not found in the system, please' + ' make sure that is available on the PATH.') + + # Find wheel + print('Finding wheels...') + wheels = glob.glob(osp.join(PACKAGE_ROOT, 'dist', '*.whl')) + output_dir = osp.join(PACKAGE_ROOT, 'dist', '.wheel-process') + + image_binary = 'image.so' + video_binary = 'video_reader.so' + torchvision_binaries = [image_binary, video_binary] + for wheel in wheels: + if osp.exists(output_dir): + shutil.rmtree(output_dir) + + os.makedirs(output_dir) + + print('Unzipping wheel...') + wheel_file = osp.basename(wheel) + wheel_dir = osp.dirname(wheel) + print('{0}'.format(wheel_file)) + wheel_name, _ = osp.splitext(wheel_file) + unzip_file(wheel, output_dir) + + print('Finding ELF dependencies...') + output_library = osp.join(output_dir, 'torchvision') + for binary in torchvision_binaries: + if osp.exists(osp.join(output_library, binary)): + relocate_elf_library( + patchelf, output_dir, output_library, binary) + + compress_wheel(output_dir, wheel, wheel_dir, wheel_name) + + +def patch_win(): + # Get dumpbin location + dumpbin = 
find_program('dumpbin') + if dumpbin is None: + raise FileNotFoundError('Dumpbin was not found in the system, please' + ' make sure that is available on the PATH.') + + # Find wheel + print('Finding wheels...') + wheels = glob.glob(osp.join(PACKAGE_ROOT, 'dist', '*.whl')) + output_dir = osp.join(PACKAGE_ROOT, 'dist', '.wheel-process') + + image_binary = 'image.pyd' + video_binary = 'video_reader.pyd' + torchvision_binaries = [image_binary, video_binary] + for wheel in wheels: + if osp.exists(output_dir): + shutil.rmtree(output_dir) + + os.makedirs(output_dir) + + print('Unzipping wheel...') + wheel_file = osp.basename(wheel) + wheel_dir = osp.dirname(wheel) + print('{0}'.format(wheel_file)) + wheel_name, _ = osp.splitext(wheel_file) + unzip_file(wheel, output_dir) + + print('Finding DLL/PE dependencies...') + output_library = osp.join(output_dir, 'torchvision') + for binary in torchvision_binaries: + if osp.exists(osp.join(output_library, binary)): + relocate_dll_library( + dumpbin, output_dir, output_library, binary) + + compress_wheel(output_dir, wheel, wheel_dir, wheel_name) + + +if __name__ == '__main__': + if sys.platform == 'linux': + patch_linux() + elif sys.platform == 'win32': + patch_win() diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cmake.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cmake.bat new file mode 100644 index 0000000000000000000000000000000000000000..a29160538d297b60d4ff7381564045c9bd354e57 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cmake.bat @@ -0,0 +1,3 @@ +@echo on +msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" torchvision.vcxproj -maxcpucount:%1 +msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" INSTALL.vcxproj -maxcpucount:%1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cpp_example.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cpp_example.bat new file mode 100644 index 0000000000000000000000000000000000000000..e3f7afe9f02c5915fdd22f5c22164286349ab58a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_cpp_example.bat @@ -0,0 +1,3 @@ +@echo on +set CL=/I"C:\Program Files (x86)\torchvision\include" +msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" hello-world.vcxproj -maxcpucount:%1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_frcnn.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_frcnn.bat new file mode 100644 index 0000000000000000000000000000000000000000..36e3757d01cb18d81938a334767c6ca2b7fcfde2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/build_frcnn.bat @@ -0,0 +1,3 @@ +@echo on +set CL=/I"C:\Program Files (x86)\torchvision\include" +msbuild "-p:Configuration=Release" "-p:BuildInParallel=true" "-p:MultiProcessorCompilation=true" "-p:CL_MPCount=%1" test_frcnn_tracing.vcxproj -maxcpucount:%1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/cuda_install.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/cuda_install.bat new file mode 100644 index 0000000000000000000000000000000000000000..9ca08e1cfbbe2e8f0999f41e9869a8a7dc7e3cff --- /dev/null +++ 
b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/cuda_install.bat @@ -0,0 +1,201 @@ +@echo on + +if "%CU_VERSION%" == "cpu" ( + echo Skipping for CPU builds + exit /b 0 +) + +set SRC_DIR=%~dp0\.. + +if not exist "%SRC_DIR%\temp_build" mkdir "%SRC_DIR%\temp_build" + +set /a CUDA_VER=%CU_VERSION:cu=% +set CUDA_VER_MAJOR=%CUDA_VER:~0,-1% +set CUDA_VER_MINOR=%CUDA_VER:~-1,1% +set CUDA_VERSION_STR=%CUDA_VER_MAJOR%.%CUDA_VER_MINOR% + +if %CUDA_VER% EQU 92 goto cuda92 +if %CUDA_VER% EQU 100 goto cuda100 +if %CUDA_VER% EQU 101 goto cuda101 +if %CUDA_VER% EQU 102 goto cuda102 +if %CUDA_VER% EQU 110 goto cuda110 +if %CUDA_VER% EQU 111 goto cuda111 +if %CUDA_VER% EQU 112 goto cuda112 + +echo CUDA %CUDA_VERSION_STR% is not supported +exit /b 1 + +:cuda92 +if not exist "%SRC_DIR%\temp_build\cuda_9.2.148_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/win2016/cuda_9.2.148_win10.exe --output "%SRC_DIR%\temp_build\cuda_9.2.148_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_9.2.148_win10.exe" + set "ARGS=nvcc_9.2 cuobjdump_9.2 nvprune_9.2 cupti_9.2 cublas_9.2 cublas_dev_9.2 cudart_9.2 cufft_9.2 cufft_dev_9.2 curand_9.2 curand_dev_9.2 cusolver_9.2 cusolver_dev_9.2 cusparse_9.2 cusparse_dev_9.2 nvgraph_9.2 nvgraph_dev_9.2 npp_9.2 npp_dev_9.2 nvrtc_9.2 nvrtc_dev_9.2 nvml_dev_9.2" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-9.2-windows10-x64-v7.2.1.38.zip" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/win2016/cudnn-9.2-windows10-x64-v7.2.1.38.zip --output "%SRC_DIR%\temp_build\cudnn-9.2-windows10-x64-v7.2.1.38.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-9.2-windows10-x64-v7.2.1.38.zip" +) + +goto cuda_common + +:cuda100 + +if not exist "%SRC_DIR%\temp_build\cuda_10.0.130_411.31_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/win2016/cuda_10.0.130_411.31_win10.exe --output "%SRC_DIR%\temp_build\cuda_10.0.130_411.31_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_10.0.130_411.31_win10.exe" + set "ARGS=nvcc_10.0 cuobjdump_10.0 nvprune_10.0 cupti_10.0 cublas_10.0 cublas_dev_10.0 cudart_10.0 cufft_10.0 cufft_dev_10.0 curand_10.0 curand_dev_10.0 cusolver_10.0 cusolver_dev_10.0 cusparse_10.0 cusparse_dev_10.0 nvgraph_10.0 nvgraph_dev_10.0 npp_10.0 npp_dev_10.0 nvrtc_10.0 nvrtc_dev_10.0 nvml_dev_10.0" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-10.0-windows10-x64-v7.4.1.5.zip" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/win2016/cudnn-10.0-windows10-x64-v7.4.1.5.zip --output "%SRC_DIR%\temp_build\cudnn-10.0-windows10-x64-v7.4.1.5.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-10.0-windows10-x64-v7.4.1.5.zip" +) + +goto cuda_common + +:cuda101 + +if not exist "%SRC_DIR%\temp_build\cuda_10.1.243_426.00_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cuda_10.1.243_426.00_win10.exe --output "%SRC_DIR%\temp_build\cuda_10.1.243_426.00_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_10.1.243_426.00_win10.exe" + set "ARGS=nvcc_10.1 cuobjdump_10.1 nvprune_10.1 cupti_10.1 cublas_10.1 cublas_dev_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 curand_10.1 curand_dev_10.1 cusolver_10.1 cusolver_dev_10.1 cusparse_10.1 cusparse_dev_10.1 nvgraph_10.1 nvgraph_dev_10.1 npp_10.1 npp_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 nvml_dev_10.1" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-10.1-windows10-x64-v7.6.4.38.zip" ( + curl -k -L 
https://ossci-windows.s3.amazonaws.com/cudnn-10.1-windows10-x64-v7.6.4.38.zip --output "%SRC_DIR%\temp_build\cudnn-10.1-windows10-x64-v7.6.4.38.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-10.1-windows10-x64-v7.6.4.38.zip" +) + +goto cuda_common + +:cuda102 + +if not exist "%SRC_DIR%\temp_build\cuda_10.2.89_441.22_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cuda_10.2.89_441.22_win10.exe --output "%SRC_DIR%\temp_build\cuda_10.2.89_441.22_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_10.2.89_441.22_win10.exe" + set "ARGS=nvcc_10.2 cuobjdump_10.2 nvprune_10.2 cupti_10.2 cublas_10.2 cublas_dev_10.2 cudart_10.2 cufft_10.2 cufft_dev_10.2 curand_10.2 curand_dev_10.2 cusolver_10.2 cusolver_dev_10.2 cusparse_10.2 cusparse_dev_10.2 nvgraph_10.2 nvgraph_dev_10.2 npp_10.2 npp_dev_10.2 nvrtc_10.2 nvrtc_dev_10.2 nvml_dev_10.2" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-10.2-windows10-x64-v7.6.5.32.zip" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cudnn-10.2-windows10-x64-v7.6.5.32.zip --output "%SRC_DIR%\temp_build\cudnn-10.2-windows10-x64-v7.6.5.32.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-10.2-windows10-x64-v7.6.5.32.zip" +) + +goto cuda_common + +:cuda110 + +if not exist "%SRC_DIR%\temp_build\cuda_11.0.2_451.48_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cuda_11.0.2_451.48_win10.exe --output "%SRC_DIR%\temp_build\cuda_11.0.2_451.48_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_11.0.2_451.48_win10.exe" + set "ARGS=nvcc_11.0 cuobjdump_11.0 nvprune_11.0 nvprof_11.0 cupti_11.0 cublas_11.0 cublas_dev_11.0 cudart_11.0 cufft_11.0 cufft_dev_11.0 curand_11.0 curand_dev_11.0 cusolver_11.0 cusolver_dev_11.0 cusparse_11.0 cusparse_dev_11.0 npp_11.0 npp_dev_11.0 nvrtc_11.0 nvrtc_dev_11.0 nvml_dev_11.0" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-11.0-windows-x64-v8.0.4.30.zip" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cudnn-11.0-windows-x64-v8.0.4.30.zip --output "%SRC_DIR%\temp_build\cudnn-11.0-windows-x64-v8.0.4.30.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-11.0-windows-x64-v8.0.4.30.zip" +) + +goto cuda_common + +:cuda111 + +if not exist "%SRC_DIR%\temp_build\cuda_11.1.0_456.43_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cuda_11.1.0_456.43_win10.exe --output "%SRC_DIR%\temp_build\cuda_11.1.0_456.43_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_11.1.0_456.43_win10.exe" + set "ARGS=nvcc_11.1 cuobjdump_11.1 nvprune_11.1 nvprof_11.1 cupti_11.1 cublas_11.1 cublas_dev_11.1 cudart_11.1 cufft_11.1 cufft_dev_11.1 curand_11.1 curand_dev_11.1 cusolver_11.1 cusolver_dev_11.1 cusparse_11.1 cusparse_dev_11.1 npp_11.1 npp_dev_11.1 nvrtc_11.1 nvrtc_dev_11.1 nvml_dev_11.1" +) + +@REM There is no downloadable driver for Tesla on CUDA 11.1 yet. 
We will use +@REM the driver inside CUDA +if "%JOB_EXECUTOR%" == "windows-with-nvidia-gpu" set "ARGS=%ARGS% Display.Driver" + +if not exist "%SRC_DIR%\temp_build\cudnn-11.1-windows-x64-v8.0.5.39.zip" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cudnn-11.1-windows-x64-v8.0.5.39.zip --output "%SRC_DIR%\temp_build\cudnn-11.1-windows-x64-v8.0.5.39.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-11.1-windows-x64-v8.0.5.39.zip" +) + +goto cuda_common + +:cuda112 + +if not exist "%SRC_DIR%\temp_build\cuda_11.2.0_460.89_win10.exe" ( + curl -k -L https://ossci-windows.s3.amazonaws.com/cuda_11.2.0_460.89_win10.exe --output "%SRC_DIR%\temp_build\cuda_11.2.0_460.89_win10.exe" + if errorlevel 1 exit /b 1 + set "CUDA_SETUP_FILE=%SRC_DIR%\temp_build\cuda_11.2.0_460.89_win10.exe" + set "ARGS=nvcc_11.2 cuobjdump_11.2 nvprune_11.2 nvprof_11.2 cupti_11.2 cublas_11.2 cublas_dev_11.2 cudart_11.2 cufft_11.2 cufft_dev_11.2 curand_11.2 curand_dev_11.2 cusolver_11.2 cusolver_dev_11.2 cusparse_11.2 cusparse_dev_11.2 npp_11.2 npp_dev_11.2 nvrtc_11.2 nvrtc_dev_11.2 nvml_dev_11.2" +) + +if not exist "%SRC_DIR%\temp_build\cudnn-11.2-windows-x64-v8.1.0.77.zip" ( + curl -k -L http://s3.amazonaws.com/ossci-windows/cudnn-11.2-windows-x64-v8.1.0.77.zip --output "%SRC_DIR%\temp_build\cudnn-11.2-windows-x64-v8.1.0.77.zip" + if errorlevel 1 exit /b 1 + set "CUDNN_SETUP_FILE=%SRC_DIR%\temp_build\cudnn-11.2-windows-x64-v8.1.0.77.zip" +) + +goto cuda_common + +:cuda_common + +if not exist "%SRC_DIR%\temp_build\NvToolsExt.7z" ( + curl -k -L https://www.dropbox.com/s/9mcolalfdj4n979/NvToolsExt.7z?dl=1 --output "%SRC_DIR%\temp_build\NvToolsExt.7z" + if errorlevel 1 exit /b 1 +) + +if not exist "%SRC_DIR%\temp_build\gpu_driver_dlls.7z" ( + curl -k -L "https://drive.google.com/u/0/uc?id=1injUyo3lnarMgWyRcXqKg4UGnN0ysmuq&export=download" --output "%SRC_DIR%\temp_build\gpu_driver_dlls.zip" + if errorlevel 1 exit /b 1 +) + +echo Installing CUDA toolkit... +7z x %CUDA_SETUP_FILE% -o"%SRC_DIR%\temp_build\cuda" +pushd "%SRC_DIR%\temp_build\cuda" +start /wait setup.exe -s %ARGS% +popd + +echo Installing VS integration... +xcopy /Y "%SRC_DIR%\temp_build\cuda\CUDAVisualStudioIntegration\extras\visual_studio_integration\MSBuildExtensions\*.*" "C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\Common7\IDE\VC\VCTargets\BuildCustomizations" + +echo Installing NvToolsExt... +7z x %SRC_DIR%\temp_build\NvToolsExt.7z -o"%SRC_DIR%\temp_build\NvToolsExt" +mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64" +mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include" +mkdir "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64" +xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\bin\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64" +xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\include\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\include" +xcopy /Y "%SRC_DIR%\temp_build\NvToolsExt\lib\x64\*.*" "%ProgramFiles%\NVIDIA Corporation\NvToolsExt\lib\x64" + +echo Setting up environment... 
+set "PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin;%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\libnvvp;%PATH%" +set "CUDA_PATH=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%" +set "CUDA_PATH_V%CUDA_VER_MAJOR%_%CUDA_VER_MINOR%=%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%" +set "NVTOOLSEXT_PATH=%ProgramFiles%\NVIDIA Corporation\NvToolsExt\bin\x64" + +if not exist "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin\nvcc.exe" ( + echo CUDA %CUDA_VERSION_STR% installed failed. + exit /b 1 +) + +echo Installing cuDNN... +7z x %CUDNN_SETUP_FILE% -o"%SRC_DIR%\temp_build\cudnn" +xcopy /Y "%SRC_DIR%\temp_build\cudnn\cuda\bin\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\bin" +xcopy /Y "%SRC_DIR%\temp_build\cudnn\cuda\lib\x64\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\lib\x64" +xcopy /Y "%SRC_DIR%\temp_build\cudnn\cuda\include\*.*" "%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION_STR%\include" + +echo Installing GPU driver DLLs +7z x %SRC_DIR%\temp_build\gpu_driver_dlls.zip -o"C:\Windows\System32" + +echo Cleaning temp files +rd /s /q "%SRC_DIR%\temp_build" || ver > nul diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_env_helper.bat b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_env_helper.bat new file mode 100644 index 0000000000000000000000000000000000000000..e85a372f93d58c87107c7dc1e2d7aa2a5e423445 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_env_helper.bat @@ -0,0 +1,43 @@ +@echo on + +set VC_VERSION_LOWER=16 +set VC_VERSION_UPPER=17 +if "%VC_YEAR%" == "2017" ( + set VC_VERSION_LOWER=15 + set VC_VERSION_UPPER=16 +) + +for /f "usebackq tokens=*" %%i in (`"%ProgramFiles(x86)%\Microsoft Visual Studio\Installer\vswhere.exe" -legacy -products * -version [%VC_VERSION_LOWER%^,%VC_VERSION_UPPER%^) -property installationPath`) do ( + if exist "%%i" if exist "%%i\VC\Auxiliary\Build\vcvarsall.bat" ( + set "VS15INSTALLDIR=%%i" + set "VS15VCVARSALL=%%i\VC\Auxiliary\Build\vcvarsall.bat" + goto vswhere + ) +) + +:vswhere +if "%VSDEVCMD_ARGS%" == "" ( + call "%VS15VCVARSALL%" x64 || exit /b 1 +) else ( + call "%VS15VCVARSALL%" x64 %VSDEVCMD_ARGS% || exit /b 1 +) + +@echo on + +set DISTUTILS_USE_SDK=1 + +set args=%1 +shift +:start +if [%1] == [] goto done +set args=%args% %1 +shift +goto start + +:done +if "%args%" == "" ( + echo Usage: vc_env_helper.bat [command] [args] + echo e.g. 
vc_env_helper.bat cl /c test.cpp +) + +%args% || exit /b 1 diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_install_helper.sh b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_install_helper.sh new file mode 100644 index 0000000000000000000000000000000000000000..cdae18065b9f6e97e385fa2002131ef857562306 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vc_install_helper.sh @@ -0,0 +1,16 @@ +#!/bin/bash + +set -ex + +if [[ "$CU_VERSION" == "cu92" ]]; then + export VC_YEAR=2017 + export VSDEVCMD_ARGS="-vcvars_ver=14.13" + powershell packaging/windows/internal/vs2017_install.ps1 +elif [[ "$CU_VERSION" == "cu100" ]]; then + export VC_YEAR=2017 + export VSDEVCMD_ARGS="" + powershell packaging/windows/internal/vs2017_install.ps1 +else + export VC_YEAR=2019 + export VSDEVCMD_ARGS="" +fi diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2017_install.ps1 b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2017_install.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..3e953de1ab7a0fa33238e10fbcd80564246c1a55 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2017_install.ps1 @@ -0,0 +1,25 @@ +$VS_DOWNLOAD_LINK = "https://aka.ms/vs/15/release/vs_buildtools.exe" +$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools", + "--add Microsoft.VisualStudio.Component.VC.Tools.14.13", + "--add Microsoft.Component.MSBuild", + "--add Microsoft.VisualStudio.Component.Roslyn.Compiler", + "--add Microsoft.VisualStudio.Component.TextTemplating", + "--add Microsoft.VisualStudio.Component.VC.CoreIde", + "--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest", + "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Core", + "--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64", + "--add Microsoft.VisualStudio.ComponentGroup.NativeDesktop.Win81") + +curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe +if ($LASTEXITCODE -ne 0) { + echo "Download of the VS 2017 installer failed" + exit 1 +} + +$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru +Remove-Item -Path vs_installer.exe -Force +$exitCode = $process.ExitCode +if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { + echo "VS 2017 installer exited with code $exitCode, which should be one of [0, 3010]." 
+ exit 1 +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2019_install.ps1 b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2019_install.ps1 new file mode 100644 index 0000000000000000000000000000000000000000..e436051f0dbb2ce9361f3d1c33295249ba032bb2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/packaging/windows/internal/vs2019_install.ps1 @@ -0,0 +1,21 @@ +$VS_DOWNLOAD_LINK = "https://aka.ms/vs/16/release/vs_buildtools.exe" +$VS_INSTALL_ARGS = @("--nocache","--quiet","--wait", "--add Microsoft.VisualStudio.Workload.VCTools", + "--add Microsoft.Component.MSBuild", + "--add Microsoft.VisualStudio.Component.Roslyn.Compiler", + "--add Microsoft.VisualStudio.Component.VC.CoreBuildTools", + "--add Microsoft.VisualStudio.Component.VC.Redist.14.Latest", + "--add Microsoft.VisualStudio.Component.VC.Tools.x86.x64") + +curl.exe --retry 3 -kL $VS_DOWNLOAD_LINK --output vs_installer.exe +if ($LASTEXITCODE -ne 0) { + echo "Download of the VS 2019 installer failed" + exit 1 +} + +$process = Start-Process "${PWD}\vs_installer.exe" -ArgumentList $VS_INSTALL_ARGS -NoNewWindow -Wait -PassThru +Remove-Item -Path vs_installer.exe -Force +$exitCode = $process.ExitCode +if (($exitCode -ne 0) -and ($exitCode -ne 3010)) { + echo "VS 2019 installer exited with code $exitCode, which should be one of [0, 3010]." + exit 1 +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/classification/README.md b/pretrained_model/pytorch_vision_v0.10.0/references/classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..7a3144b7cac7d34fba12297514e05a13fe871c2c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/classification/README.md @@ -0,0 +1,142 @@ +# Image classification reference training scripts + +This folder contains reference training scripts for image classification. +They serve as a log of how to train specific models, as well as provide baseline +training and evaluation scripts to quickly bootstrap research. + +Unless otherwise noted, all models have been trained on 8x V100 GPUs with +the following parameters: + +| Parameter | value | +| ------------------------ | ------ | +| `--batch_size` | `32` | +| `--epochs` | `90` | +| `--lr` | `0.1` | +| `--momentum` | `0.9` | +| `--wd`, `--weight-decay` | `1e-4` | +| `--lr-step-size` | `30` | +| `--lr-gamma` | `0.1` | + +### AlexNet and VGG + +Since `AlexNet` and the original `VGG` architectures do not include batch +normalization, the default initial learning rate `--lr 0.1` is too high. + +``` +python main.py --model $MODEL --lr 1e-2 +``` + +Here `$MODEL` is one of `alexnet`, `vgg11`, `vgg13`, `vgg16` or `vgg19`. Note +that `vgg11_bn`, `vgg13_bn`, `vgg16_bn`, and `vgg19_bn` include batch +normalization and thus are trained with the default parameters. 
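As a rough guide to how the defaults in the table above fit together, the sketch below mirrors the SGD-plus-StepLR setup that the `train.py` script added later in this patch builds from those flags. It is a minimal illustration only: the loss, data loader, and per-epoch training call are placeholders, not code from this repository.

```
# Minimal sketch of the default recipe above:
#   --lr 0.1, --momentum 0.9, --wd 1e-4, --lr-step-size 30, --lr-gamma 0.1, --epochs 90
# The loop body is a placeholder; see train.py in this patch for the real training loop.
import torch
import torchvision

model = torchvision.models.resnet50()
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)

for epoch in range(90):
    # train_one_epoch(model, criterion, optimizer, data_loader, ...)  # placeholder
    lr_scheduler.step()  # lr: 0.1 (epochs 0-29) -> 0.01 (30-59) -> 0.001 (60-89)
```

The per-model recipes below override these flag values (MobileNetV3, for instance, switches the optimizer to RMSprop), but the optimizer-plus-StepLR structure stays the same.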
+ +### ResNext-50 32x4d +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --model resnext50_32x4d --epochs 100 +``` + + +### ResNext-101 32x8d + +On 8 nodes, each with 8 GPUs (for a total of 64 GPUS) +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --model resnext101_32x8d --epochs 100 +``` + + +### MobileNetV2 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --model mobilenet_v2 --epochs 300 --lr 0.045 --wd 0.00004\ + --lr-step-size 1 --lr-gamma 0.98 +``` + + +### MobileNetV3 Large & Small +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --model $MODEL --epochs 600 --opt rmsprop --batch-size 128 --lr 0.064\ + --wd 0.00001 --lr-step-size 2 --lr-gamma 0.973 --auto-augment imagenet --random-erase 0.2 +``` + +Here `$MODEL` is one of `mobilenet_v3_large` or `mobilenet_v3_small`. + +Then we averaged the parameters of the last 3 checkpoints that improved the Acc@1. See [#3182](https://github.com/pytorch/vision/pull/3182) +and [#3354](https://github.com/pytorch/vision/pull/3354) for details. + + +## Mixed precision training +Automatic Mixed Precision (AMP) training on GPU for Pytorch can be enabled with the [NVIDIA Apex extension](https://github.com/NVIDIA/apex). + +Mixed precision training makes use of both FP32 and FP16 precisions where appropriate. FP16 operations can leverage the Tensor cores on NVIDIA GPUs (Volta, Turing or newer architectures) for improved throughput, generally without loss in model accuracy. Mixed precision training also often allows larger batch sizes. GPU automatic mixed precision training for Pytorch Vision can be enabled via the flag value `--apex=True`. + +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --model resnext50_32x4d --epochs 100 --apex +``` + +## Quantized + +### Parameters used for generating quantized models: + +For all post training quantized models (All quantized models except mobilenet-v2), the settings are: + +1. num_calibration_batches: 32 +2. num_workers: 16 +3. batch_size: 32 +4. eval_batch_size: 128 +5. backend: 'fbgemm' + +``` +python train_quantization.py --device='cpu' --post-training-quantize --backend='fbgemm' --model='<model_name>' +``` + +For Mobilenet-v2, the model was trained with quantization aware training, the settings used are: +1. num_workers: 16 +2. batch_size: 32 +3. eval_batch_size: 128 +4. backend: 'qnnpack' +5. learning-rate: 0.0001 +6. num_epochs: 90 +7. num_observer_update_epochs:4 +8. num_batch_norm_update_epochs:3 +9. momentum: 0.9 +10. lr_step_size:30 +11. lr_gamma: 0.1 +12. weight-decay: 0.0001 + +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train_quantization.py --model='mobilenet_v2' +``` + +Training converges at about 10 epochs. + +For Mobilenet-v3 Large, the model was trained with quantization aware training, the settings used are: +1. num_workers: 16 +2. batch_size: 32 +3. eval_batch_size: 128 +4. backend: 'qnnpack' +5. learning-rate: 0.001 +6. num_epochs: 90 +7. num_observer_update_epochs:4 +8. num_batch_norm_update_epochs:3 +9. momentum: 0.9 +10. lr_step_size:30 +11. lr_gamma: 0.1 +12. weight-decay: 0.00001 + +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train_quantization.py --model='mobilenet_v3_large' \ + --wd 0.00001 --lr 0.001 +``` + +For post training quant, device is set to CPU. For training, the device is set to CUDA. 
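To make the post-training settings above concrete, the sketch below walks through the static (fbgemm) quantization flow that `train_quantization.py` drives when `--post-training-quantize` is passed. It is only an illustration under stated assumptions: the calibration batches are random tensors rather than ImageNet data, and `pretrained=False` is used here to avoid a weight download.

```
# Hedged sketch of post-training static quantization with the 'fbgemm' backend.
# Calibration data is random here; the real script calibrates on ImageNet batches on CPU.
import torch
import torchvision

model = torchvision.models.quantization.resnet50(pretrained=False, quantize=False)
model.eval()
model.fuse_model()                                                 # fuse Conv+BN+ReLU blocks
model.qconfig = torch.quantization.get_default_qconfig('fbgemm')
torch.quantization.prepare(model, inplace=True)                    # insert observers

calibration_batches = [torch.randn(2, 3, 224, 224) for _ in range(32)]  # stands in for 32 batches
with torch.no_grad():
    for image in calibration_batches:
        model(image)                                               # observers record activation ranges

torch.quantization.convert(model, inplace=True)                    # produce the int8 model
```

The quantization-aware training path used for the MobileNet models roughly follows the same prepare/convert structure, but with the 'qnnpack' backend and a QAT qconfig, updating observers and batch-norm statistics during the first few epochs before freezing them and converting.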
+ +### Command to evaluate quantized models using the pre-trained weights: + +``` +python train_quantization.py --device='cpu' --test-only --backend='<backend>' --model='<model_name>' +``` + diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/classification/presets.py b/pretrained_model/pytorch_vision_v0.10.0/references/classification/presets.py new file mode 100644 index 0000000000000000000000000000000000000000..6bb389ba8db19fef16e995bc2b80b67e0d65b69f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/classification/presets.py @@ -0,0 +1,37 @@ +from torchvision.transforms import autoaugment, transforms + + +class ClassificationPresetTrain: + def __init__(self, crop_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225), hflip_prob=0.5, + auto_augment_policy=None, random_erase_prob=0.0): + trans = [transforms.RandomResizedCrop(crop_size)] + if hflip_prob > 0: + trans.append(transforms.RandomHorizontalFlip(hflip_prob)) + if auto_augment_policy is not None: + aa_policy = autoaugment.AutoAugmentPolicy(auto_augment_policy) + trans.append(autoaugment.AutoAugment(policy=aa_policy)) + trans.extend([ + transforms.ToTensor(), + transforms.Normalize(mean=mean, std=std), + ]) + if random_erase_prob > 0: + trans.append(transforms.RandomErasing(p=random_erase_prob)) + + self.transforms = transforms.Compose(trans) + + def __call__(self, img): + return self.transforms(img) + + +class ClassificationPresetEval: + def __init__(self, crop_size, resize_size=256, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): + + self.transforms = transforms.Compose([ + transforms.Resize(resize_size), + transforms.CenterCrop(crop_size), + transforms.ToTensor(), + transforms.Normalize(mean=mean, std=std), + ]) + + def __call__(self, img): + return self.transforms(img) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/classification/train.py b/pretrained_model/pytorch_vision_v0.10.0/references/classification/train.py new file mode 100644 index 0000000000000000000000000000000000000000..b4e9d274662cb94d40b55539037adc2857593292 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/classification/train.py @@ -0,0 +1,299 @@ +import datetime +import os +import time + +import torch +import torch.utils.data +from torch import nn +import torchvision + +import presets +import utils + +try: + from apex import amp +except ImportError: + amp = None + + +def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, print_freq, apex=False): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}')) + metric_logger.add_meter('img/s', utils.SmoothedValue(window_size=10, fmt='{value}')) + + header = 'Epoch: [{}]'.format(epoch) + for image, target in metric_logger.log_every(data_loader, print_freq, header): + start_time = time.time() + image, target = image.to(device), target.to(device) + output = model(image) + loss = criterion(output, target) + + optimizer.zero_grad() + if apex: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + optimizer.step() + + acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) + batch_size = image.shape[0] + metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"]) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + metric_logger.meters['img/s'].update(batch_size / (time.time() - 
start_time)) + + +def evaluate(model, criterion, data_loader, device, print_freq=100): + model.eval() + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + with torch.no_grad(): + for image, target in metric_logger.log_every(data_loader, print_freq, header): + image = image.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + output = model(image) + loss = criterion(output, target) + + acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) + # FIXME need to take into account that the datasets + # could have been padded in distributed setup + batch_size = image.shape[0] + metric_logger.update(loss=loss.item()) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + + print(' * Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f}' + .format(top1=metric_logger.acc1, top5=metric_logger.acc5)) + return metric_logger.acc1.global_avg + + +def _get_cache_path(filepath): + import hashlib + h = hashlib.sha1(filepath.encode()).hexdigest() + cache_path = os.path.join("~", ".torch", "vision", "datasets", "imagefolder", h[:10] + ".pt") + cache_path = os.path.expanduser(cache_path) + return cache_path + + +def load_data(traindir, valdir, args): + # Data loading code + print("Loading data") + resize_size, crop_size = (342, 299) if args.model == 'inception_v3' else (256, 224) + + print("Loading training data") + st = time.time() + cache_path = _get_cache_path(traindir) + if args.cache_dataset and os.path.exists(cache_path): + # Attention, as the transforms are also cached! + print("Loading dataset_train from {}".format(cache_path)) + dataset, _ = torch.load(cache_path) + else: + auto_augment_policy = getattr(args, "auto_augment", None) + random_erase_prob = getattr(args, "random_erase", 0.0) + dataset = torchvision.datasets.ImageFolder( + traindir, + presets.ClassificationPresetTrain(crop_size=crop_size, auto_augment_policy=auto_augment_policy, + random_erase_prob=random_erase_prob)) + if args.cache_dataset: + print("Saving dataset_train to {}".format(cache_path)) + utils.mkdir(os.path.dirname(cache_path)) + utils.save_on_master((dataset, traindir), cache_path) + print("Took", time.time() - st) + + print("Loading validation data") + cache_path = _get_cache_path(valdir) + if args.cache_dataset and os.path.exists(cache_path): + # Attention, as the transforms are also cached! + print("Loading dataset_test from {}".format(cache_path)) + dataset_test, _ = torch.load(cache_path) + else: + dataset_test = torchvision.datasets.ImageFolder( + valdir, + presets.ClassificationPresetEval(crop_size=crop_size, resize_size=resize_size)) + if args.cache_dataset: + print("Saving dataset_test to {}".format(cache_path)) + utils.mkdir(os.path.dirname(cache_path)) + utils.save_on_master((dataset_test, valdir), cache_path) + + print("Creating data loaders") + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) + test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) + else: + train_sampler = torch.utils.data.RandomSampler(dataset) + test_sampler = torch.utils.data.SequentialSampler(dataset_test) + + return dataset, dataset_test, train_sampler, test_sampler + + +def main(args): + if args.apex and amp is None: + raise RuntimeError("Failed to import apex. 
Please install apex from https://www.github.com/nvidia/apex " + "to enable mixed-precision training.") + + if args.output_dir: + utils.mkdir(args.output_dir) + + utils.init_distributed_mode(args) + print(args) + + device = torch.device(args.device) + + torch.backends.cudnn.benchmark = True + + train_dir = os.path.join(args.data_path, 'train') + val_dir = os.path.join(args.data_path, 'val') + dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args) + data_loader = torch.utils.data.DataLoader( + dataset, batch_size=args.batch_size, + sampler=train_sampler, num_workers=args.workers, pin_memory=True) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, batch_size=args.batch_size, + sampler=test_sampler, num_workers=args.workers, pin_memory=True) + + print("Creating model") + model = torchvision.models.__dict__[args.model](pretrained=args.pretrained) + model.to(device) + if args.distributed and args.sync_bn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + criterion = nn.CrossEntropyLoss() + + opt_name = args.opt.lower() + if opt_name == 'sgd': + optimizer = torch.optim.SGD( + model.parameters(), lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + elif opt_name == 'rmsprop': + optimizer = torch.optim.RMSprop(model.parameters(), lr=args.lr, momentum=args.momentum, + weight_decay=args.weight_decay, eps=0.0316, alpha=0.9) + else: + raise RuntimeError("Invalid optimizer {}. Only SGD and RMSprop are supported.".format(args.opt)) + + if args.apex: + model, optimizer = amp.initialize(model, optimizer, + opt_level=args.apex_opt_level + ) + + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.lr_step_size, gamma=args.lr_gamma) + + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + if args.resume: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.test_only: + evaluate(model, criterion, data_loader_test, device=device) + return + + print("Start training") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args.print_freq, args.apex) + lr_scheduler.step() + evaluate(model, criterion, data_loader_test, device=device) + if args.output_dir: + checkpoint = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args} + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'model_{}.pth'.format(epoch))) + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'checkpoint.pth')) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +def get_args_parser(add_help=True): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Classification Training', add_help=add_help) + + parser.add_argument('--data-path', default='/datasets01/imagenet_full_size/061417/', help='dataset') + parser.add_argument('--model', default='resnet18', help='model') + 
parser.add_argument('--device', default='cuda', help='device') + parser.add_argument('-b', '--batch-size', default=32, type=int) + parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') + parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', + help='number of data loading workers (default: 16)') + parser.add_argument('--opt', default='sgd', type=str, help='optimizer') + parser.add_argument('--lr', default=0.1, type=float, help='initial learning rate') + parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') + parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') + parser.add_argument('--lr-step-size', default=30, type=int, help='decrease lr every step-size epochs') + parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma') + parser.add_argument('--print-freq', default=10, type=int, help='print frequency') + parser.add_argument('--output-dir', default='.', help='path where to save') + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument( + "--cache-dataset", + dest="cache_dataset", + help="Cache the datasets for quicker initialization. It also serializes the transforms", + action="store_true", + ) + parser.add_argument( + "--sync-bn", + dest="sync_bn", + help="Use sync batch norm", + action="store_true", + ) + parser.add_argument( + "--test-only", + dest="test_only", + help="Only test the model", + action="store_true", + ) + parser.add_argument( + "--pretrained", + dest="pretrained", + help="Use pre-trained models from the modelzoo", + action="store_true", + ) + parser.add_argument('--auto-augment', default=None, help='auto augment policy (default: None)') + parser.add_argument('--random-erase', default=0.0, type=float, help='random erasing probability (default: 0.0)') + + # Mixed precision training parameters + parser.add_argument('--apex', action='store_true', + help='Use apex for mixed precision training') + parser.add_argument('--apex-opt-level', default='O1', type=str, + help='For apex mixed precision training' + 'O0 for FP32 training, O1 for mixed precision training.' 
+ 'For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet' + ) + + # distributed training parameters + parser.add_argument('--world-size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training') + + return parser + + +if __name__ == "__main__": + args = get_args_parser().parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/classification/train_quantization.py b/pretrained_model/pytorch_vision_v0.10.0/references/classification/train_quantization.py new file mode 100644 index 0000000000000000000000000000000000000000..ec945f4f58f915dfdd89203b856f503789d4f346 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/classification/train_quantization.py @@ -0,0 +1,257 @@ +import datetime +import os +import time +import copy + +import torch +import torch.utils.data +from torch import nn +import torchvision +import torch.quantization +import utils +from train import train_one_epoch, evaluate, load_data + + +def main(args): + if args.output_dir: + utils.mkdir(args.output_dir) + + utils.init_distributed_mode(args) + print(args) + + if args.post_training_quantize and args.distributed: + raise RuntimeError("Post training quantization example should not be performed " + "on distributed mode") + + # Set backend engine to ensure that quantized model runs on the correct kernels + if args.backend not in torch.backends.quantized.supported_engines: + raise RuntimeError("Quantized backend not supported: " + str(args.backend)) + torch.backends.quantized.engine = args.backend + + device = torch.device(args.device) + torch.backends.cudnn.benchmark = True + + # Data loading code + print("Loading data") + train_dir = os.path.join(args.data_path, 'train') + val_dir = os.path.join(args.data_path, 'val') + + dataset, dataset_test, train_sampler, test_sampler = load_data(train_dir, val_dir, args) + data_loader = torch.utils.data.DataLoader( + dataset, batch_size=args.batch_size, + sampler=train_sampler, num_workers=args.workers, pin_memory=True) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, batch_size=args.eval_batch_size, + sampler=test_sampler, num_workers=args.workers, pin_memory=True) + + print("Creating model", args.model) + # when training quantized models, we always start from a pre-trained fp32 reference model + model = torchvision.models.quantization.__dict__[args.model](pretrained=True, quantize=args.test_only) + model.to(device) + + if not (args.test_only or args.post_training_quantize): + model.fuse_model() + model.qconfig = torch.quantization.get_default_qat_qconfig(args.backend) + torch.quantization.prepare_qat(model, inplace=True) + + if args.distributed and args.sync_bn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + optimizer = torch.optim.SGD( + model.parameters(), lr=args.lr, momentum=args.momentum, + weight_decay=args.weight_decay) + + lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, + step_size=args.lr_step_size, + gamma=args.lr_gamma) + + criterion = nn.CrossEntropyLoss() + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + if args.resume: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + 
lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.post_training_quantize: + # perform calibration on a subset of the training dataset + # for that, create a subset of the training dataset + ds = torch.utils.data.Subset( + dataset, + indices=list(range(args.batch_size * args.num_calibration_batches))) + data_loader_calibration = torch.utils.data.DataLoader( + ds, batch_size=args.batch_size, shuffle=False, num_workers=args.workers, + pin_memory=True) + model.eval() + model.fuse_model() + model.qconfig = torch.quantization.get_default_qconfig(args.backend) + torch.quantization.prepare(model, inplace=True) + # Calibrate first + print("Calibrating") + evaluate(model, criterion, data_loader_calibration, device=device, print_freq=1) + torch.quantization.convert(model, inplace=True) + if args.output_dir: + print('Saving quantized model') + if utils.is_main_process(): + torch.save(model.state_dict(), os.path.join(args.output_dir, + 'quantized_post_train_model.pth')) + print("Evaluating post-training quantized model") + evaluate(model, criterion, data_loader_test, device=device) + return + + if args.test_only: + evaluate(model, criterion, data_loader_test, device=device) + return + + model.apply(torch.quantization.enable_observer) + model.apply(torch.quantization.enable_fake_quant) + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + print('Starting training for epoch', epoch) + train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, + args.print_freq) + lr_scheduler.step() + with torch.no_grad(): + if epoch >= args.num_observer_update_epochs: + print('Disabling observer for subseq epochs, epoch = ', epoch) + model.apply(torch.quantization.disable_observer) + if epoch >= args.num_batch_norm_update_epochs: + print('Freezing BN for subseq epochs, epoch = ', epoch) + model.apply(torch.nn.intrinsic.qat.freeze_bn_stats) + print('Evaluate QAT model') + + evaluate(model, criterion, data_loader_test, device=device) + quantized_eval_model = copy.deepcopy(model_without_ddp) + quantized_eval_model.eval() + quantized_eval_model.to(torch.device('cpu')) + torch.quantization.convert(quantized_eval_model, inplace=True) + + print('Evaluate Quantized model') + evaluate(quantized_eval_model, criterion, data_loader_test, + device=torch.device('cpu')) + + model.train() + + if args.output_dir: + checkpoint = { + 'model': model_without_ddp.state_dict(), + 'eval_model': quantized_eval_model.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args} + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'model_{}.pth'.format(epoch))) + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'checkpoint.pth')) + print('Saving models after epoch ', epoch) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +def get_args_parser(add_help=True): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Quantized Classification Training', add_help=add_help) + + parser.add_argument('--data-path', + default='/datasets01/imagenet_full_size/061417/', + help='dataset') + parser.add_argument('--model', + default='mobilenet_v2', + help='model') + parser.add_argument('--backend', + default='qnnpack', + help='fbgemm or qnnpack') + 
parser.add_argument('--device', + default='cuda', + help='device') + + parser.add_argument('-b', '--batch-size', default=32, type=int, + help='batch size for calibration/training') + parser.add_argument('--eval-batch-size', default=128, type=int, + help='batch size for evaluation') + parser.add_argument('--epochs', default=90, type=int, metavar='N', + help='number of total epochs to run') + parser.add_argument('--num-observer-update-epochs', + default=4, type=int, metavar='N', + help='number of total epochs to update observers') + parser.add_argument('--num-batch-norm-update-epochs', default=3, + type=int, metavar='N', + help='number of total epochs to update batch norm stats') + parser.add_argument('--num-calibration-batches', + default=32, type=int, metavar='N', + help='number of batches of training set for \ + observer calibration ') + + parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', + help='number of data loading workers (default: 16)') + parser.add_argument('--lr', + default=0.0001, type=float, + help='initial learning rate') + parser.add_argument('--momentum', + default=0.9, type=float, metavar='M', + help='momentum') + parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') + parser.add_argument('--lr-step-size', default=30, type=int, + help='decrease lr every step-size epochs') + parser.add_argument('--lr-gamma', default=0.1, type=float, + help='decrease lr by a factor of lr-gamma') + parser.add_argument('--print-freq', default=10, type=int, + help='print frequency') + parser.add_argument('--output-dir', default='.', help='path where to save') + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument( + "--cache-dataset", + dest="cache_dataset", + help="Cache the datasets for quicker initialization. \ + It also serializes the transforms", + action="store_true", + ) + parser.add_argument( + "--sync-bn", + dest="sync_bn", + help="Use sync batch norm", + action="store_true", + ) + parser.add_argument( + "--test-only", + dest="test_only", + help="Only test the model", + action="store_true", + ) + parser.add_argument( + "--post-training-quantize", + dest="post_training_quantize", + help="Post training quantize the model", + action="store_true", + ) + + # distributed training parameters + parser.add_argument('--world-size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist-url', + default='env://', + help='url used to set up distributed training') + + return parser + + +if __name__ == "__main__": + args = get_args_parser().parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/classification/utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/classification/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4e53ed1d3d703109f9e6eba05966fb008e3d5623 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/classification/utils.py @@ -0,0 +1,379 @@ +from collections import defaultdict, deque, OrderedDict +import copy +import datetime +import hashlib +import time +import torch +import torch.distributed as dist + +import errno +import os + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. 
+ """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: 
{}'.format(header, total_time_str)) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target[None]) + + res = [] + for k in topk: + correct_k = correct[:k].flatten().sum(dtype=torch.float32) + res.append(correct_k * (100.0 / batch_size)) + return res + + +def mkdir(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + elif hasattr(args, "rank"): + pass + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + setup_for_distributed(args.rank == 0) + + +def average_checkpoints(inputs): + """Loads checkpoints from inputs and returns a model with averaged weights. Original implementation taken from: + https://github.com/pytorch/fairseq/blob/a48f235636557b8d3bc4922a6fa90f3a0fa57955/scripts/average_checkpoints.py#L16 + + Args: + inputs (List[str]): An iterable of string paths of checkpoints to load from. + Returns: + A dict of string keys mapping to various values. The 'model' key + from the returned dict should correspond to an OrderedDict mapping + string parameter names to torch Tensors. 
+ """ + params_dict = OrderedDict() + params_keys = None + new_state = None + num_models = len(inputs) + for fpath in inputs: + with open(fpath, "rb") as f: + state = torch.load( + f, + map_location=( + lambda s, _: torch.serialization.default_restore_location(s, "cpu") + ), + ) + # Copies over the settings from the first checkpoint + if new_state is None: + new_state = state + model_params = state["model"] + model_params_keys = list(model_params.keys()) + if params_keys is None: + params_keys = model_params_keys + elif params_keys != model_params_keys: + raise KeyError( + "For checkpoint {}, expected list of params: {}, " + "but found: {}".format(f, params_keys, model_params_keys) + ) + for k in params_keys: + p = model_params[k] + if isinstance(p, torch.HalfTensor): + p = p.float() + if k not in params_dict: + params_dict[k] = p.clone() + # NOTE: clone() is needed in case of p is a shared parameter + else: + params_dict[k] += p + averaged_params = OrderedDict() + for k, v in params_dict.items(): + averaged_params[k] = v + if averaged_params[k].is_floating_point(): + averaged_params[k].div_(num_models) + else: + averaged_params[k] //= num_models + new_state["model"] = averaged_params + return new_state + + +def store_model_weights(model, checkpoint_path, checkpoint_key='model', strict=True): + """ + This method can be used to prepare weights files for new models. It receives as + input a model architecture and a checkpoint from the training script and produces + a file with the weights ready for release. + + Examples: + from torchvision import models as M + + # Classification + model = M.mobilenet_v3_large(pretrained=False) + print(store_model_weights(model, './class.pth')) + + # Quantized Classification + model = M.quantization.mobilenet_v3_large(pretrained=False, quantize=False) + model.fuse_model() + model.qconfig = torch.quantization.get_default_qat_qconfig('qnnpack') + _ = torch.quantization.prepare_qat(model, inplace=True) + print(store_model_weights(model, './qat.pth')) + + # Object Detection + model = M.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, pretrained_backbone=False) + print(store_model_weights(model, './obj.pth')) + + # Segmentation + model = M.segmentation.deeplabv3_mobilenet_v3_large(pretrained=False, pretrained_backbone=False, aux_loss=True) + print(store_model_weights(model, './segm.pth', strict=False)) + + Args: + model (pytorch.nn.Module): The model on which the weights will be loaded for validation purposes. + checkpoint_path (str): The path of the checkpoint we will load. + checkpoint_key (str, optional): The key of the checkpoint where the model weights are stored. + Default: "model". + strict (bool): whether to strictly enforce that the keys + in :attr:`state_dict` match the keys returned by this module's + :meth:`~torch.nn.Module.state_dict` function. Default: ``True`` + + Returns: + output_path (str): The location where the weights are saved. + """ + # Store the new model next to the checkpoint_path + checkpoint_path = os.path.abspath(checkpoint_path) + output_dir = os.path.dirname(checkpoint_path) + + # Deep copy to avoid side-effects on the model object. 
+ model = copy.deepcopy(model) + checkpoint = torch.load(checkpoint_path, map_location='cpu') + + # Load the weights to the model to validate that everything works + # and remove unnecessary weights (such as auxiliaries, etc) + model.load_state_dict(checkpoint[checkpoint_key], strict=strict) + + tmp_path = os.path.join(output_dir, str(model.__hash__())) + torch.save(model.state_dict(), tmp_path) + + sha256_hash = hashlib.sha256() + with open(tmp_path, "rb") as f: + # Read and update hash string value in blocks of 4K + for byte_block in iter(lambda: f.read(4096), b""): + sha256_hash.update(byte_block) + hh = sha256_hash.hexdigest() + + output_path = os.path.join(output_dir, "weights-" + str(hh[:8]) + ".pth") + os.replace(tmp_path, output_path) + + return output_path diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/README.md b/pretrained_model/pytorch_vision_v0.10.0/references/detection/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ea5be6ea791f79b612f954ff56e7d1315b7f405d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/README.md @@ -0,0 +1,82 @@ +# Object detection reference training scripts + +This folder contains reference training scripts for object detection. +They serve as a log of how to train specific models, to provide baseline +training and evaluation scripts to quickly bootstrap research. + +To execute the example commands below you must install the following: + +``` +cython +pycocotools +matplotlib +``` + +You must modify the following flags: + +`--data-path=/path/to/coco/dataset` + +`--nproc_per_node=<number_of_gpus_available>` + +Except otherwise noted, all models have been trained on 8x V100 GPUs. + +### Faster R-CNN ResNet-50 FPN +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model fasterrcnn_resnet50_fpn --epochs 26\ + --lr-steps 16 22 --aspect-ratio-group-factor 3 +``` + +### Faster R-CNN MobileNetV3-Large FPN +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model fasterrcnn_mobilenet_v3_large_fpn --epochs 26\ + --lr-steps 16 22 --aspect-ratio-group-factor 3 +``` + +### Faster R-CNN MobileNetV3-Large 320 FPN +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model fasterrcnn_mobilenet_v3_large_320_fpn --epochs 26\ + --lr-steps 16 22 --aspect-ratio-group-factor 3 +``` + +### RetinaNet +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model retinanet_resnet50_fpn --epochs 26\ + --lr-steps 16 22 --aspect-ratio-group-factor 3 --lr 0.01 +``` + +### SSD300 VGG16 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model ssd300_vgg16 --epochs 120\ + --lr-steps 80 110 --aspect-ratio-group-factor 3 --lr 0.002 --batch-size 4\ + --weight-decay 0.0005 --data-augmentation ssd +``` + +### SSDlite320 MobileNetV3-Large +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model ssdlite320_mobilenet_v3_large --epochs 660\ + --aspect-ratio-group-factor 3 --lr-scheduler cosineannealinglr --lr 0.15 --batch-size 24\ + --weight-decay 0.00004 --data-augmentation ssdlite +``` + + +### Mask R-CNN +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco --model maskrcnn_resnet50_fpn --epochs 26\ + --lr-steps 16 22 --aspect-ratio-group-factor 3 +``` + + +### Keypoint R-CNN +``` +python -m 
torch.distributed.launch --nproc_per_node=8 --use_env train.py\ + --dataset coco_kp --model keypointrcnn_resnet50_fpn --epochs 46\ + --lr-steps 36 43 --aspect-ratio-group-factor 3 +``` + diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_eval.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..09648f29ae46548626d0f16cd65fd1b5399cacbe --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_eval.py @@ -0,0 +1,352 @@ +import json +import tempfile + +import numpy as np +import copy +import time +import torch +import torch._six + +from pycocotools.cocoeval import COCOeval +from pycocotools.coco import COCO +import pycocotools.mask as mask_util + +from collections import defaultdict + +import utils + + +class CocoEvaluator(object): + def __init__(self, coco_gt, iou_types): + assert isinstance(iou_types, (list, tuple)) + coco_gt = copy.deepcopy(coco_gt) + self.coco_gt = coco_gt + + self.iou_types = iou_types + self.coco_eval = {} + for iou_type in iou_types: + self.coco_eval[iou_type] = COCOeval(coco_gt, iouType=iou_type) + + self.img_ids = [] + self.eval_imgs = {k: [] for k in iou_types} + + def update(self, predictions): + img_ids = list(np.unique(list(predictions.keys()))) + self.img_ids.extend(img_ids) + + for iou_type in self.iou_types: + results = self.prepare(predictions, iou_type) + coco_dt = loadRes(self.coco_gt, results) if results else COCO() + coco_eval = self.coco_eval[iou_type] + + coco_eval.cocoDt = coco_dt + coco_eval.params.imgIds = list(img_ids) + img_ids, eval_imgs = evaluate(coco_eval) + + self.eval_imgs[iou_type].append(eval_imgs) + + def synchronize_between_processes(self): + for iou_type in self.iou_types: + self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) + create_common_coco_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) + + def accumulate(self): + for coco_eval in self.coco_eval.values(): + coco_eval.accumulate() + + def summarize(self): + for iou_type, coco_eval in self.coco_eval.items(): + print("IoU metric: {}".format(iou_type)) + coco_eval.summarize() + + def prepare(self, predictions, iou_type): + if iou_type == "bbox": + return self.prepare_for_coco_detection(predictions) + elif iou_type == "segm": + return self.prepare_for_coco_segmentation(predictions) + elif iou_type == "keypoints": + return self.prepare_for_coco_keypoint(predictions) + else: + raise ValueError("Unknown iou type {}".format(iou_type)) + + def prepare_for_coco_detection(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "bbox": box, + "score": scores[k], + } + for k, box in enumerate(boxes) + ] + ) + return coco_results + + def prepare_for_coco_segmentation(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + scores = prediction["scores"] + labels = prediction["labels"] + masks = prediction["masks"] + + masks = masks > 0.5 + + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + + rles = [ + mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, 
order="F"))[0] + for mask in masks + ] + for rle in rles: + rle["counts"] = rle["counts"].decode("utf-8") + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + "segmentation": rle, + "score": scores[k], + } + for k, rle in enumerate(rles) + ] + ) + return coco_results + + def prepare_for_coco_keypoint(self, predictions): + coco_results = [] + for original_id, prediction in predictions.items(): + if len(prediction) == 0: + continue + + boxes = prediction["boxes"] + boxes = convert_to_xywh(boxes).tolist() + scores = prediction["scores"].tolist() + labels = prediction["labels"].tolist() + keypoints = prediction["keypoints"] + keypoints = keypoints.flatten(start_dim=1).tolist() + + coco_results.extend( + [ + { + "image_id": original_id, + "category_id": labels[k], + 'keypoints': keypoint, + "score": scores[k], + } + for k, keypoint in enumerate(keypoints) + ] + ) + return coco_results + + +def convert_to_xywh(boxes): + xmin, ymin, xmax, ymax = boxes.unbind(1) + return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) + + +def merge(img_ids, eval_imgs): + all_img_ids = utils.all_gather(img_ids) + all_eval_imgs = utils.all_gather(eval_imgs) + + merged_img_ids = [] + for p in all_img_ids: + merged_img_ids.extend(p) + + merged_eval_imgs = [] + for p in all_eval_imgs: + merged_eval_imgs.append(p) + + merged_img_ids = np.array(merged_img_ids) + merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) + + # keep only unique (and in sorted order) images + merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) + merged_eval_imgs = merged_eval_imgs[..., idx] + + return merged_img_ids, merged_eval_imgs + + +def create_common_coco_eval(coco_eval, img_ids, eval_imgs): + img_ids, eval_imgs = merge(img_ids, eval_imgs) + img_ids = list(img_ids) + eval_imgs = list(eval_imgs.flatten()) + + coco_eval.evalImgs = eval_imgs + coco_eval.params.imgIds = img_ids + coco_eval._paramsEval = copy.deepcopy(coco_eval.params) + + +################################################################# +# From pycocotools, just removed the prints and fixed +# a Python3 bug about unicode not defined +################################################################# + +# Ideally, pycocotools wouldn't have hard-coded prints +# so that we could avoid copy-pasting those two functions + +def createIndex(self): + # create index + # print('creating index...') + anns, cats, imgs = {}, {}, {} + imgToAnns, catToImgs = defaultdict(list), defaultdict(list) + if 'annotations' in self.dataset: + for ann in self.dataset['annotations']: + imgToAnns[ann['image_id']].append(ann) + anns[ann['id']] = ann + + if 'images' in self.dataset: + for img in self.dataset['images']: + imgs[img['id']] = img + + if 'categories' in self.dataset: + for cat in self.dataset['categories']: + cats[cat['id']] = cat + + if 'annotations' in self.dataset and 'categories' in self.dataset: + for ann in self.dataset['annotations']: + catToImgs[ann['category_id']].append(ann['image_id']) + + # print('index created!') + + # create class members + self.anns = anns + self.imgToAnns = imgToAnns + self.catToImgs = catToImgs + self.imgs = imgs + self.cats = cats + + +maskUtils = mask_util + + +def loadRes(self, resFile): + """ + Load result file and return a result api object. 
+ Args: + self (obj): coco object with ground truth annotations + resFile (str): file name of result file + Returns: + res (obj): result api object + """ + res = COCO() + res.dataset['images'] = [img for img in self.dataset['images']] + + # print('Loading and preparing results...') + # tic = time.time() + if isinstance(resFile, torch._six.string_classes): + anns = json.load(open(resFile)) + elif type(resFile) == np.ndarray: + anns = self.loadNumpyAnnotations(resFile) + else: + anns = resFile + assert type(anns) == list, 'results in not an array of objects' + annsImgIds = [ann['image_id'] for ann in anns] + assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \ + 'Results do not correspond to current coco set' + if 'caption' in anns[0]: + imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns]) + res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds] + for id, ann in enumerate(anns): + ann['id'] = id + 1 + elif 'bbox' in anns[0] and not anns[0]['bbox'] == []: + res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + for id, ann in enumerate(anns): + bb = ann['bbox'] + x1, x2, y1, y2 = [bb[0], bb[0] + bb[2], bb[1], bb[1] + bb[3]] + if 'segmentation' not in ann: + ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]] + ann['area'] = bb[2] * bb[3] + ann['id'] = id + 1 + ann['iscrowd'] = 0 + elif 'segmentation' in anns[0]: + res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + for id, ann in enumerate(anns): + # now only support compressed RLE format as segmentation results + ann['area'] = maskUtils.area(ann['segmentation']) + if 'bbox' not in ann: + ann['bbox'] = maskUtils.toBbox(ann['segmentation']) + ann['id'] = id + 1 + ann['iscrowd'] = 0 + elif 'keypoints' in anns[0]: + res.dataset['categories'] = copy.deepcopy(self.dataset['categories']) + for id, ann in enumerate(anns): + s = ann['keypoints'] + x = s[0::3] + y = s[1::3] + x1, x2, y1, y2 = np.min(x), np.max(x), np.min(y), np.max(y) + ann['area'] = (x2 - x1) * (y2 - y1) + ann['id'] = id + 1 + ann['bbox'] = [x1, y1, x2 - x1, y2 - y1] + # print('DONE (t={:0.2f}s)'.format(time.time()- tic)) + + res.dataset['annotations'] = anns + createIndex(res) + return res + + +def evaluate(self): + ''' + Run per image evaluation on given images and store results (a list of dict) in self.evalImgs + :return: None + ''' + # tic = time.time() + # print('Running per image evaluation...') + p = self.params + # add backward compatibility if useSegm is specified in params + if p.useSegm is not None: + p.iouType = 'segm' if p.useSegm == 1 else 'bbox' + print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType)) + # print('Evaluate annotation type *{}*'.format(p.iouType)) + p.imgIds = list(np.unique(p.imgIds)) + if p.useCats: + p.catIds = list(np.unique(p.catIds)) + p.maxDets = sorted(p.maxDets) + self.params = p + + self._prepare() + # loop through images, area range, max detection number + catIds = p.catIds if p.useCats else [-1] + + if p.iouType == 'segm' or p.iouType == 'bbox': + computeIoU = self.computeIoU + elif p.iouType == 'keypoints': + computeIoU = self.computeOks + self.ious = { + (imgId, catId): computeIoU(imgId, catId) + for imgId in p.imgIds + for catId in catIds} + + evaluateImg = self.evaluateImg + maxDet = p.maxDets[-1] + evalImgs = [ + evaluateImg(imgId, catId, areaRng, maxDet) + for catId in catIds + for areaRng in p.areaRng + for imgId in p.imgIds + ] + # this is NOT in the pycocotools code, but could be done outside + evalImgs = np.asarray(evalImgs).reshape(len(catIds), len(p.areaRng), len(p.imgIds)) + self._paramsEval = copy.deepcopy(self.params) + # toc = time.time() + # print('DONE (t={:0.2f}s).'.format(toc-tic)) + return p.imgIds, evalImgs + +################################################################# +# end of straight copy from pycocotools, just removing the prints +################################################################# diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..26701a2cbee2086fd8aef3d5c9a29bc3ee14b7c8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/coco_utils.py @@ -0,0 +1,252 @@ +import copy +import os +from PIL import Image + +import torch +import torch.utils.data +import torchvision + +from pycocotools import mask as coco_mask +from pycocotools.coco import COCO + +import transforms as T + + +class FilterAndRemapCocoCategories(object): + def __init__(self, categories, remap=True): + self.categories = categories + self.remap = remap + + def __call__(self, image, target): + anno = target["annotations"] + anno = [obj for obj in anno if obj["category_id"] in self.categories] + if not self.remap: + target["annotations"] = anno + return image, target + anno = copy.deepcopy(anno) + for obj in anno: + obj["category_id"] = self.categories.index(obj["category_id"]) + target["annotations"] = anno + return image, target + + +def convert_coco_poly_to_mask(segmentations, height, width): + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = torch.as_tensor(mask, dtype=torch.uint8) + mask = mask.any(dim=2) + masks.append(mask) + if masks: + masks = torch.stack(masks, dim=0) + else: + masks = torch.zeros((0, height, width), dtype=torch.uint8) + return masks + + +class ConvertCocoPolysToMask(object): + def __call__(self, image, target): + w, h = image.size + + image_id = target["image_id"] + image_id = torch.tensor([image_id]) + + anno = target["annotations"] + + anno = [obj for obj in anno if obj['iscrowd'] == 0] + + boxes = [obj["bbox"] for obj in anno] + # guard against no boxes via resizing + boxes = torch.as_tensor(boxes, dtype=torch.float32).reshape(-1, 4) + boxes[:, 2:] += boxes[:, :2] + boxes[:, 0::2].clamp_(min=0, max=w) + boxes[:, 1::2].clamp_(min=0, max=h) + + classes = [obj["category_id"] for obj in anno] + classes = torch.tensor(classes, dtype=torch.int64) 
+ + segmentations = [obj["segmentation"] for obj in anno] + masks = convert_coco_poly_to_mask(segmentations, h, w) + + keypoints = None + if anno and "keypoints" in anno[0]: + keypoints = [obj["keypoints"] for obj in anno] + keypoints = torch.as_tensor(keypoints, dtype=torch.float32) + num_keypoints = keypoints.shape[0] + if num_keypoints: + keypoints = keypoints.view(num_keypoints, -1, 3) + + keep = (boxes[:, 3] > boxes[:, 1]) & (boxes[:, 2] > boxes[:, 0]) + boxes = boxes[keep] + classes = classes[keep] + masks = masks[keep] + if keypoints is not None: + keypoints = keypoints[keep] + + target = {} + target["boxes"] = boxes + target["labels"] = classes + target["masks"] = masks + target["image_id"] = image_id + if keypoints is not None: + target["keypoints"] = keypoints + + # for conversion to coco api + area = torch.tensor([obj["area"] for obj in anno]) + iscrowd = torch.tensor([obj["iscrowd"] for obj in anno]) + target["area"] = area + target["iscrowd"] = iscrowd + + return image, target + + +def _coco_remove_images_without_annotations(dataset, cat_list=None): + def _has_only_empty_bbox(anno): + return all(any(o <= 1 for o in obj["bbox"][2:]) for obj in anno) + + def _count_visible_keypoints(anno): + return sum(sum(1 for v in ann["keypoints"][2::3] if v > 0) for ann in anno) + + min_keypoints_per_image = 10 + + def _has_valid_annotation(anno): + # if it's empty, there is no annotation + if len(anno) == 0: + return False + # if all boxes have close to zero area, there is no annotation + if _has_only_empty_bbox(anno): + return False + # keypoints task have a slight different critera for considering + # if an annotation is valid + if "keypoints" not in anno[0]: + return True + # for keypoint detection tasks, only consider valid images those + # containing at least min_keypoints_per_image + if _count_visible_keypoints(anno) >= min_keypoints_per_image: + return True + return False + + assert isinstance(dataset, torchvision.datasets.CocoDetection) + ids = [] + for ds_idx, img_id in enumerate(dataset.ids): + ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None) + anno = dataset.coco.loadAnns(ann_ids) + if cat_list: + anno = [obj for obj in anno if obj["category_id"] in cat_list] + if _has_valid_annotation(anno): + ids.append(ds_idx) + + dataset = torch.utils.data.Subset(dataset, ids) + return dataset + + +def convert_to_coco_api(ds): + coco_ds = COCO() + # annotation IDs need to start at 1, not 0, see torchvision issue #1530 + ann_id = 1 + dataset = {'images': [], 'categories': [], 'annotations': []} + categories = set() + for img_idx in range(len(ds)): + # find better way to get target + # targets = ds.get_annotations(img_idx) + img, targets = ds[img_idx] + image_id = targets["image_id"].item() + img_dict = {} + img_dict['id'] = image_id + img_dict['height'] = img.shape[-2] + img_dict['width'] = img.shape[-1] + dataset['images'].append(img_dict) + bboxes = targets["boxes"] + bboxes[:, 2:] -= bboxes[:, :2] + bboxes = bboxes.tolist() + labels = targets['labels'].tolist() + areas = targets['area'].tolist() + iscrowd = targets['iscrowd'].tolist() + if 'masks' in targets: + masks = targets['masks'] + # make masks Fortran contiguous for coco_mask + masks = masks.permute(0, 2, 1).contiguous().permute(0, 2, 1) + if 'keypoints' in targets: + keypoints = targets['keypoints'] + keypoints = keypoints.reshape(keypoints.shape[0], -1).tolist() + num_objs = len(bboxes) + for i in range(num_objs): + ann = {} + ann['image_id'] = image_id + ann['bbox'] = bboxes[i] + ann['category_id'] = labels[i] + 
categories.add(labels[i]) + ann['area'] = areas[i] + ann['iscrowd'] = iscrowd[i] + ann['id'] = ann_id + if 'masks' in targets: + ann["segmentation"] = coco_mask.encode(masks[i].numpy()) + if 'keypoints' in targets: + ann['keypoints'] = keypoints[i] + ann['num_keypoints'] = sum(k != 0 for k in keypoints[i][2::3]) + dataset['annotations'].append(ann) + ann_id += 1 + dataset['categories'] = [{'id': i} for i in sorted(categories)] + coco_ds.dataset = dataset + coco_ds.createIndex() + return coco_ds + + +def get_coco_api_from_dataset(dataset): + for _ in range(10): + if isinstance(dataset, torchvision.datasets.CocoDetection): + break + if isinstance(dataset, torch.utils.data.Subset): + dataset = dataset.dataset + if isinstance(dataset, torchvision.datasets.CocoDetection): + return dataset.coco + return convert_to_coco_api(dataset) + + +class CocoDetection(torchvision.datasets.CocoDetection): + def __init__(self, img_folder, ann_file, transforms): + super(CocoDetection, self).__init__(img_folder, ann_file) + self._transforms = transforms + + def __getitem__(self, idx): + img, target = super(CocoDetection, self).__getitem__(idx) + image_id = self.ids[idx] + target = dict(image_id=image_id, annotations=target) + if self._transforms is not None: + img, target = self._transforms(img, target) + return img, target + + +def get_coco(root, image_set, transforms, mode='instances'): + anno_file_template = "{}_{}2017.json" + PATHS = { + "train": ("train2017", os.path.join("annotations", anno_file_template.format(mode, "train"))), + "val": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))), + # "train": ("val2017", os.path.join("annotations", anno_file_template.format(mode, "val"))) + } + + t = [ConvertCocoPolysToMask()] + + if transforms is not None: + t.append(transforms) + transforms = T.Compose(t) + + img_folder, ann_file = PATHS[image_set] + img_folder = os.path.join(root, img_folder) + ann_file = os.path.join(root, ann_file) + + dataset = CocoDetection(img_folder, ann_file, transforms=transforms) + + if image_set == "train": + dataset = _coco_remove_images_without_annotations(dataset) + + # dataset = torch.utils.data.Subset(dataset, [i for i in range(500)]) + + return dataset + + +def get_coco_kp(root, image_set, transforms): + return get_coco(root, image_set, transforms, mode="person_keypoints") diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/engine.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/engine.py new file mode 100644 index 0000000000000000000000000000000000000000..49992af60a9f4c0ecf154fc4585af7ee33ee51f5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/engine.py @@ -0,0 +1,110 @@ +import math +import sys +import time +import torch + +import torchvision.models.detection.mask_rcnn + +from coco_utils import get_coco_api_from_dataset +from coco_eval import CocoEvaluator +import utils + + +def train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) + header = 'Epoch: [{}]'.format(epoch) + + lr_scheduler = None + if epoch == 0: + warmup_factor = 1. 
/ 1000 + warmup_iters = min(1000, len(data_loader) - 1) + + lr_scheduler = utils.warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor) + + for images, targets in metric_logger.log_every(data_loader, print_freq, header): + images = list(image.to(device) for image in images) + targets = [{k: v.to(device) for k, v in t.items()} for t in targets] + + loss_dict = model(images, targets) + + losses = sum(loss for loss in loss_dict.values()) + + # reduce losses over all GPUs for logging purposes + loss_dict_reduced = utils.reduce_dict(loss_dict) + losses_reduced = sum(loss for loss in loss_dict_reduced.values()) + + loss_value = losses_reduced.item() + + if not math.isfinite(loss_value): + print("Loss is {}, stopping training".format(loss_value)) + print(loss_dict_reduced) + sys.exit(1) + + optimizer.zero_grad() + losses.backward() + optimizer.step() + + if lr_scheduler is not None: + lr_scheduler.step() + + metric_logger.update(loss=losses_reduced, **loss_dict_reduced) + metric_logger.update(lr=optimizer.param_groups[0]["lr"]) + + return metric_logger + + +def _get_iou_types(model): + model_without_ddp = model + if isinstance(model, torch.nn.parallel.DistributedDataParallel): + model_without_ddp = model.module + iou_types = ["bbox"] + if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN): + iou_types.append("segm") + if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN): + iou_types.append("keypoints") + return iou_types + + +@torch.no_grad() +def evaluate(model, data_loader, device): + n_threads = torch.get_num_threads() + # FIXME remove this and make paste_masks_in_image run on the GPU + torch.set_num_threads(1) + cpu_device = torch.device("cpu") + model.eval() + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + + coco = get_coco_api_from_dataset(data_loader.dataset) + iou_types = _get_iou_types(model) + coco_evaluator = CocoEvaluator(coco, iou_types) + + for images, targets in metric_logger.log_every(data_loader, 100, header): + images = list(img.to(device) for img in images) + + if torch.cuda.is_available(): + torch.cuda.synchronize() + model_time = time.time() + outputs = model(images) + + outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs] + model_time = time.time() - model_time + + res = {target["image_id"].item(): output for target, output in zip(targets, outputs)} + evaluator_time = time.time() + coco_evaluator.update(res) + evaluator_time = time.time() - evaluator_time + metric_logger.update(model_time=model_time, evaluator_time=evaluator_time) + + # gather the stats from all processes + metric_logger.synchronize_between_processes() + print("Averaged stats:", metric_logger) + coco_evaluator.synchronize_between_processes() + + # accumulate predictions from all images + coco_evaluator.accumulate() + coco_evaluator.summarize() + torch.set_num_threads(n_threads) + return coco_evaluator diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/group_by_aspect_ratio.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/group_by_aspect_ratio.py new file mode 100644 index 0000000000000000000000000000000000000000..1b76f4c64f7ab470b3bb30160a16e865a3909351 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/group_by_aspect_ratio.py @@ -0,0 +1,195 @@ +import bisect +from collections import defaultdict +import copy +from itertools import repeat, chain +import math +import numpy as np + +import torch +import torch.utils.data +from torch.utils.data.sampler 
import BatchSampler, Sampler +from torch.utils.model_zoo import tqdm +import torchvision + +from PIL import Image + + +def _repeat_to_at_least(iterable, n): + repeat_times = math.ceil(n / len(iterable)) + repeated = chain.from_iterable(repeat(iterable, repeat_times)) + return list(repeated) + + +class GroupedBatchSampler(BatchSampler): + """ + Wraps another sampler to yield a mini-batch of indices. + It enforces that the batch only contain elements from the same group. + It also tries to provide mini-batches which follows an ordering which is + as close as possible to the ordering from the original sampler. + Args: + sampler (Sampler): Base sampler. + group_ids (list[int]): If the sampler produces indices in range [0, N), + `group_ids` must be a list of `N` ints which contains the group id of each sample. + The group ids must be a continuous set of integers starting from + 0, i.e. they must be in the range [0, num_groups). + batch_size (int): Size of mini-batch. + """ + def __init__(self, sampler, group_ids, batch_size): + if not isinstance(sampler, Sampler): + raise ValueError( + "sampler should be an instance of " + "torch.utils.data.Sampler, but got sampler={}".format(sampler) + ) + self.sampler = sampler + self.group_ids = group_ids + self.batch_size = batch_size + + def __iter__(self): + buffer_per_group = defaultdict(list) + samples_per_group = defaultdict(list) + + num_batches = 0 + for idx in self.sampler: + group_id = self.group_ids[idx] + buffer_per_group[group_id].append(idx) + samples_per_group[group_id].append(idx) + if len(buffer_per_group[group_id]) == self.batch_size: + yield buffer_per_group[group_id] + num_batches += 1 + del buffer_per_group[group_id] + assert len(buffer_per_group[group_id]) < self.batch_size + + # now we have run out of elements that satisfy + # the group criteria, let's return the remaining + # elements so that the size of the sampler is + # deterministic + expected_num_batches = len(self) + num_remaining = expected_num_batches - num_batches + if num_remaining > 0: + # for the remaining batches, take first the buffers with largest number + # of elements + for group_id, _ in sorted(buffer_per_group.items(), + key=lambda x: len(x[1]), reverse=True): + remaining = self.batch_size - len(buffer_per_group[group_id]) + samples_from_group_id = _repeat_to_at_least(samples_per_group[group_id], remaining) + buffer_per_group[group_id].extend(samples_from_group_id[:remaining]) + assert len(buffer_per_group[group_id]) == self.batch_size + yield buffer_per_group[group_id] + num_remaining -= 1 + if num_remaining == 0: + break + assert num_remaining == 0 + + def __len__(self): + return len(self.sampler) // self.batch_size + + +def _compute_aspect_ratios_slow(dataset, indices=None): + print("Your dataset doesn't support the fast path for " + "computing the aspect ratios, so will iterate over " + "the full dataset and load every image instead. 
" + "This might take some time...") + if indices is None: + indices = range(len(dataset)) + + class SubsetSampler(Sampler): + def __init__(self, indices): + self.indices = indices + + def __iter__(self): + return iter(self.indices) + + def __len__(self): + return len(self.indices) + + sampler = SubsetSampler(indices) + data_loader = torch.utils.data.DataLoader( + dataset, batch_size=1, sampler=sampler, + num_workers=14, # you might want to increase it for faster processing + collate_fn=lambda x: x[0]) + aspect_ratios = [] + with tqdm(total=len(dataset)) as pbar: + for _i, (img, _) in enumerate(data_loader): + pbar.update(1) + height, width = img.shape[-2:] + aspect_ratio = float(width) / float(height) + aspect_ratios.append(aspect_ratio) + return aspect_ratios + + +def _compute_aspect_ratios_custom_dataset(dataset, indices=None): + if indices is None: + indices = range(len(dataset)) + aspect_ratios = [] + for i in indices: + height, width = dataset.get_height_and_width(i) + aspect_ratio = float(width) / float(height) + aspect_ratios.append(aspect_ratio) + return aspect_ratios + + +def _compute_aspect_ratios_coco_dataset(dataset, indices=None): + if indices is None: + indices = range(len(dataset)) + aspect_ratios = [] + for i in indices: + img_info = dataset.coco.imgs[dataset.ids[i]] + aspect_ratio = float(img_info["width"]) / float(img_info["height"]) + aspect_ratios.append(aspect_ratio) + return aspect_ratios + + +def _compute_aspect_ratios_voc_dataset(dataset, indices=None): + if indices is None: + indices = range(len(dataset)) + aspect_ratios = [] + for i in indices: + # this doesn't load the data into memory, because PIL loads it lazily + width, height = Image.open(dataset.images[i]).size + aspect_ratio = float(width) / float(height) + aspect_ratios.append(aspect_ratio) + return aspect_ratios + + +def _compute_aspect_ratios_subset_dataset(dataset, indices=None): + if indices is None: + indices = range(len(dataset)) + + ds_indices = [dataset.indices[i] for i in indices] + return compute_aspect_ratios(dataset.dataset, ds_indices) + + +def compute_aspect_ratios(dataset, indices=None): + if hasattr(dataset, "get_height_and_width"): + return _compute_aspect_ratios_custom_dataset(dataset, indices) + + if isinstance(dataset, torchvision.datasets.CocoDetection): + return _compute_aspect_ratios_coco_dataset(dataset, indices) + + if isinstance(dataset, torchvision.datasets.VOCDetection): + return _compute_aspect_ratios_voc_dataset(dataset, indices) + + if isinstance(dataset, torch.utils.data.Subset): + return _compute_aspect_ratios_subset_dataset(dataset, indices) + + # slow path + return _compute_aspect_ratios_slow(dataset, indices) + + +def _quantize(x, bins): + bins = copy.deepcopy(bins) + bins = sorted(bins) + quantized = list(map(lambda y: bisect.bisect_right(bins, y), x)) + return quantized + + +def create_aspect_ratio_groups(dataset, k=0): + aspect_ratios = compute_aspect_ratios(dataset) + bins = (2 ** np.linspace(-1, 1, 2 * k + 1)).tolist() if k > 0 else [1.0] + groups = _quantize(aspect_ratios, bins) + # count number of elements per group + counts = np.unique(groups, return_counts=True)[1] + fbins = [0] + bins + [np.inf] + print("Using {} as bins for aspect ratio quantization".format(fbins)) + print("Count of instances per bin: {}".format(counts)) + return groups diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/presets.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/presets.py new file mode 100644 index 
0000000000000000000000000000000000000000..1fac69ae35690a5c286cafa8a35b6474ab0d3688 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/presets.py @@ -0,0 +1,37 @@ +import transforms as T + + +class DetectionPresetTrain: + def __init__(self, data_augmentation, hflip_prob=0.5, mean=(123., 117., 104.)): + if data_augmentation == 'hflip': + self.transforms = T.Compose([ + T.RandomHorizontalFlip(p=hflip_prob), + T.ToTensor(), + ]) + elif data_augmentation == 'ssd': + self.transforms = T.Compose([ + T.RandomPhotometricDistort(), + T.RandomZoomOut(fill=list(mean)), + T.RandomIoUCrop(), + T.RandomHorizontalFlip(p=hflip_prob), + T.ToTensor(), + ]) + elif data_augmentation == 'ssdlite': + self.transforms = T.Compose([ + T.RandomIoUCrop(), + T.RandomHorizontalFlip(p=hflip_prob), + T.ToTensor(), + ]) + else: + raise ValueError(f'Unknown data augmentation policy "{data_augmentation}"') + + def __call__(self, img, target): + return self.transforms(img, target) + + +class DetectionPresetEval: + def __init__(self): + self.transforms = T.ToTensor() + + def __call__(self, img, target): + return self.transforms(img, target) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/train.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/train.py new file mode 100644 index 0000000000000000000000000000000000000000..cd4148e9bf7852d66e074ba522c59e6bb630855b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/train.py @@ -0,0 +1,233 @@ +r"""PyTorch Detection Training. + +To run in a multi-gpu environment, use the distributed launcher:: + + python -m torch.distributed.launch --nproc_per_node=$NGPU --use_env \ + train.py ... --world-size $NGPU + +The default hyperparameters are tuned for training on 8 gpus and 2 images per gpu. + --lr 0.02 --batch-size 2 --world-size 8 +If you use different number of gpus, the learning rate should be changed to 0.02/8*$NGPU. + +On top of that, for training Faster/Mask R-CNN, the default hyperparameters are + --epochs 26 --lr-steps 16 22 --aspect-ratio-group-factor 3 + +Also, if you train Keypoint R-CNN, the default hyperparameters are + --epochs 46 --lr-steps 36 43 --aspect-ratio-group-factor 3 +Because the number of images is smaller in the person keypoint subset of COCO, +the number of epochs should be adapted so that we have the same number of iterations. 
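+
+For example, on a single GPU ($NGPU=1) the learning-rate rule above gives --lr 0.0025 (0.02 / 8 * 1).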
+""" +import datetime +import os +import time + +import torch +import torch.utils.data +import torchvision +import torchvision.models.detection +import torchvision.models.detection.mask_rcnn + +from coco_utils import get_coco, get_coco_kp + +from group_by_aspect_ratio import GroupedBatchSampler, create_aspect_ratio_groups +from engine import train_one_epoch, evaluate + +import presets +import utils + + +def get_dataset(name, image_set, transform, data_path): + paths = { + "coco": (data_path, get_coco, 91), + "coco_kp": (data_path, get_coco_kp, 2) + } + p, ds_fn, num_classes = paths[name] + + ds = ds_fn(p, image_set=image_set, transforms=transform) + return ds, num_classes + + +def get_transform(train, data_augmentation): + return presets.DetectionPresetTrain(data_augmentation) if train else presets.DetectionPresetEval() + + +def get_args_parser(add_help=True): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Detection Training', add_help=add_help) + + parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset') + parser.add_argument('--dataset', default='coco', help='dataset') + parser.add_argument('--model', default='maskrcnn_resnet50_fpn', help='model') + parser.add_argument('--device', default='cuda', help='device') + parser.add_argument('-b', '--batch-size', default=2, type=int, + help='images per gpu, the total batch size is $NGPU x batch_size') + parser.add_argument('--epochs', default=26, type=int, metavar='N', + help='number of total epochs to run') + parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='number of data loading workers (default: 4)') + parser.add_argument('--lr', default=0.02, type=float, + help='initial learning rate, 0.02 is the default value for training ' + 'on 8 gpus and 2 images_per_gpu') + parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') + parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') + parser.add_argument('--lr-scheduler', default="multisteplr", help='the lr scheduler (default: multisteplr)') + parser.add_argument('--lr-step-size', default=8, type=int, + help='decrease lr every step-size epochs (multisteplr scheduler only)') + parser.add_argument('--lr-steps', default=[16, 22], nargs='+', type=int, + help='decrease lr every step-size epochs (multisteplr scheduler only)') + parser.add_argument('--lr-gamma', default=0.1, type=float, + help='decrease lr by a factor of lr-gamma (multisteplr scheduler only)') + parser.add_argument('--print-freq', default=20, type=int, help='print frequency') + parser.add_argument('--output-dir', default='.', help='path where to save') + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start_epoch', default=0, type=int, help='start epoch') + parser.add_argument('--aspect-ratio-group-factor', default=3, type=int) + parser.add_argument('--rpn-score-thresh', default=None, type=float, help='rpn score threshold for faster-rcnn') + parser.add_argument('--trainable-backbone-layers', default=None, type=int, + help='number of trainable layers of backbone') + parser.add_argument('--data-augmentation', default="hflip", help='data augmentation policy (default: hflip)') + parser.add_argument( + "--sync-bn", + dest="sync_bn", + help="Use sync batch norm", + action="store_true", + ) + parser.add_argument( + "--test-only", + dest="test_only", + help="Only test the model", + 
action="store_true", + ) + parser.add_argument( + "--pretrained", + dest="pretrained", + help="Use pre-trained models from the modelzoo", + action="store_true", + ) + + # distributed training parameters + parser.add_argument('--world-size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training') + + return parser + + +def main(args): + if args.output_dir: + utils.mkdir(args.output_dir) + + utils.init_distributed_mode(args) + print(args) + + device = torch.device(args.device) + + # Data loading code + print("Loading data") + + dataset, num_classes = get_dataset(args.dataset, "train", get_transform(True, args.data_augmentation), + args.data_path) + dataset_test, _ = get_dataset(args.dataset, "val", get_transform(False, args.data_augmentation), args.data_path) + + print("Creating data loaders") + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) + test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) + else: + train_sampler = torch.utils.data.RandomSampler(dataset) + test_sampler = torch.utils.data.SequentialSampler(dataset_test) + + if args.aspect_ratio_group_factor >= 0: + group_ids = create_aspect_ratio_groups(dataset, k=args.aspect_ratio_group_factor) + train_batch_sampler = GroupedBatchSampler(train_sampler, group_ids, args.batch_size) + else: + train_batch_sampler = torch.utils.data.BatchSampler( + train_sampler, args.batch_size, drop_last=True) + + data_loader = torch.utils.data.DataLoader( + dataset, batch_sampler=train_batch_sampler, num_workers=args.workers, + collate_fn=utils.collate_fn) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, batch_size=1, + sampler=test_sampler, num_workers=args.workers, + collate_fn=utils.collate_fn) + + print("Creating model") + kwargs = { + "trainable_backbone_layers": args.trainable_backbone_layers + } + if "rcnn" in args.model: + if args.rpn_score_thresh is not None: + kwargs["rpn_score_thresh"] = args.rpn_score_thresh + model = torchvision.models.detection.__dict__[args.model](num_classes=num_classes, pretrained=args.pretrained, + **kwargs) + model.to(device) + if args.distributed and args.sync_bn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + params = [p for p in model.parameters() if p.requires_grad] + optimizer = torch.optim.SGD( + params, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + args.lr_scheduler = args.lr_scheduler.lower() + if args.lr_scheduler == 'multisteplr': + lr_scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=args.lr_steps, gamma=args.lr_gamma) + elif args.lr_scheduler == 'cosineannealinglr': + lr_scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs) + else: + raise RuntimeError("Invalid lr scheduler '{}'. 
Only MultiStepLR and CosineAnnealingLR " + "are supported.".format(args.lr_scheduler)) + + if args.resume: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.test_only: + evaluate(model, data_loader_test, device=device) + return + + print("Start training") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + train_one_epoch(model, optimizer, data_loader, device, epoch, args.print_freq) + lr_scheduler.step() + if args.output_dir: + checkpoint = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'args': args, + 'epoch': epoch + } + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'model_{}.pth'.format(epoch))) + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'checkpoint.pth')) + + # evaluate after every epoch + evaluate(model, data_loader_test, device=device) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +if __name__ == "__main__": + args = get_args_parser().parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/transforms.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..8e4b8870eaf0e4729337bf0303f3a6542565a4d2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/transforms.py @@ -0,0 +1,239 @@ +import torch +import torchvision + +from torch import nn, Tensor +from torchvision.transforms import functional as F +from torchvision.transforms import transforms as T +from typing import List, Tuple, Dict, Optional + + +def _flip_coco_person_keypoints(kps, width): + flip_inds = [0, 2, 1, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15] + flipped_data = kps[:, flip_inds] + flipped_data[..., 0] = width - flipped_data[..., 0] + # Maintain COCO convention that if visibility == 0, then x, y = 0 + inds = flipped_data[..., 2] == 0 + flipped_data[inds] = 0 + return flipped_data + + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, target): + for t in self.transforms: + image, target = t(image, target) + return image, target + + +class RandomHorizontalFlip(T.RandomHorizontalFlip): + def forward(self, image: Tensor, + target: Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if torch.rand(1) < self.p: + image = F.hflip(image) + if target is not None: + width, _ = F._get_image_size(image) + target["boxes"][:, [0, 2]] = width - target["boxes"][:, [2, 0]] + if "masks" in target: + target["masks"] = target["masks"].flip(-1) + if "keypoints" in target: + keypoints = target["keypoints"] + keypoints = _flip_coco_person_keypoints(keypoints, width) + target["keypoints"] = keypoints + return image, target + + +class ToTensor(nn.Module): + def forward(self, image: Tensor, + target: Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + image = F.to_tensor(image) + return image, target + + +class RandomIoUCrop(nn.Module): + def __init__(self, min_scale: float = 0.3, 
max_scale: float = 1.0, min_aspect_ratio: float = 0.5, + max_aspect_ratio: float = 2.0, sampler_options: Optional[List[float]] = None, trials: int = 40): + super().__init__() + # Configuration similar to https://github.com/weiliu89/caffe/blob/ssd/examples/ssd/ssd_coco.py#L89-L174 + self.min_scale = min_scale + self.max_scale = max_scale + self.min_aspect_ratio = min_aspect_ratio + self.max_aspect_ratio = max_aspect_ratio + if sampler_options is None: + sampler_options = [0.0, 0.1, 0.3, 0.5, 0.7, 0.9, 1.0] + self.options = sampler_options + self.trials = trials + + def forward(self, image: Tensor, + target: Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if target is None: + raise ValueError("The targets can't be None for this transform.") + + if isinstance(image, torch.Tensor): + if image.ndimension() not in {2, 3}: + raise ValueError('image should be 2/3 dimensional. Got {} dimensions.'.format(image.ndimension())) + elif image.ndimension() == 2: + image = image.unsqueeze(0) + + orig_w, orig_h = F._get_image_size(image) + + while True: + # sample an option + idx = int(torch.randint(low=0, high=len(self.options), size=(1,))) + min_jaccard_overlap = self.options[idx] + if min_jaccard_overlap >= 1.0: # a value larger than 1 encodes the leave as-is option + return image, target + + for _ in range(self.trials): + # check the aspect ratio limitations + r = self.min_scale + (self.max_scale - self.min_scale) * torch.rand(2) + new_w = int(orig_w * r[0]) + new_h = int(orig_h * r[1]) + aspect_ratio = new_w / new_h + if not (self.min_aspect_ratio <= aspect_ratio <= self.max_aspect_ratio): + continue + + # check for 0 area crops + r = torch.rand(2) + left = int((orig_w - new_w) * r[0]) + top = int((orig_h - new_h) * r[1]) + right = left + new_w + bottom = top + new_h + if left == right or top == bottom: + continue + + # check for any valid boxes with centers within the crop area + cx = 0.5 * (target["boxes"][:, 0] + target["boxes"][:, 2]) + cy = 0.5 * (target["boxes"][:, 1] + target["boxes"][:, 3]) + is_within_crop_area = (left < cx) & (cx < right) & (top < cy) & (cy < bottom) + if not is_within_crop_area.any(): + continue + + # check at least 1 box with jaccard limitations + boxes = target["boxes"][is_within_crop_area] + ious = torchvision.ops.boxes.box_iou(boxes, torch.tensor([[left, top, right, bottom]], + dtype=boxes.dtype, device=boxes.device)) + if ious.max() < min_jaccard_overlap: + continue + + # keep only valid boxes and perform cropping + target["boxes"] = boxes + target["labels"] = target["labels"][is_within_crop_area] + target["boxes"][:, 0::2] -= left + target["boxes"][:, 1::2] -= top + target["boxes"][:, 0::2].clamp_(min=0, max=new_w) + target["boxes"][:, 1::2].clamp_(min=0, max=new_h) + image = F.crop(image, top, left, new_h, new_w) + + return image, target + + +class RandomZoomOut(nn.Module): + def __init__(self, fill: Optional[List[float]] = None, side_range: Tuple[float, float] = (1., 4.), p: float = 0.5): + super().__init__() + if fill is None: + fill = [0., 0., 0.] + self.fill = fill + self.side_range = side_range + if side_range[0] < 1. 
or side_range[0] > side_range[1]: + raise ValueError("Invalid canvas side range provided {}.".format(side_range)) + self.p = p + + @torch.jit.unused + def _get_fill_value(self, is_pil): + # type: (bool) -> int + # We fake the type to make it work on JIT + return tuple(int(x) for x in self.fill) if is_pil else 0 + + def forward(self, image: Tensor, + target: Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if isinstance(image, torch.Tensor): + if image.ndimension() not in {2, 3}: + raise ValueError('image should be 2/3 dimensional. Got {} dimensions.'.format(image.ndimension())) + elif image.ndimension() == 2: + image = image.unsqueeze(0) + + if torch.rand(1) < self.p: + return image, target + + orig_w, orig_h = F._get_image_size(image) + + r = self.side_range[0] + torch.rand(1) * (self.side_range[1] - self.side_range[0]) + canvas_width = int(orig_w * r) + canvas_height = int(orig_h * r) + + r = torch.rand(2) + left = int((canvas_width - orig_w) * r[0]) + top = int((canvas_height - orig_h) * r[1]) + right = canvas_width - (left + orig_w) + bottom = canvas_height - (top + orig_h) + + if torch.jit.is_scripting(): + fill = 0 + else: + fill = self._get_fill_value(F._is_pil_image(image)) + + image = F.pad(image, [left, top, right, bottom], fill=fill) + if isinstance(image, torch.Tensor): + v = torch.tensor(self.fill, device=image.device, dtype=image.dtype).view(-1, 1, 1) + image[..., :top, :] = image[..., :, :left] = image[..., (top + orig_h):, :] = \ + image[..., :, (left + orig_w):] = v + + if target is not None: + target["boxes"][:, 0::2] += left + target["boxes"][:, 1::2] += top + + return image, target + + +class RandomPhotometricDistort(nn.Module): + def __init__(self, contrast: Tuple[float] = (0.5, 1.5), saturation: Tuple[float] = (0.5, 1.5), + hue: Tuple[float] = (-0.05, 0.05), brightness: Tuple[float] = (0.875, 1.125), p: float = 0.5): + super().__init__() + self._brightness = T.ColorJitter(brightness=brightness) + self._contrast = T.ColorJitter(contrast=contrast) + self._hue = T.ColorJitter(hue=hue) + self._saturation = T.ColorJitter(saturation=saturation) + self.p = p + + def forward(self, image: Tensor, + target: Optional[Dict[str, Tensor]] = None) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if isinstance(image, torch.Tensor): + if image.ndimension() not in {2, 3}: + raise ValueError('image should be 2/3 dimensional. 
Got {} dimensions.'.format(image.ndimension())) + elif image.ndimension() == 2: + image = image.unsqueeze(0) + + r = torch.rand(7) + + if r[0] < self.p: + image = self._brightness(image) + + contrast_before = r[1] < 0.5 + if contrast_before: + if r[2] < self.p: + image = self._contrast(image) + + if r[3] < self.p: + image = self._saturation(image) + + if r[4] < self.p: + image = self._hue(image) + + if not contrast_before: + if r[5] < self.p: + image = self._contrast(image) + + if r[6] < self.p: + channels = F._get_image_num_channels(image) + permutation = torch.randperm(channels) + + is_pil = F._is_pil_image(image) + if is_pil: + image = F.to_tensor(image) + image = image[..., permutation, :, :] + if is_pil: + image = F.to_pil_image(image) + + return image, target diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/detection/utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/detection/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3c52abb2167ae2bf0504e77740f17bb1cd7f487d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/detection/utils.py @@ -0,0 +1,295 @@ +from collections import defaultdict, deque +import datetime +import errno +import os +import time + +import torch +import torch.distributed as dist + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +def all_gather(data): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + data_list = [None] * world_size + dist.all_gather_object(data_list, data) + return data_list + + +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
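+    For example, reduce_dict({"loss": loss}) returns {"loss": loss averaged over all ranks}
+    when average is True (and the per-rank sum when average is False).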
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + dist.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0 or i == len(iterable) - 1: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {} ({:.4f} s / it)'.format( + header, total_time_str, total_time / len(iterable))) + + +def collate_fn(batch): + return tuple(zip(*batch)) + + +def warmup_lr_scheduler(optimizer, warmup_iters, warmup_factor): + + def f(x): + if x >= warmup_iters: + return 1 + alpha = float(x) / warmup_iters + return warmup_factor * (1 - alpha) + alpha + + return torch.optim.lr_scheduler.LambdaLR(optimizer, f) + + +def mkdir(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, 
**kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + torch.distributed.barrier() + setup_for_distributed(args.rank == 0) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/README.md b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/README.md new file mode 100644 index 0000000000000000000000000000000000000000..6e24f8366245325feb588d0cb8d6ceee9cb6a2a7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/README.md @@ -0,0 +1,43 @@ +# Semantic segmentation reference training scripts + +This folder contains reference training scripts for semantic segmentation. +They serve as a log of how to train specific models, as provide baseline +training and evaluation scripts to quickly bootstrap research. + +All models have been trained on 8x V100 GPUs. 
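+
+For reference, evaluating a saved checkpoint reuses the same entry point with the `--resume` and
+`--test-only` flags defined in `train.py`; the checkpoint path below is a placeholder:
+
+```
+python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --dataset coco -b 4 --model fcn_resnet50 --aux-loss --resume /path/to/checkpoint.pth --test-only
+```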
+ +You must modify the following flags: + +`--data-path=/path/to/dataset` + +`--nproc_per_node=<number_of_gpus_available>` + +## fcn_resnet50 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.02 --dataset coco -b 4 --model fcn_resnet50 --aux-loss +``` + +## fcn_resnet101 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.02 --dataset coco -b 4 --model fcn_resnet101 --aux-loss +``` + +## deeplabv3_resnet50 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.02 --dataset coco -b 4 --model deeplabv3_resnet50 --aux-loss +``` + +## deeplabv3_resnet101 +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --lr 0.02 --dataset coco -b 4 --model deeplabv3_resnet101 --aux-loss +``` + +## deeplabv3_mobilenet_v3_large +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --dataset coco -b 4 --model deeplabv3_mobilenet_v3_large --aux-loss --wd 0.000001 +``` + +## lraspp_mobilenet_v3_large +``` +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --dataset coco -b 4 --model lraspp_mobilenet_v3_large --wd 0.000001 +``` diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/coco_utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/coco_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c86d5495247629685f9c1d172f511abfb6aa1767 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/coco_utils.py @@ -0,0 +1,111 @@ +import copy +import torch +import torch.utils.data +import torchvision +from PIL import Image + +import os + +from pycocotools import mask as coco_mask + +from transforms import Compose + + +class FilterAndRemapCocoCategories(object): + def __init__(self, categories, remap=True): + self.categories = categories + self.remap = remap + + def __call__(self, image, anno): + anno = [obj for obj in anno if obj["category_id"] in self.categories] + if not self.remap: + return image, anno + anno = copy.deepcopy(anno) + for obj in anno: + obj["category_id"] = self.categories.index(obj["category_id"]) + return image, anno + + +def convert_coco_poly_to_mask(segmentations, height, width): + masks = [] + for polygons in segmentations: + rles = coco_mask.frPyObjects(polygons, height, width) + mask = coco_mask.decode(rles) + if len(mask.shape) < 3: + mask = mask[..., None] + mask = torch.as_tensor(mask, dtype=torch.uint8) + mask = mask.any(dim=2) + masks.append(mask) + if masks: + masks = torch.stack(masks, dim=0) + else: + masks = torch.zeros((0, height, width), dtype=torch.uint8) + return masks + + +class ConvertCocoPolysToMask(object): + def __call__(self, image, anno): + w, h = image.size + segmentations = [obj["segmentation"] for obj in anno] + cats = [obj["category_id"] for obj in anno] + if segmentations: + masks = convert_coco_poly_to_mask(segmentations, h, w) + cats = torch.as_tensor(cats, dtype=masks.dtype) + # merge all instance masks into a single segmentation map + # with its corresponding categories + target, _ = (masks * cats[:, None, None]).max(dim=0) + # discard overlapping instances + target[masks.sum(0) > 1] = 255 + else: + target = torch.zeros((h, w), dtype=torch.uint8) + target = Image.fromarray(target.numpy()) + return image, target + + +def _coco_remove_images_without_annotations(dataset, cat_list=None): + def _has_valid_annotation(anno): + # if it's empty, there is no annotation + if len(anno) == 0: + return False + # 
if more than 1k pixels occupied in the image + return sum(obj["area"] for obj in anno) > 1000 + + assert isinstance(dataset, torchvision.datasets.CocoDetection) + ids = [] + for ds_idx, img_id in enumerate(dataset.ids): + ann_ids = dataset.coco.getAnnIds(imgIds=img_id, iscrowd=None) + anno = dataset.coco.loadAnns(ann_ids) + if cat_list: + anno = [obj for obj in anno if obj["category_id"] in cat_list] + if _has_valid_annotation(anno): + ids.append(ds_idx) + + dataset = torch.utils.data.Subset(dataset, ids) + return dataset + + +def get_coco(root, image_set, transforms): + PATHS = { + "train": ("train2017", os.path.join("annotations", "instances_train2017.json")), + "val": ("val2017", os.path.join("annotations", "instances_val2017.json")), + # "train": ("val2017", os.path.join("annotations", "instances_val2017.json")) + } + CAT_LIST = [0, 5, 2, 16, 9, 44, 6, 3, 17, 62, 21, 67, 18, 19, 4, + 1, 64, 20, 63, 7, 72] + + transforms = Compose([ + FilterAndRemapCocoCategories(CAT_LIST, remap=True), + ConvertCocoPolysToMask(), + transforms + ]) + + img_folder, ann_file = PATHS[image_set] + img_folder = os.path.join(root, img_folder) + ann_file = os.path.join(root, ann_file) + + dataset = torchvision.datasets.CocoDetection(img_folder, ann_file, transforms=transforms) + + if image_set == "train": + dataset = _coco_remove_images_without_annotations(dataset, CAT_LIST) + + return dataset diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/presets.py b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/presets.py new file mode 100644 index 0000000000000000000000000000000000000000..3bf29c237519328028f58de4f2679935e7c2c1f1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/presets.py @@ -0,0 +1,32 @@ +import transforms as T + + +class SegmentationPresetTrain: + def __init__(self, base_size, crop_size, hflip_prob=0.5, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): + min_size = int(0.5 * base_size) + max_size = int(2.0 * base_size) + + trans = [T.RandomResize(min_size, max_size)] + if hflip_prob > 0: + trans.append(T.RandomHorizontalFlip(hflip_prob)) + trans.extend([ + T.RandomCrop(crop_size), + T.ToTensor(), + T.Normalize(mean=mean, std=std), + ]) + self.transforms = T.Compose(trans) + + def __call__(self, img, target): + return self.transforms(img, target) + + +class SegmentationPresetEval: + def __init__(self, base_size, mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)): + self.transforms = T.Compose([ + T.RandomResize(base_size, base_size), + T.ToTensor(), + T.Normalize(mean=mean, std=std), + ]) + + def __call__(self, img, target): + return self.transforms(img, target) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/train.py b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/train.py new file mode 100644 index 0000000000000000000000000000000000000000..fb6c7eeee154f3fd1878ddb4e8145683c032516d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/train.py @@ -0,0 +1,227 @@ +import datetime +import os +import time + +import torch +import torch.utils.data +from torch import nn +import torchvision + +from coco_utils import get_coco +import presets +import utils + + +def get_dataset(dir_path, name, image_set, transform): + def sbd(*args, **kwargs): + return torchvision.datasets.SBDataset(*args, mode='segmentation', **kwargs) + paths = { + "voc": (dir_path, torchvision.datasets.VOCSegmentation, 21), + "voc_aug": (dir_path, sbd, 21), + "coco": (dir_path, 
get_coco, 21) + } + p, ds_fn, num_classes = paths[name] + + ds = ds_fn(p, image_set=image_set, transforms=transform) + return ds, num_classes + + +def get_transform(train): + base_size = 520 + crop_size = 480 + + return presets.SegmentationPresetTrain(base_size, crop_size) if train else presets.SegmentationPresetEval(base_size) + + +def criterion(inputs, target): + losses = {} + for name, x in inputs.items(): + losses[name] = nn.functional.cross_entropy(x, target, ignore_index=255) + + if len(losses) == 1: + return losses['out'] + + return losses['out'] + 0.5 * losses['aux'] + + +def evaluate(model, data_loader, device, num_classes): + model.eval() + confmat = utils.ConfusionMatrix(num_classes) + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + with torch.no_grad(): + for image, target in metric_logger.log_every(data_loader, 100, header): + image, target = image.to(device), target.to(device) + output = model(image) + output = output['out'] + + confmat.update(target.flatten(), output.argmax(1).flatten()) + + confmat.reduce_from_all_processes() + + return confmat + + +def train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, print_freq): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}')) + header = 'Epoch: [{}]'.format(epoch) + for image, target in metric_logger.log_every(data_loader, print_freq, header): + image, target = image.to(device), target.to(device) + output = model(image) + loss = criterion(output, target) + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + lr_scheduler.step() + + metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"]) + + +def main(args): + if args.output_dir: + utils.mkdir(args.output_dir) + + utils.init_distributed_mode(args) + print(args) + + device = torch.device(args.device) + + dataset, num_classes = get_dataset(args.data_path, args.dataset, "train", get_transform(train=True)) + dataset_test, _ = get_dataset(args.data_path, args.dataset, "val", get_transform(train=False)) + + if args.distributed: + train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) + test_sampler = torch.utils.data.distributed.DistributedSampler(dataset_test) + else: + train_sampler = torch.utils.data.RandomSampler(dataset) + test_sampler = torch.utils.data.SequentialSampler(dataset_test) + + data_loader = torch.utils.data.DataLoader( + dataset, batch_size=args.batch_size, + sampler=train_sampler, num_workers=args.workers, + collate_fn=utils.collate_fn, drop_last=True) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, batch_size=1, + sampler=test_sampler, num_workers=args.workers, + collate_fn=utils.collate_fn) + + model = torchvision.models.segmentation.__dict__[args.model](num_classes=num_classes, + aux_loss=args.aux_loss, + pretrained=args.pretrained) + model.to(device) + if args.distributed: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + params_to_optimize = [ + {"params": [p for p in model_without_ddp.backbone.parameters() if p.requires_grad]}, + {"params": [p for p in model_without_ddp.classifier.parameters() if p.requires_grad]}, + ] + if args.aux_loss: + params = [p for p in model_without_ddp.aux_classifier.parameters() if p.requires_grad] + 
params_to_optimize.append({"params": params, "lr": args.lr * 10}) + optimizer = torch.optim.SGD( + params_to_optimize, + lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay) + + lr_scheduler = torch.optim.lr_scheduler.LambdaLR( + optimizer, + lambda x: (1 - x / (len(data_loader) * args.epochs)) ** 0.9) + + if args.resume: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model'], strict=not args.test_only) + if not args.test_only: + optimizer.load_state_dict(checkpoint['optimizer']) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.test_only: + confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes) + print(confmat) + return + + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + train_one_epoch(model, criterion, optimizer, data_loader, lr_scheduler, device, epoch, args.print_freq) + confmat = evaluate(model, data_loader_test, device=device, num_classes=num_classes) + print(confmat) + checkpoint = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args + } + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'model_{}.pth'.format(epoch))) + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'checkpoint.pth')) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +def get_args_parser(add_help=True): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Segmentation Training', add_help=add_help) + + parser.add_argument('--data-path', default='/datasets01/COCO/022719/', help='dataset path') + parser.add_argument('--dataset', default='coco', help='dataset name') + parser.add_argument('--model', default='fcn_resnet101', help='model') + parser.add_argument('--aux-loss', action='store_true', help='auxiliar loss') + parser.add_argument('--device', default='cuda', help='device') + parser.add_argument('-b', '--batch-size', default=8, type=int) + parser.add_argument('--epochs', default=30, type=int, metavar='N', + help='number of total epochs to run') + + parser.add_argument('-j', '--workers', default=16, type=int, metavar='N', + help='number of data loading workers (default: 16)') + parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate') + parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') + parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') + parser.add_argument('--print-freq', default=10, type=int, help='print frequency') + parser.add_argument('--output-dir', default='.', help='path where to save') + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument( + "--test-only", + dest="test_only", + help="Only test the model", + action="store_true", + ) + parser.add_argument( + "--pretrained", + dest="pretrained", + help="Use pre-trained models from the modelzoo", + action="store_true", + ) + # distributed training parameters + parser.add_argument('--world-size', default=1, type=int, + 
help='number of distributed processes') + parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training') + + return parser + + +if __name__ == "__main__": + args = get_args_parser().parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/transforms.py b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..4fe5a5ad147f490db83e5fa9c37589da50b34957 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/transforms.py @@ -0,0 +1,92 @@ +import numpy as np +from PIL import Image +import random + +import torch +from torchvision import transforms as T +from torchvision.transforms import functional as F + + +def pad_if_smaller(img, size, fill=0): + min_size = min(img.size) + if min_size < size: + ow, oh = img.size + padh = size - oh if oh < size else 0 + padw = size - ow if ow < size else 0 + img = F.pad(img, (0, 0, padw, padh), fill=fill) + return img + + +class Compose(object): + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, image, target): + for t in self.transforms: + image, target = t(image, target) + return image, target + + +class RandomResize(object): + def __init__(self, min_size, max_size=None): + self.min_size = min_size + if max_size is None: + max_size = min_size + self.max_size = max_size + + def __call__(self, image, target): + size = random.randint(self.min_size, self.max_size) + image = F.resize(image, size) + target = F.resize(target, size, interpolation=Image.NEAREST) + return image, target + + +class RandomHorizontalFlip(object): + def __init__(self, flip_prob): + self.flip_prob = flip_prob + + def __call__(self, image, target): + if random.random() < self.flip_prob: + image = F.hflip(image) + target = F.hflip(target) + return image, target + + +class RandomCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, image, target): + image = pad_if_smaller(image, self.size) + target = pad_if_smaller(target, self.size, fill=255) + crop_params = T.RandomCrop.get_params(image, (self.size, self.size)) + image = F.crop(image, *crop_params) + target = F.crop(target, *crop_params) + return image, target + + +class CenterCrop(object): + def __init__(self, size): + self.size = size + + def __call__(self, image, target): + image = F.center_crop(image, self.size) + target = F.center_crop(target, self.size) + return image, target + + +class ToTensor(object): + def __call__(self, image, target): + image = F.to_tensor(image) + target = torch.as_tensor(np.array(target), dtype=torch.int64) + return image, target + + +class Normalize(object): + def __init__(self, mean, std): + self.mean = mean + self.std = std + + def __call__(self, image, target): + image = F.normalize(image, mean=self.mean, std=self.std) + return image, target diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..b67c18052fb5cf297d53640c79879300f4e9b9b1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/segmentation/utils.py @@ -0,0 +1,298 @@ +from collections import defaultdict, deque +import datetime +import time +import torch +import torch.distributed as dist + +import errno +import os + + +class SmoothedValue(object): + """Track a series of values and provide access 
to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class ConfusionMatrix(object): + def __init__(self, num_classes): + self.num_classes = num_classes + self.mat = None + + def update(self, a, b): + n = self.num_classes + if self.mat is None: + self.mat = torch.zeros((n, n), dtype=torch.int64, device=a.device) + with torch.no_grad(): + k = (a >= 0) & (a < n) + inds = n * a[k].to(torch.int64) + b[k] + self.mat += torch.bincount(inds, minlength=n**2).reshape(n, n) + + def reset(self): + self.mat.zero_() + + def compute(self): + h = self.mat.float() + acc_global = torch.diag(h).sum() / h.sum() + acc = torch.diag(h) / h.sum(1) + iu = torch.diag(h) / (h.sum(1) + h.sum(0) - torch.diag(h)) + return acc_global, acc, iu + + def reduce_from_all_processes(self): + if not torch.distributed.is_available(): + return + if not torch.distributed.is_initialized(): + return + torch.distributed.barrier() + torch.distributed.all_reduce(self.mat) + + def __str__(self): + acc_global, acc, iu = self.compute() + return ( + 'global correct: {:.1f}\n' + 'average row correct: {}\n' + 'IoU: {}\n' + 'mean IoU: {:.1f}').format( + acc_global.item() * 100, + ['{:.1f}'.format(i) for i in (acc * 100).tolist()], + ['{:.1f}'.format(i) for i in (iu * 100).tolist()], + iu.mean().item() * 100) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = 
time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {}'.format(header, total_time_str)) + + +def cat_list(images, fill_value=0): + max_size = tuple(max(s) for s in zip(*[img.shape for img in images])) + batch_shape = (len(images),) + max_size + batched_imgs = images[0].new(*batch_shape).fill_(fill_value) + for img, pad_img in zip(images, batched_imgs): + pad_img[..., :img.shape[-2], :img.shape[-1]].copy_(img) + return batched_imgs + + +def collate_fn(batch): + images, targets = list(zip(*batch)) + batched_imgs = cat_list(images, fill_value=0) + batched_targets = cat_list(targets, fill_value=255) + return batched_imgs, batched_targets + + +def mkdir(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + elif hasattr(args, "rank"): + pass + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + 
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + setup_for_distributed(args.rank == 0) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/README.md b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/README.md new file mode 100644 index 0000000000000000000000000000000000000000..770c0f65b7d8196f1d7129d48fd97fdb6cc15610 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/README.md @@ -0,0 +1,53 @@ +# Similarity Learning Using Triplet Loss # + +In this reference, we use triplet loss to learn embeddings which can be used to differentiate images. This learning technique was popularized by [FaceNet: A Unified Embedding for Face Recognition and Clustering](https://arxiv.org/abs/1503.03832) and has been quite effective in learning embeddings to differentiate between faces. + +This reference can be directly applied to the following use cases: + +* You have an unknown number of classes and would like to train a model to learn how to differentiate between them. +* You want to train a model to learn a distance-based metric between samples. For example, learning a distance-based similarity measure between faces. + +### Training ### +By default, the training script trains ResNet50 on the FashionMNIST Dataset to learn image embeddings which can be used to differentiate between images by measuring the euclidean distance between embeddings. This can be changed as per your requirements. + +Image embeddings of the same class should be 'close' to each other, while image embeddings between different classes should be 'far' away. + +To run the training script: + +```bash +python train.py -h # Lists all optional arguments +python train.py # Runs training script with default args +``` + +Running the training script as is should yield 97% accuracy on the FMNIST test set within 10 epochs. + +### Loss ### +`TripletMarginLoss` is a loss function which takes in a triplet of samples. A valid triplet has an: + +1. Anchor: a sample from the dataset +2. Positive: another sample with the same label/group as the anchor (Generally, positive != anchor) +3. Negative: a sample with a different label/group from the anchor + +`TripletMarginLoss` (refer to `loss.py`) does the following: + +```python +loss = max(dist(anchor, positive) - dist(anchor, negative) + margin, 0) +``` +Where `dist` is a distance function. Minimizing this function effectively leads to minimizing `dist(anchor, positive)` and maximizing `dist(anchor, negative)`. + +The FaceNet paper describe this loss in more detail. + +### Sampler ### + +In order to generate valid triplets from a batch of samples, we need to make sure that each batch has multiple samples with the same label. We do this using `PKSampler` (refer to `sampler.py`), which ensures that each batch of size `p * k` will have samples from exactly `p` classes and `k` samples per class. + +### Triplet Mining ### + +`TripletMarginLoss` currently supports the following mining techniques: + +* `batch_all`: Generates all possible triplets from a batch and excludes the triplets which are 'easy' (which have `loss = 0`) before passing it through the loss function. +* `batch_hard`: For every anchor, `batch_hard` creates a triplet with the 'hardest' positive (farthest positive) and negative (closest negative). + +These mining strategies usually speed up training. 
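+
+The snippet below is a minimal sketch of how these pieces fit together outside `train.py`, assuming
+`loss.py` and `sampler.py` from this folder are importable; the tiny linear embedding model and the
+`FakeData` dataset are stand-ins for illustration only.
+
+```python
+import torch
+import torch.nn as nn
+from torch.utils.data import DataLoader
+from torchvision.datasets import FakeData
+import torchvision.transforms as transforms
+
+from loss import TripletMarginLoss
+from sampler import PKSampler
+
+p, k = 8, 4  # each batch holds samples from p labels, k samples per label
+dataset = FakeData(size=1000, num_classes=20, image_size=(3, 32, 32),
+                   transform=transforms.ToTensor())
+targets = [int(target) for _, target in dataset]
+loader = DataLoader(dataset, batch_size=p * k, sampler=PKSampler(targets, p, k))
+
+model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 32 * 32, 128))  # stand-in embedding net
+criterion = TripletMarginLoss(margin=1.0, mining='batch_all')
+optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
+
+for images, labels in loader:
+    # L2-normalize embeddings, as EmbeddingNet does in model.py
+    embeddings = nn.functional.normalize(model(images), dim=1)
+    loss, frac_active = criterion(embeddings, labels)  # batch_all also returns the fraction of non-easy triplets
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
+    break  # a single step is enough for the sketch
+```
+
+With `mining='batch_hard'` the second return value is simply -1, so only the loss itself should be used.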
+ +This [webpage](https://omoindrot.github.io/triplet-loss) describes the sampling and mining strategies in more detail. diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/loss.py b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/loss.py new file mode 100644 index 0000000000000000000000000000000000000000..1fa4a89c762514c9c69f4dbda89c6be9b48394d9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/loss.py @@ -0,0 +1,109 @@ +''' + Pytorch adaptation of https://omoindrot.github.io/triplet-loss + https://github.com/omoindrot/tensorflow-triplet-loss +''' +import torch +import torch.nn as nn + + +class TripletMarginLoss(nn.Module): + def __init__(self, margin=1.0, p=2., mining='batch_all'): + super(TripletMarginLoss, self).__init__() + self.margin = margin + self.p = p + self.mining = mining + + if mining == 'batch_all': + self.loss_fn = batch_all_triplet_loss + if mining == 'batch_hard': + self.loss_fn = batch_hard_triplet_loss + + def forward(self, embeddings, labels): + return self.loss_fn(labels, embeddings, self.margin, self.p) + + +def batch_hard_triplet_loss(labels, embeddings, margin, p): + pairwise_dist = torch.cdist(embeddings, embeddings, p=p) + + mask_anchor_positive = _get_anchor_positive_triplet_mask(labels).float() + anchor_positive_dist = mask_anchor_positive * pairwise_dist + + # hardest positive for every anchor + hardest_positive_dist, _ = anchor_positive_dist.max(1, keepdim=True) + + mask_anchor_negative = _get_anchor_negative_triplet_mask(labels).float() + + # Add max value in each row to invalid negatives + max_anchor_negative_dist, _ = pairwise_dist.max(1, keepdim=True) + anchor_negative_dist = pairwise_dist + max_anchor_negative_dist * (1.0 - mask_anchor_negative) + + # hardest negative for every anchor + hardest_negative_dist, _ = anchor_negative_dist.min(1, keepdim=True) + + triplet_loss = hardest_positive_dist - hardest_negative_dist + margin + triplet_loss[triplet_loss < 0] = 0 + + triplet_loss = triplet_loss.mean() + + return triplet_loss, -1 + + +def batch_all_triplet_loss(labels, embeddings, margin, p): + pairwise_dist = torch.cdist(embeddings, embeddings, p=p) + + anchor_positive_dist = pairwise_dist.unsqueeze(2) + anchor_negative_dist = pairwise_dist.unsqueeze(1) + + triplet_loss = anchor_positive_dist - anchor_negative_dist + margin + + mask = _get_triplet_mask(labels) + triplet_loss = mask.float() * triplet_loss + + # Remove negative losses (easy triplets) + triplet_loss[triplet_loss < 0] = 0 + + # Count number of positive triplets (where triplet_loss > 0) + valid_triplets = triplet_loss[triplet_loss > 1e-16] + num_positive_triplets = valid_triplets.size(0) + num_valid_triplets = mask.sum() + + fraction_positive_triplets = num_positive_triplets / (num_valid_triplets.float() + 1e-16) + + # Get final mean triplet loss over the positive valid triplets + triplet_loss = triplet_loss.sum() / (num_positive_triplets + 1e-16) + + return triplet_loss, fraction_positive_triplets + + +def _get_triplet_mask(labels): + # Check that i, j and k are distinct + indices_equal = torch.eye(labels.size(0), dtype=torch.bool, device=labels.device) + indices_not_equal = ~indices_equal + i_not_equal_j = indices_not_equal.unsqueeze(2) + i_not_equal_k = indices_not_equal.unsqueeze(1) + j_not_equal_k = indices_not_equal.unsqueeze(0) + + distinct_indices = (i_not_equal_j & i_not_equal_k) & j_not_equal_k + + label_equal = labels.unsqueeze(0) == labels.unsqueeze(1) + i_equal_j = label_equal.unsqueeze(2) + i_equal_k = 
label_equal.unsqueeze(1) + + valid_labels = ~i_equal_k & i_equal_j + + return valid_labels & distinct_indices + + +def _get_anchor_positive_triplet_mask(labels): + # Check that i and j are distinct + indices_equal = torch.eye(labels.size(0), dtype=torch.bool, device=labels.device) + indices_not_equal = ~indices_equal + + # Check if labels[i] == labels[j] + labels_equal = labels.unsqueeze(0) == labels.unsqueeze(1) + + return labels_equal & indices_not_equal + + +def _get_anchor_negative_triplet_mask(labels): + return labels.unsqueeze(0) != labels.unsqueeze(1) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/model.py b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/model.py new file mode 100644 index 0000000000000000000000000000000000000000..3b39c0ec0dc28a6ea59c320d61c8c7e178b06359 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/model.py @@ -0,0 +1,16 @@ +import torch.nn as nn +import torchvision.models as models + + +class EmbeddingNet(nn.Module): + def __init__(self, backbone=None): + super(EmbeddingNet, self).__init__() + if backbone is None: + backbone = models.resnet50(num_classes=128) + + self.backbone = backbone + + def forward(self, x): + x = self.backbone(x) + x = nn.functional.normalize(x, dim=1) + return x diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/sampler.py b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..0ae6d07a77c04022a4c959bef196f824f86e0ae3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/sampler.py @@ -0,0 +1,76 @@ +import torch +from torch.utils.data.sampler import Sampler +from collections import defaultdict +import random + + +def create_groups(groups, k): + """Bins sample indices with respect to groups, remove bins with less than k samples + + Args: + groups (list[int]): where ith index stores ith sample's group id + + Returns: + defaultdict[list]: Bins of sample indices, binned by group_idx + """ + group_samples = defaultdict(list) + for sample_idx, group_idx in enumerate(groups): + group_samples[group_idx].append(sample_idx) + + keys_to_remove = [] + for key in group_samples: + if len(group_samples[key]) < k: + keys_to_remove.append(key) + continue + + for key in keys_to_remove: + group_samples.pop(key) + + return group_samples + + +class PKSampler(Sampler): + """ + Randomly samples from a dataset while ensuring that each batch (of size p * k) + includes samples from exactly p labels, with k samples for each label. + + Args: + groups (list[int]): List where the ith entry is the group_id/label of the ith sample in the dataset. 
+ p (int): Number of labels/groups to be sampled from in a batch + k (int): Number of samples for each label/group in a batch + """ + + def __init__(self, groups, p, k): + self.p = p + self.k = k + self.groups = create_groups(groups, self.k) + + # Ensures there are enough classes to sample from + assert len(self.groups) >= p + + def __iter__(self): + # Shuffle samples within groups + for key in self.groups: + random.shuffle(self.groups[key]) + + # Keep track of the number of samples left for each group + group_samples_remaining = {} + for key in self.groups: + group_samples_remaining[key] = len(self.groups[key]) + + while len(group_samples_remaining) > self.p: + # Select p groups at random from valid/remaining groups + group_ids = list(group_samples_remaining.keys()) + selected_group_idxs = torch.multinomial(torch.ones(len(group_ids)), self.p).tolist() + for i in selected_group_idxs: + group_id = group_ids[i] + group = self.groups[group_id] + for _ in range(self.k): + # No need to pick samples at random since group samples are shuffled + sample_idx = len(group) - group_samples_remaining[group_id] + yield group[sample_idx] + group_samples_remaining[group_id] -= 1 + + # Don't sample from group if it has less than k samples remaining + if group_samples_remaining[group_id] < self.k: + group_samples_remaining.pop(group_id) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/test.py b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/test.py new file mode 100644 index 0000000000000000000000000000000000000000..8381e02e740b9af65935988e55642497e9e699d5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/test.py @@ -0,0 +1,42 @@ +import unittest +from collections import defaultdict + +from torch.utils.data import DataLoader +from torchvision.datasets import FakeData +import torchvision.transforms as transforms + +from sampler import PKSampler + + +class Tester(unittest.TestCase): + + def test_pksampler(self): + p, k = 16, 4 + + # Ensure sampler does not allow p to be greater than num_classes + dataset = FakeData(size=100, num_classes=10, image_size=(3, 1, 1)) + targets = [target.item() for _, target in dataset] + self.assertRaises(AssertionError, PKSampler, targets, p, k) + + # Ensure p, k constraints on batch + dataset = FakeData(size=1000, num_classes=100, image_size=(3, 1, 1), + transform=transforms.ToTensor()) + targets = [target.item() for _, target in dataset] + sampler = PKSampler(targets, p, k) + loader = DataLoader(dataset, batch_size=p * k, sampler=sampler) + + for _, labels in loader: + bins = defaultdict(int) + for label in labels.tolist(): + bins[label] += 1 + + # Ensure that each batch has samples from exactly p classes + self.assertEqual(len(bins), p) + + # Ensure that there are k samples from each class + for b in bins: + self.assertEqual(bins[b], k) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/similarity/train.py b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/train.py new file mode 100644 index 0000000000000000000000000000000000000000..9a166a14b385d52b13d543649b1489071c4ce247 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/similarity/train.py @@ -0,0 +1,165 @@ +import os + +import torch +from torch.optim import Adam +from torch.utils.data import DataLoader + +import torchvision.transforms as transforms +from torchvision.datasets import FashionMNIST + +from loss import TripletMarginLoss +from sampler import 
PKSampler +from model import EmbeddingNet + + +def train_epoch(model, optimizer, criterion, data_loader, device, epoch, print_freq): + model.train() + running_loss = 0 + running_frac_pos_triplets = 0 + for i, data in enumerate(data_loader): + optimizer.zero_grad() + samples, targets = data[0].to(device), data[1].to(device) + + embeddings = model(samples) + + loss, frac_pos_triplets = criterion(embeddings, targets) + loss.backward() + optimizer.step() + + running_loss += loss.item() + running_frac_pos_triplets += float(frac_pos_triplets) + + if i % print_freq == print_freq - 1: + i += 1 + avg_loss = running_loss / print_freq + avg_trip = 100.0 * running_frac_pos_triplets / print_freq + print('[{:d}, {:d}] | loss: {:.4f} | % avg hard triplets: {:.2f}%'.format(epoch, i, avg_loss, avg_trip)) + running_loss = 0 + running_frac_pos_triplets = 0 + + +def find_best_threshold(dists, targets, device): + best_thresh = 0.01 + best_correct = 0 + for thresh in torch.arange(0.0, 1.51, 0.01): + predictions = dists <= thresh.to(device) + correct = torch.sum(predictions == targets.to(device)).item() + if correct > best_correct: + best_thresh = thresh + best_correct = correct + + accuracy = 100.0 * best_correct / dists.size(0) + + return best_thresh, accuracy + + +@torch.no_grad() +def evaluate(model, loader, device): + model.eval() + embeds, labels = [], [] + dists, targets = None, None + + for data in loader: + samples, _labels = data[0].to(device), data[1] + out = model(samples) + embeds.append(out) + labels.append(_labels) + + embeds = torch.cat(embeds, dim=0) + labels = torch.cat(labels, dim=0) + + dists = torch.cdist(embeds, embeds) + + labels = labels.unsqueeze(0) + targets = labels == labels.t() + + mask = torch.ones(dists.size()).triu() - torch.eye(dists.size(0)) + dists = dists[mask == 1] + targets = targets[mask == 1] + + threshold, accuracy = find_best_threshold(dists, targets, device) + + print('accuracy: {:.3f}%, threshold: {:.2f}'.format(accuracy, threshold)) + + +def save(model, epoch, save_dir, file_name): + file_name = 'epoch_' + str(epoch) + '__' + file_name + save_path = os.path.join(save_dir, file_name) + torch.save(model.state_dict(), save_path) + + +def main(args): + device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') + p = args.labels_per_batch + k = args.samples_per_label + batch_size = p * k + + model = EmbeddingNet() + if args.resume: + model.load_state_dict(torch.load(args.resume)) + + model.to(device) + + criterion = TripletMarginLoss(margin=args.margin) + optimizer = Adam(model.parameters(), lr=args.lr) + + transform = transforms.Compose([transforms.Lambda(lambda image: image.convert('RGB')), + transforms.Resize((224, 224)), + transforms.ToTensor()]) + + # Using FMNIST to demonstrate embedding learning using triplet loss. This dataset can + # be replaced with any classification dataset. + train_dataset = FashionMNIST(args.dataset_dir, train=True, transform=transform, download=True) + test_dataset = FashionMNIST(args.dataset_dir, train=False, transform=transform, download=True) + + # targets is a list where the i_th element corresponds to the label of i_th dataset element. + # This is required for PKSampler to randomly sample from exactly p classes. You will need to + # construct targets while building your dataset. Some datasets (such as ImageFolder) have a + # targets attribute with the same format. 
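    # Illustrative aside (not exercised by this script; names here are examples only):
    # for a dataset that lacks a `targets` attribute, an equivalent list can usually be
    # built with a single pass over the dataset, e.g.
    #     targets = [int(label) for _, label in my_dataset]
    # assuming each dataset item is an (image, label) pair, as with FashionMNIST above.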
+ targets = train_dataset.targets.tolist() + + train_loader = DataLoader(train_dataset, batch_size=batch_size, + sampler=PKSampler(targets, p, k), + num_workers=args.workers) + test_loader = DataLoader(test_dataset, batch_size=args.eval_batch_size, + shuffle=False, + num_workers=args.workers) + + for epoch in range(1, args.epochs + 1): + print('Training...') + train_epoch(model, optimizer, criterion, train_loader, device, epoch, args.print_freq) + + print('Evaluating...') + evaluate(model, test_loader, device) + + print('Saving...') + save(model, epoch, args.save_dir, 'ckpt.pth') + + +def parse_args(): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Embedding Learning') + + parser.add_argument('--dataset-dir', default='/tmp/fmnist/', + help='FashionMNIST dataset directory path') + parser.add_argument('-p', '--labels-per-batch', default=8, type=int, + help='Number of unique labels/classes per batch') + parser.add_argument('-k', '--samples-per-label', default=8, type=int, + help='Number of samples per label in a batch') + parser.add_argument('--eval-batch-size', default=512, type=int) + parser.add_argument('--epochs', default=10, type=int, metavar='N', + help='Number of training epochs to run') + parser.add_argument('-j', '--workers', default=4, type=int, metavar='N', + help='Number of data loading workers') + parser.add_argument('--lr', default=0.0001, type=float, help='Learning rate') + parser.add_argument('--margin', default=0.2, type=float, help='Triplet loss margin') + parser.add_argument('--print-freq', default=20, type=int, help='Print frequency') + parser.add_argument('--save-dir', default='.', help='Model save directory') + parser.add_argument('--resume', default='', help='Resume from checkpoint') + + return parser.parse_args() + + +if __name__ == '__main__': + args = parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/README.md b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ef7db6dcd9004a08e68334ba7bf4407f0986994e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/README.md @@ -0,0 +1,34 @@ +# Video Classification + +We present a simple training script that can be used for replicating the results of [ResNet-based video models](https://research.fb.com/wp-content/uploads/2018/04/a-closer-look-at-spatiotemporal-convolutions-for-action-recognition.pdf). All models are trained on the [Kinetics400 dataset](https://deepmind.com/research/open-source/kinetics), a benchmark dataset for human-action recognition. The accuracy is reported on the traditional validation split. + +## Data preparation + +If you have already downloaded the [Kinetics400 dataset](https://deepmind.com/research/open-source/kinetics), +please proceed directly to the next section. + +To download videos, one can use https://github.com/Showmax/kinetics-downloader. Please note that the dataset can take upwards of 400GB, depending on the quality setting during download. + +## Training + +We assume the training and validation AVI videos are stored at `/data/kinetics400/train` and +`/data/kinetics400/val`. For training, we suggest starting with the hyperparameters reported in the [paper](https://research.fb.com/wp-content/uploads/2018/04/a-closer-look-at-spatiotemporal-convolutions-for-action-recognition.pdf), in order to match the performance of said models.
The clip sampling strategy is a particularly important parameter during training, and we suggest using random temporal jittering - in other words, sampling multiple training clips from each video with random start times at every epoch. This functionality is built into our training script, and optimal hyperparameters are set by default. + +### Multiple GPUs + +Run the training on a single node with 8 GPUs: +```bash +python -m torch.distributed.launch --nproc_per_node=8 --use_env train.py --data-path=/data/kinetics400 --train-dir=train --val-dir=val --batch-size=16 --cache-dataset --sync-bn --apex +``` + +**Note:** all our models were trained on 8 nodes with 8 V100 GPUs each for a total of 64 GPUs. Expected training time for 64 GPUs is 24 hours, depending on the storage solution. +**Note 2:** hyperparameters for exact replication of our training can be found [here](https://github.com/pytorch/vision/blob/master/torchvision/models/video/README.md). Some hyperparameters such as learning rate are scaled linearly in proportion to the number of GPUs. + +### Single GPU + +**Note:** training on a single GPU can be extremely slow. + + +```bash +python train.py --data-path=/data/kinetics400 --train-dir=train --val-dir=val --batch-size=8 --cache-dataset +``` diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/presets.py b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/presets.py new file mode 100644 index 0000000000000000000000000000000000000000..3ee679ad5afcaeae039de71eee46712919c460c6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/presets.py @@ -0,0 +1,40 @@ +import torch + +from torchvision.transforms import transforms +from transforms import ConvertBHWCtoBCHW, ConvertBCHWtoCBHW + + +class VideoClassificationPresetTrain: + def __init__(self, resize_size, crop_size, mean=(0.43216, 0.394666, 0.37645), std=(0.22803, 0.22145, 0.216989), + hflip_prob=0.5): + trans = [ + ConvertBHWCtoBCHW(), + transforms.ConvertImageDtype(torch.float32), + transforms.Resize(resize_size), + ] + if hflip_prob > 0: + trans.append(transforms.RandomHorizontalFlip(hflip_prob)) + trans.extend([ + transforms.Normalize(mean=mean, std=std), + transforms.RandomCrop(crop_size), + ConvertBCHWtoCBHW() + ]) + self.transforms = transforms.Compose(trans) + + def __call__(self, x): + return self.transforms(x) + + +class VideoClassificationPresetEval: + def __init__(self, resize_size, crop_size, mean=(0.43216, 0.394666, 0.37645), std=(0.22803, 0.22145, 0.216989)): + self.transforms = transforms.Compose([ + ConvertBHWCtoBCHW(), + transforms.ConvertImageDtype(torch.float32), + transforms.Resize(resize_size), + transforms.Normalize(mean=mean, std=std), + transforms.CenterCrop(crop_size), + ConvertBCHWtoCBHW() + ]) + + def __call__(self, x): + return self.transforms(x) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/scheduler.py b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..f0f862d41ad9a028a82186bb9a11b48b6e840b85 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/scheduler.py @@ -0,0 +1,47 @@ +import torch +from bisect import bisect_right + + +class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler): + def __init__( + self, + optimizer, + milestones, + gamma=0.1, + warmup_factor=1.0 / 3, + warmup_iters=5, + warmup_method="linear",
last_epoch=-1, + ): + if not milestones == sorted(milestones): + raise ValueError( + "Milestones should be a list of" " increasing integers. Got {}", + milestones, + ) + + if warmup_method not in ("constant", "linear"): + raise ValueError( + "Only 'constant' or 'linear' warmup_method accepted" + "got {}".format(warmup_method) + ) + self.milestones = milestones + self.gamma = gamma + self.warmup_factor = warmup_factor + self.warmup_iters = warmup_iters + self.warmup_method = warmup_method + super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + warmup_factor = 1 + if self.last_epoch < self.warmup_iters: + if self.warmup_method == "constant": + warmup_factor = self.warmup_factor + elif self.warmup_method == "linear": + alpha = float(self.last_epoch) / self.warmup_iters + warmup_factor = self.warmup_factor * (1 - alpha) + alpha + return [ + base_lr * + warmup_factor * + self.gamma ** bisect_right(self.milestones, self.last_epoch) + for base_lr in self.base_lrs + ] diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/train.py b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/train.py new file mode 100644 index 0000000000000000000000000000000000000000..bcc74064344d47775792f1e6e34b0cfc576ef812 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/train.py @@ -0,0 +1,332 @@ +import datetime +import os +import time +import torch +import torch.utils.data +from torch.utils.data.dataloader import default_collate +from torch import nn +import torchvision +import torchvision.datasets.video_utils +from torchvision.datasets.samplers import DistributedSampler, UniformClipSampler, RandomClipSampler + +import presets +import utils + +from scheduler import WarmupMultiStepLR + +try: + from apex import amp +except ImportError: + amp = None + + +def train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, device, epoch, print_freq, apex=False): + model.train() + metric_logger = utils.MetricLogger(delimiter=" ") + metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value}')) + metric_logger.add_meter('clips/s', utils.SmoothedValue(window_size=10, fmt='{value:.3f}')) + + header = 'Epoch: [{}]'.format(epoch) + for video, target in metric_logger.log_every(data_loader, print_freq, header): + start_time = time.time() + video, target = video.to(device), target.to(device) + output = model(video) + loss = criterion(output, target) + + optimizer.zero_grad() + if apex: + with amp.scale_loss(loss, optimizer) as scaled_loss: + scaled_loss.backward() + else: + loss.backward() + optimizer.step() + + acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) + batch_size = video.shape[0] + metric_logger.update(loss=loss.item(), lr=optimizer.param_groups[0]["lr"]) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + metric_logger.meters['clips/s'].update(batch_size / (time.time() - start_time)) + lr_scheduler.step() + + +def evaluate(model, criterion, data_loader, device): + model.eval() + metric_logger = utils.MetricLogger(delimiter=" ") + header = 'Test:' + with torch.no_grad(): + for video, target in metric_logger.log_every(data_loader, 100, header): + video = video.to(device, non_blocking=True) + target = target.to(device, non_blocking=True) + output = model(video) + loss = criterion(output, target) + + acc1, acc5 = utils.accuracy(output, target, topk=(1, 5)) + # FIXME need to take into 
account that the datasets + # could have been padded in distributed setup + batch_size = video.shape[0] + metric_logger.update(loss=loss.item()) + metric_logger.meters['acc1'].update(acc1.item(), n=batch_size) + metric_logger.meters['acc5'].update(acc5.item(), n=batch_size) + # gather the stats from all processes + metric_logger.synchronize_between_processes() + + print(' * Clip Acc@1 {top1.global_avg:.3f} Clip Acc@5 {top5.global_avg:.3f}' + .format(top1=metric_logger.acc1, top5=metric_logger.acc5)) + return metric_logger.acc1.global_avg + + +def _get_cache_path(filepath): + import hashlib + h = hashlib.sha1(filepath.encode()).hexdigest() + cache_path = os.path.join("~", ".torch", "vision", "datasets", "kinetics", h[:10] + ".pt") + cache_path = os.path.expanduser(cache_path) + return cache_path + + +def collate_fn(batch): + # remove audio from the batch + batch = [(d[0], d[2]) for d in batch] + return default_collate(batch) + + +def main(args): + if args.apex and amp is None: + raise RuntimeError("Failed to import apex. Please install apex from https://www.github.com/nvidia/apex " + "to enable mixed-precision training.") + + if args.output_dir: + utils.mkdir(args.output_dir) + + utils.init_distributed_mode(args) + print(args) + print("torch version: ", torch.__version__) + print("torchvision version: ", torchvision.__version__) + + device = torch.device(args.device) + + torch.backends.cudnn.benchmark = True + + # Data loading code + print("Loading data") + traindir = os.path.join(args.data_path, args.train_dir) + valdir = os.path.join(args.data_path, args.val_dir) + + print("Loading training data") + st = time.time() + cache_path = _get_cache_path(traindir) + transform_train = presets.VideoClassificationPresetTrain((128, 171), (112, 112)) + + if args.cache_dataset and os.path.exists(cache_path): + print("Loading dataset_train from {}".format(cache_path)) + dataset, _ = torch.load(cache_path) + dataset.transform = transform_train + else: + if args.distributed: + print("It is recommended to pre-compute the dataset cache " + "on a single-gpu first, as it will be faster") + dataset = torchvision.datasets.Kinetics400( + traindir, + frames_per_clip=args.clip_len, + step_between_clips=1, + transform=transform_train, + frame_rate=15, + extensions=('avi', 'mp4', ) + ) + if args.cache_dataset: + print("Saving dataset_train to {}".format(cache_path)) + utils.mkdir(os.path.dirname(cache_path)) + utils.save_on_master((dataset, traindir), cache_path) + + print("Took", time.time() - st) + + print("Loading validation data") + cache_path = _get_cache_path(valdir) + + transform_test = presets.VideoClassificationPresetEval((128, 171), (112, 112)) + + if args.cache_dataset and os.path.exists(cache_path): + print("Loading dataset_test from {}".format(cache_path)) + dataset_test, _ = torch.load(cache_path) + dataset_test.transform = transform_test + else: + if args.distributed: + print("It is recommended to pre-compute the dataset cache " + "on a single-gpu first, as it will be faster") + dataset_test = torchvision.datasets.Kinetics400( + valdir, + frames_per_clip=args.clip_len, + step_between_clips=1, + transform=transform_test, + frame_rate=15, + extensions=('avi', 'mp4',) + ) + if args.cache_dataset: + print("Saving dataset_test to {}".format(cache_path)) + utils.mkdir(os.path.dirname(cache_path)) + utils.save_on_master((dataset_test, valdir), cache_path) + + print("Creating data loaders") + train_sampler = RandomClipSampler(dataset.video_clips, args.clips_per_video) + test_sampler = 
UniformClipSampler(dataset_test.video_clips, args.clips_per_video) + if args.distributed: + train_sampler = DistributedSampler(train_sampler) + test_sampler = DistributedSampler(test_sampler) + + data_loader = torch.utils.data.DataLoader( + dataset, batch_size=args.batch_size, + sampler=train_sampler, num_workers=args.workers, + pin_memory=True, collate_fn=collate_fn) + + data_loader_test = torch.utils.data.DataLoader( + dataset_test, batch_size=args.batch_size, + sampler=test_sampler, num_workers=args.workers, + pin_memory=True, collate_fn=collate_fn) + + print("Creating model") + model = torchvision.models.video.__dict__[args.model](pretrained=args.pretrained) + model.to(device) + if args.distributed and args.sync_bn: + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + + criterion = nn.CrossEntropyLoss() + + lr = args.lr * args.world_size + optimizer = torch.optim.SGD( + model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay) + + if args.apex: + model, optimizer = amp.initialize(model, optimizer, + opt_level=args.apex_opt_level + ) + + # convert scheduler to be per iteration, not per epoch, for warmup that lasts + # between different epochs + warmup_iters = args.lr_warmup_epochs * len(data_loader) + lr_milestones = [len(data_loader) * m for m in args.lr_milestones] + lr_scheduler = WarmupMultiStepLR( + optimizer, milestones=lr_milestones, gamma=args.lr_gamma, + warmup_iters=warmup_iters, warmup_factor=1e-5) + + model_without_ddp = model + if args.distributed: + model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu]) + model_without_ddp = model.module + + if args.resume: + checkpoint = torch.load(args.resume, map_location='cpu') + model_without_ddp.load_state_dict(checkpoint['model']) + optimizer.load_state_dict(checkpoint['optimizer']) + lr_scheduler.load_state_dict(checkpoint['lr_scheduler']) + args.start_epoch = checkpoint['epoch'] + 1 + + if args.test_only: + evaluate(model, criterion, data_loader_test, device=device) + return + + print("Start training") + start_time = time.time() + for epoch in range(args.start_epoch, args.epochs): + if args.distributed: + train_sampler.set_epoch(epoch) + train_one_epoch(model, criterion, optimizer, lr_scheduler, data_loader, + device, epoch, args.print_freq, args.apex) + evaluate(model, criterion, data_loader_test, device=device) + if args.output_dir: + checkpoint = { + 'model': model_without_ddp.state_dict(), + 'optimizer': optimizer.state_dict(), + 'lr_scheduler': lr_scheduler.state_dict(), + 'epoch': epoch, + 'args': args} + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'model_{}.pth'.format(epoch))) + utils.save_on_master( + checkpoint, + os.path.join(args.output_dir, 'checkpoint.pth')) + + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('Training time {}'.format(total_time_str)) + + +def parse_args(): + import argparse + parser = argparse.ArgumentParser(description='PyTorch Video Classification Training') + + parser.add_argument('--data-path', default='/datasets01_101/kinetics/070618/', help='dataset') + parser.add_argument('--train-dir', default='train_avi-480p', help='name of train dir') + parser.add_argument('--val-dir', default='val_avi-480p', help='name of val dir') + parser.add_argument('--model', default='r2plus1d_18', help='model') + parser.add_argument('--device', default='cuda', help='device') + parser.add_argument('--clip-len', default=16, type=int, metavar='N', + help='number of 
frames per clip') + parser.add_argument('--clips-per-video', default=5, type=int, metavar='N', + help='maximum number of clips per video to consider') + parser.add_argument('-b', '--batch-size', default=24, type=int) + parser.add_argument('--epochs', default=45, type=int, metavar='N', + help='number of total epochs to run') + parser.add_argument('-j', '--workers', default=10, type=int, metavar='N', + help='number of data loading workers (default: 16)') + parser.add_argument('--lr', default=0.01, type=float, help='initial learning rate') + parser.add_argument('--momentum', default=0.9, type=float, metavar='M', + help='momentum') + parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float, + metavar='W', help='weight decay (default: 1e-4)', + dest='weight_decay') + parser.add_argument('--lr-milestones', nargs='+', default=[20, 30, 40], type=int, help='decrease lr on milestones') + parser.add_argument('--lr-gamma', default=0.1, type=float, help='decrease lr by a factor of lr-gamma') + parser.add_argument('--lr-warmup-epochs', default=10, type=int, help='number of warmup epochs') + parser.add_argument('--print-freq', default=10, type=int, help='print frequency') + parser.add_argument('--output-dir', default='.', help='path where to save') + parser.add_argument('--resume', default='', help='resume from checkpoint') + parser.add_argument('--start-epoch', default=0, type=int, metavar='N', + help='start epoch') + parser.add_argument( + "--cache-dataset", + dest="cache_dataset", + help="Cache the datasets for quicker initialization. It also serializes the transforms", + action="store_true", + ) + parser.add_argument( + "--sync-bn", + dest="sync_bn", + help="Use sync batch norm", + action="store_true", + ) + parser.add_argument( + "--test-only", + dest="test_only", + help="Only test the model", + action="store_true", + ) + parser.add_argument( + "--pretrained", + dest="pretrained", + help="Use pre-trained models from the modelzoo", + action="store_true", + ) + + # Mixed precision training parameters + parser.add_argument('--apex', action='store_true', + help='Use apex for mixed precision training') + parser.add_argument('--apex-opt-level', default='O1', type=str, + help='For apex mixed precision training' + 'O0 for FP32 training, O1 for mixed precision training.' 
+ 'For further detail, see https://github.com/NVIDIA/apex/tree/master/examples/imagenet' + ) + + # distributed training parameters + parser.add_argument('--world-size', default=1, type=int, + help='number of distributed processes') + parser.add_argument('--dist-url', default='env://', help='url used to set up distributed training') + + args = parser.parse_args() + + return args + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/transforms.py b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..27f6c75450a555e285a069fc10a902c3e400363e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/transforms.py @@ -0,0 +1,18 @@ +import torch +import torch.nn as nn + + +class ConvertBHWCtoBCHW(nn.Module): + """Convert tensor from (B, H, W, C) to (B, C, H, W) + """ + + def forward(self, vid: torch.Tensor) -> torch.Tensor: + return vid.permute(0, 3, 1, 2) + + +class ConvertBCHWtoCBHW(nn.Module): + """Convert tensor from (B, C, H, W) to (C, B, H, W) + """ + + def forward(self, vid: torch.Tensor) -> torch.Tensor: + return vid.permute(1, 0, 2, 3) diff --git a/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/utils.py b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3573b84d7808f656a8f601bc81de5035295d97a3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/references/video_classification/utils.py @@ -0,0 +1,254 @@ +from collections import defaultdict, deque +import datetime +import time +import torch +import torch.distributed as dist + +import errno +import os + + +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average. + """ + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = "{median:.4f} ({global_avg:.4f})" + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! 
+ """ + if not is_dist_avail_and_initialized(): + return + t = torch.tensor([self.count, self.total], dtype=torch.float64, device='cuda') + dist.barrier() + dist.all_reduce(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value) + + +class MetricLogger(object): + def __init__(self, delimiter="\t"): + self.meters = defaultdict(SmoothedValue) + self.delimiter = delimiter + + def update(self, **kwargs): + for k, v in kwargs.items(): + if isinstance(v, torch.Tensor): + v = v.item() + assert isinstance(v, (float, int)) + self.meters[k].update(v) + + def __getattr__(self, attr): + if attr in self.meters: + return self.meters[attr] + if attr in self.__dict__: + return self.__dict__[attr] + raise AttributeError("'{}' object has no attribute '{}'".format( + type(self).__name__, attr)) + + def __str__(self): + loss_str = [] + for name, meter in self.meters.items(): + loss_str.append( + "{}: {}".format(name, str(meter)) + ) + return self.delimiter.join(loss_str) + + def synchronize_between_processes(self): + for meter in self.meters.values(): + meter.synchronize_between_processes() + + def add_meter(self, name, meter): + self.meters[name] = meter + + def log_every(self, iterable, print_freq, header=None): + i = 0 + if not header: + header = '' + start_time = time.time() + end = time.time() + iter_time = SmoothedValue(fmt='{avg:.4f}') + data_time = SmoothedValue(fmt='{avg:.4f}') + space_fmt = ':' + str(len(str(len(iterable)))) + 'd' + if torch.cuda.is_available(): + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}', + 'max mem: {memory:.0f}' + ]) + else: + log_msg = self.delimiter.join([ + header, + '[{0' + space_fmt + '}/{1}]', + 'eta: {eta}', + '{meters}', + 'time: {time}', + 'data: {data}' + ]) + MB = 1024.0 * 1024.0 + for obj in iterable: + data_time.update(time.time() - end) + yield obj + iter_time.update(time.time() - end) + if i % print_freq == 0: + eta_seconds = iter_time.global_avg * (len(iterable) - i) + eta_string = str(datetime.timedelta(seconds=int(eta_seconds))) + if torch.cuda.is_available(): + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time), + memory=torch.cuda.max_memory_allocated() / MB)) + else: + print(log_msg.format( + i, len(iterable), eta=eta_string, + meters=str(self), + time=str(iter_time), data=str(data_time))) + i += 1 + end = time.time() + total_time = time.time() - start_time + total_time_str = str(datetime.timedelta(seconds=int(total_time))) + print('{} Total time: {}'.format(header, total_time_str)) + + +def accuracy(output, target, topk=(1,)): + """Computes the accuracy over the k top predictions for the specified values of k""" + with torch.no_grad(): + maxk = max(topk) + batch_size = target.size(0) + + _, pred = output.topk(maxk, 1, True, True) + pred = pred.t() + correct = pred.eq(target[None]) + + res = [] + for k in topk: + correct_k = 
correct[:k].flatten().sum(dtype=torch.float32) + res.append(correct_k * (100.0 / batch_size)) + return res + + +def mkdir(path): + try: + os.makedirs(path) + except OSError as e: + if e.errno != errno.EEXIST: + raise + + +def setup_for_distributed(is_master): + """ + This function disables printing when not in master process + """ + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_master or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def is_dist_avail_and_initialized(): + if not dist.is_available(): + return False + if not dist.is_initialized(): + return False + return True + + +def get_world_size(): + if not is_dist_avail_and_initialized(): + return 1 + return dist.get_world_size() + + +def get_rank(): + if not is_dist_avail_and_initialized(): + return 0 + return dist.get_rank() + + +def is_main_process(): + return get_rank() == 0 + + +def save_on_master(*args, **kwargs): + if is_main_process(): + torch.save(*args, **kwargs) + + +def init_distributed_mode(args): + if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ: + args.rank = int(os.environ["RANK"]) + args.world_size = int(os.environ['WORLD_SIZE']) + args.gpu = int(os.environ['LOCAL_RANK']) + elif 'SLURM_PROCID' in os.environ: + args.rank = int(os.environ['SLURM_PROCID']) + args.gpu = args.rank % torch.cuda.device_count() + elif hasattr(args, "rank"): + pass + else: + print('Not using distributed mode') + args.distributed = False + return + + args.distributed = True + + torch.cuda.set_device(args.gpu) + args.dist_backend = 'nccl' + print('| distributed init (rank {}): {}'.format( + args.rank, args.dist_url), flush=True) + torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url, + world_size=args.world_size, rank=args.rank) + setup_for_distributed(args.rank == 0) diff --git a/pretrained_model/pytorch_vision_v0.10.0/setup.cfg b/pretrained_model/pytorch_vision_v0.10.0/setup.cfg new file mode 100644 index 0000000000000000000000000000000000000000..fd3b74c47de24b6348a4c3abd5f2d86224a4b748 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/setup.cfg @@ -0,0 +1,16 @@ +[bdist_wheel] +universal=1 + +[metadata] +license_file = LICENSE + +[pep8] +max-line-length = 120 + +[flake8] +max-line-length = 120 +ignore = F401,E402,F403,W503,W504,F821 +exclude = venv + +[pydocstyle] +select = D417 # Missing argument descriptions in the docstring diff --git a/pretrained_model/pytorch_vision_v0.10.0/setup.py b/pretrained_model/pytorch_vision_v0.10.0/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..4cc3d0698a471a15c7a7e9f8a816c2ec9bd28c47 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/setup.py @@ -0,0 +1,490 @@ +import os +import io +import sys +from setuptools import setup, find_packages +from pkg_resources import parse_version, get_distribution, DistributionNotFound +import subprocess +import distutils.command.clean +import distutils.spawn +import glob +import shutil + +import torch +from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME +from torch.utils.hipify import hipify_python + + +def read(*names, **kwargs): + with io.open( + os.path.join(os.path.dirname(__file__), *names), + encoding=kwargs.get("encoding", "utf8") + ) as fp: + return fp.read() + + +def get_dist(pkgname): + try: + return get_distribution(pkgname) + except DistributionNotFound: + return None + + +cwd = 
os.path.dirname(os.path.abspath(__file__)) + +version_txt = os.path.join(cwd, 'version.txt') +with open(version_txt, 'r') as f: + version = f.readline().strip() +sha = 'Unknown' +package_name = 'torchvision' + +try: + sha = subprocess.check_output(['git', 'rev-parse', 'HEAD'], cwd=cwd).decode('ascii').strip() +except Exception: + pass + +if os.getenv('BUILD_VERSION'): + version = os.getenv('BUILD_VERSION') +elif sha != 'Unknown': + version += '+' + sha[:7] + + +def write_version_file(): + version_path = os.path.join(cwd, 'torchvision', 'version.py') + with open(version_path, 'w') as f: + f.write("__version__ = '{}'\n".format(version)) + f.write("git_version = {}\n".format(repr(sha))) + f.write("from torchvision.extension import _check_cuda_version\n") + f.write("if _check_cuda_version() > 0:\n") + f.write(" cuda = _check_cuda_version()\n") + + +pytorch_dep = 'torch' +if os.getenv('PYTORCH_VERSION'): + pytorch_dep += "==" + os.getenv('PYTORCH_VERSION') + +requirements = [ + 'numpy', + pytorch_dep, +] + +pillow_ver = ' >= 5.3.0' +pillow_req = 'pillow-simd' if get_dist('pillow-simd') is not None else 'pillow' +requirements.append(pillow_req + pillow_ver) + + +def find_library(name, vision_include): + this_dir = os.path.dirname(os.path.abspath(__file__)) + build_prefix = os.environ.get('BUILD_PREFIX', None) + is_conda_build = build_prefix is not None + + library_found = False + conda_installed = False + lib_folder = None + include_folder = None + library_header = '{0}.h'.format(name) + + # Lookup in TORCHVISION_INCLUDE or in the package file + package_path = [os.path.join(this_dir, 'torchvision')] + for folder in vision_include + package_path: + candidate_path = os.path.join(folder, library_header) + library_found = os.path.exists(candidate_path) + if library_found: + break + + if not library_found: + print('Running build on conda-build: {0}'.format(is_conda_build)) + if is_conda_build: + # Add conda headers/libraries + if os.name == 'nt': + build_prefix = os.path.join(build_prefix, 'Library') + include_folder = os.path.join(build_prefix, 'include') + lib_folder = os.path.join(build_prefix, 'lib') + library_header_path = os.path.join( + include_folder, library_header) + library_found = os.path.isfile(library_header_path) + conda_installed = library_found + else: + # Check if using Anaconda to produce wheels + conda = distutils.spawn.find_executable('conda') + is_conda = conda is not None + print('Running build on conda: {0}'.format(is_conda)) + if is_conda: + python_executable = sys.executable + py_folder = os.path.dirname(python_executable) + if os.name == 'nt': + env_path = os.path.join(py_folder, 'Library') + else: + env_path = os.path.dirname(py_folder) + lib_folder = os.path.join(env_path, 'lib') + include_folder = os.path.join(env_path, 'include') + library_header_path = os.path.join( + include_folder, library_header) + library_found = os.path.isfile(library_header_path) + conda_installed = library_found + + if not library_found: + if sys.platform == 'linux': + library_found = os.path.exists('/usr/include/{0}'.format( + library_header)) + library_found = library_found or os.path.exists( + '/usr/local/include/{0}'.format(library_header)) + + return library_found, conda_installed, include_folder, lib_folder + + +def get_extensions(): + this_dir = os.path.dirname(os.path.abspath(__file__)) + extensions_dir = os.path.join(this_dir, 'torchvision', 'csrc') + + main_file = glob.glob(os.path.join(extensions_dir, '*.cpp')) + glob.glob(os.path.join(extensions_dir, 'ops', + '*.cpp')) + source_cpu = 
( + glob.glob(os.path.join(extensions_dir, 'ops', 'autograd', '*.cpp')) + + glob.glob(os.path.join(extensions_dir, 'ops', 'cpu', '*.cpp')) + + glob.glob(os.path.join(extensions_dir, 'ops', 'quantized', 'cpu', '*.cpp')) + ) + + is_rocm_pytorch = False + if torch.__version__ >= '1.5': + from torch.utils.cpp_extension import ROCM_HOME + is_rocm_pytorch = True if ((torch.version.hip is not None) and (ROCM_HOME is not None)) else False + + if is_rocm_pytorch: + hipify_python.hipify( + project_directory=this_dir, + output_directory=this_dir, + includes="torchvision/csrc/ops/cuda/*", + show_detailed=True, + is_pytorch_extension=True, + ) + source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'hip', '*.hip')) + # Copy over additional files + for file in glob.glob(r"torchvision/csrc/ops/cuda/*.h"): + shutil.copy(file, "torchvision/csrc/ops/hip") + + else: + source_cuda = glob.glob(os.path.join(extensions_dir, 'ops', 'cuda', '*.cu')) + + source_cuda += glob.glob(os.path.join(extensions_dir, 'ops', 'autocast', '*.cpp')) + + sources = main_file + source_cpu + extension = CppExtension + + compile_cpp_tests = os.getenv('WITH_CPP_MODELS_TEST', '0') == '1' + if compile_cpp_tests: + test_dir = os.path.join(this_dir, 'test') + models_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'models') + test_file = glob.glob(os.path.join(test_dir, '*.cpp')) + source_models = glob.glob(os.path.join(models_dir, '*.cpp')) + + test_file = [os.path.join(test_dir, s) for s in test_file] + source_models = [os.path.join(models_dir, s) for s in source_models] + tests = test_file + source_models + tests_include_dirs = [test_dir, models_dir] + + define_macros = [] + + extra_compile_args = {'cxx': []} + if (torch.cuda.is_available() and ((CUDA_HOME is not None) or is_rocm_pytorch)) \ + or os.getenv('FORCE_CUDA', '0') == '1': + extension = CUDAExtension + sources += source_cuda + if not is_rocm_pytorch: + define_macros += [('WITH_CUDA', None)] + nvcc_flags = os.getenv('NVCC_FLAGS', '') + if nvcc_flags == '': + nvcc_flags = [] + else: + nvcc_flags = nvcc_flags.split(' ') + else: + define_macros += [('WITH_HIP', None)] + nvcc_flags = [] + extra_compile_args["nvcc"] = nvcc_flags + + if sys.platform == 'win32': + define_macros += [('torchvision_EXPORTS', None)] + + extra_compile_args['cxx'].append('/MP') + + debug_mode = os.getenv('DEBUG', '0') == '1' + if debug_mode: + print("Compile in debug mode") + extra_compile_args['cxx'].append("-g") + extra_compile_args['cxx'].append("-O0") + if "nvcc" in extra_compile_args: + # we have to remove "-OX" and "-g" flag if exists and append + nvcc_flags = extra_compile_args["nvcc"] + extra_compile_args["nvcc"] = [ + f for f in nvcc_flags if not ("-O" in f or "-g" in f) + ] + extra_compile_args["nvcc"].append("-O0") + extra_compile_args["nvcc"].append("-g") + + sources = [os.path.join(extensions_dir, s) for s in sources] + + include_dirs = [extensions_dir] + + ext_modules = [ + extension( + 'torchvision._C', + sorted(sources), + include_dirs=include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ] + if compile_cpp_tests: + ext_modules.append( + extension( + 'torchvision._C_tests', + tests, + include_dirs=tests_include_dirs, + define_macros=define_macros, + extra_compile_args=extra_compile_args, + ) + ) + + # ------------------- Torchvision extra extensions ------------------------ + vision_include = os.environ.get('TORCHVISION_INCLUDE', None) + vision_library = os.environ.get('TORCHVISION_LIBRARY', None) + vision_include = 
(vision_include.split(os.pathsep) + if vision_include is not None else []) + vision_library = (vision_library.split(os.pathsep) + if vision_library is not None else []) + include_dirs += vision_include + library_dirs = vision_library + + # Image reading extension + image_macros = [] + image_include = [extensions_dir] + image_library = [] + image_link_flags = [] + + # Locating libPNG + libpng = distutils.spawn.find_executable('libpng-config') + pngfix = distutils.spawn.find_executable('pngfix') + png_found = libpng is not None or pngfix is not None + print('PNG found: {0}'.format(png_found)) + if png_found: + if libpng is not None: + # Linux / Mac + png_version = subprocess.run([libpng, '--version'], + stdout=subprocess.PIPE) + png_version = png_version.stdout.strip().decode('utf-8') + print('libpng version: {0}'.format(png_version)) + png_version = parse_version(png_version) + if png_version >= parse_version("1.6.0"): + print('Building torchvision with PNG image support') + png_lib = subprocess.run([libpng, '--libdir'], + stdout=subprocess.PIPE) + png_lib = png_lib.stdout.strip().decode('utf-8') + if 'disabled' not in png_lib: + image_library += [png_lib] + png_include = subprocess.run([libpng, '--I_opts'], + stdout=subprocess.PIPE) + png_include = png_include.stdout.strip().decode('utf-8') + _, png_include = png_include.split('-I') + print('libpng include path: {0}'.format(png_include)) + image_include += [png_include] + image_link_flags.append('png') + else: + print('libpng installed version is less than 1.6.0, ' + 'disabling PNG support') + png_found = False + else: + # Windows + png_lib = os.path.join( + os.path.dirname(os.path.dirname(pngfix)), 'lib') + png_include = os.path.join(os.path.dirname( + os.path.dirname(pngfix)), 'include', 'libpng16') + image_library += [png_lib] + image_include += [png_include] + image_link_flags.append('libpng') + + # Locating libjpeg + (jpeg_found, jpeg_conda, + jpeg_include, jpeg_lib) = find_library('jpeglib', vision_include) + + print('JPEG found: {0}'.format(jpeg_found)) + image_macros += [('PNG_FOUND', str(int(png_found)))] + image_macros += [('JPEG_FOUND', str(int(jpeg_found)))] + if jpeg_found: + print('Building torchvision with JPEG image support') + image_link_flags.append('jpeg') + if jpeg_conda: + image_library += [jpeg_lib] + image_include += [jpeg_include] + + # Locating nvjpeg + # Should be included in CUDA_HOME for CUDA >= 10.1, which is the minimum version we have in the CI + nvjpeg_found = ( + extension is CUDAExtension and + CUDA_HOME is not None and + os.path.exists(os.path.join(CUDA_HOME, 'include', 'nvjpeg.h')) + ) + + print('NVJPEG found: {0}'.format(nvjpeg_found)) + image_macros += [('NVJPEG_FOUND', str(int(nvjpeg_found)))] + if nvjpeg_found: + print('Building torchvision with NVJPEG image support') + image_link_flags.append('nvjpeg') + + image_path = os.path.join(extensions_dir, 'io', 'image') + image_src = (glob.glob(os.path.join(image_path, '*.cpp')) + glob.glob(os.path.join(image_path, 'cpu', '*.cpp')) + + glob.glob(os.path.join(image_path, 'cuda', '*.cpp'))) + + if png_found or jpeg_found: + ext_modules.append(extension( + 'torchvision.image', + image_src, + include_dirs=image_include + include_dirs + [image_path], + library_dirs=image_library + library_dirs, + define_macros=image_macros, + libraries=image_link_flags, + extra_compile_args=extra_compile_args + )) + + ffmpeg_exe = distutils.spawn.find_executable('ffmpeg') + has_ffmpeg = ffmpeg_exe is not None + print("FFmpeg found: {}".format(has_ffmpeg)) + + if has_ffmpeg: + 
ffmpeg_libraries = { + 'libavcodec', + 'libavformat', + 'libavutil', + 'libswresample', + 'libswscale' + } + + ffmpeg_bin = os.path.dirname(ffmpeg_exe) + ffmpeg_root = os.path.dirname(ffmpeg_bin) + ffmpeg_include_dir = os.path.join(ffmpeg_root, 'include') + ffmpeg_library_dir = os.path.join(ffmpeg_root, 'lib') + + gcc = distutils.spawn.find_executable('gcc') + platform_tag = subprocess.run( + [gcc, '-print-multiarch'], stdout=subprocess.PIPE) + platform_tag = platform_tag.stdout.strip().decode('utf-8') + + if platform_tag: + # Most probably a Debian-based distribution + ffmpeg_include_dir = [ + ffmpeg_include_dir, + os.path.join(ffmpeg_include_dir, platform_tag) + ] + ffmpeg_library_dir = [ + ffmpeg_library_dir, + os.path.join(ffmpeg_library_dir, platform_tag) + ] + else: + ffmpeg_include_dir = [ffmpeg_include_dir] + ffmpeg_library_dir = [ffmpeg_library_dir] + + has_ffmpeg = True + for library in ffmpeg_libraries: + library_found = False + for search_path in ffmpeg_include_dir + include_dirs: + full_path = os.path.join(search_path, library, '*.h') + library_found |= len(glob.glob(full_path)) > 0 + + if not library_found: + print(f'{library} header files were not found, disabling ffmpeg support') + has_ffmpeg = False + + if has_ffmpeg: + print("ffmpeg include path: {}".format(ffmpeg_include_dir)) + print("ffmpeg library_dir: {}".format(ffmpeg_library_dir)) + + # TorchVision base decoder + video reader + video_reader_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video_reader') + video_reader_src = glob.glob(os.path.join(video_reader_src_dir, "*.cpp")) + base_decoder_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'decoder') + base_decoder_src = glob.glob( + os.path.join(base_decoder_src_dir, "*.cpp")) + # Torchvision video API + videoapi_src_dir = os.path.join(this_dir, 'torchvision', 'csrc', 'io', 'video') + videoapi_src = glob.glob(os.path.join(videoapi_src_dir, "*.cpp")) + # exclude tests + base_decoder_src = [x for x in base_decoder_src if '_test.cpp' not in x] + + combined_src = video_reader_src + base_decoder_src + videoapi_src + + ext_modules.append( + CppExtension( + 'torchvision.video_reader', + combined_src, + include_dirs=[ + base_decoder_src_dir, + video_reader_src_dir, + videoapi_src_dir, + extensions_dir, + *ffmpeg_include_dir, + *include_dirs + ], + library_dirs=ffmpeg_library_dir + library_dirs, + libraries=[ + 'avcodec', + 'avformat', + 'avutil', + 'swresample', + 'swscale', + ], + extra_compile_args=["-std=c++14"] if os.name != 'nt' else ['/std:c++14', '/MP'], + extra_link_args=["-std=c++14" if os.name != 'nt' else '/std:c++14'], + ) + ) + + return ext_modules + + +class clean(distutils.command.clean.clean): + def run(self): + with open('.gitignore', 'r') as f: + ignores = f.read() + for wildcard in filter(None, ignores.split('\n')): + for filename in glob.glob(wildcard): + try: + os.remove(filename) + except OSError: + shutil.rmtree(filename, ignore_errors=True) + + # It's an old-style class in Python 2.7... 
+ distutils.command.clean.clean.run(self) + + +if __name__ == "__main__": + print("Building wheel {}-{}".format(package_name, version)) + + write_version_file() + + with open('README.rst') as f: + readme = f.read() + + setup( + # Metadata + name=package_name, + version=version, + author='PyTorch Core Team', + author_email='soumith@pytorch.org', + url='https://github.com/pytorch/vision', + description='image and video datasets and models for torch deep learning', + long_description=readme, + license='BSD', + + # Package info + packages=find_packages(exclude=('test',)), + package_data={ + package_name: ['*.dll', '*.dylib', '*.so'] + }, + zip_safe=False, + install_requires=requirements, + extras_require={ + "scipy": ["scipy"], + }, + ext_modules=get_extensions(), + cmdclass={ + 'build_ext': BuildExtension.with_options(no_python_abi_suffix=True), + 'clean': clean, + } + ) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/_assert_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/_assert_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e766e2df4b81dfaef6fe9671707ec07134428175 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/_assert_utils.py @@ -0,0 +1,11 @@ +"""This is a temporary module and should be removed as soon as torch.testing.assert_equal is supported.""" +# TODO: remove this as soon torch.testing.assert_equal is supported + +import functools + +import torch.testing + +__all__ = ["assert_equal"] + + +assert_equal = functools.partial(torch.testing.assert_close, rtol=0, atol=0) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/_utils_internal.py b/pretrained_model/pytorch_vision_v0.10.0/test/_utils_internal.py new file mode 100644 index 0000000000000000000000000000000000000000..1a32e6f2b2560917d2e3b36be397183dec848401 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/_utils_internal.py @@ -0,0 +1,7 @@ +import os + + +# Get relative file path +# this returns relative path from current file. +def get_relative_path(curr_file, *path_components): + return os.path.join(os.path.dirname(curr_file), *path_components) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/TensorFlow-LICENSE b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/TensorFlow-LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..c7563fe4e5b2b3969657d1c6d65d19901e9eb23f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/TensorFlow-LICENSE @@ -0,0 +1,13 @@ + Copyright 2019 The TensorFlow Authors. All rights reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/bad_huffman.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/bad_huffman.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_2.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_3.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_4.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/damaged_jpeg/corrupt34_4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/encode_jpeg/grace_hopper_517x606.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/encode_jpeg/grace_hopper_517x606.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/encode_jpeg/jpeg_write/grace_hopper_517x606_pil.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/encode_jpeg/jpeg_write/grace_hopper_517x606_pil.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/draw_boxes_util.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/draw_boxes_util.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/draw_boxes_vanilla.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/draw_boxes_vanilla.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a1.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a1.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a2.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a2.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a3.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/a/a3.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b1.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b1.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b2.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b2.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b3.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b3.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b4.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/imagefolder/b/b4.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/cmyk_pytorch.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/cmyk_pytorch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/gray_pytorch.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/gray_pytorch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/gray_pytorch.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/gray_pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/grayalpha_pytorch.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/grayalpha_pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/palette_pytorch.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/palette_pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgb_pytorch.jpg b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgb_pytorch.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgb_pytorch.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgb_pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgbalpha_pytorch.png b/pretrained_model/pytorch_vision_v0.10.0/test/assets/fakedata/logos/rgbalpha_pytorch.png new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/assets/gaussian_blur_opencv_results.pt b/pretrained_model/pytorch_vision_v0.10.0/test/assets/gaussian_blur_opencv_results.pt new file mode 100644 index 0000000000000000000000000000000000000000..d68f477fb4448794a791109b1efafaa9f351d8b4 Binary files /dev/null and b/pretrained_model/pytorch_vision_v0.10.0/test/assets/gaussian_blur_opencv_results.pt differ diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/R6llTwEh07w.mp4 b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/R6llTwEh07w.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/README b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/README new file mode 100644 index 0000000000000000000000000000000000000000..ef6dd4dac96662dc89bbf2c8c612391777adb4b0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/README @@ -0,0 +1,59 @@ +Video meta-information Notation + +Video File Name + video: codec, fps + audio: codec, bits per sample, sample rate + +Test videos are listed below. +-------------------------------- + +- RATRACE_wave_f_nm_np1_fr_goo_37.avi + - source: hmdb51 + - video: DivX MPEG-4 + - fps: 30 + - audio: N/A + +- SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi + - source: hmdb51 + - video: DivX MPEG-4 + - fps: 30 + - audio: N/A + +- TrumanShow_wave_f_nm_np1_fr_med_26.avi + - source: hmdb51 + - video: DivX MPEG-4 + - fps: 30 + - audio: N/A + +- v_SoccerJuggling_g23_c01.avi + - source: ucf101 + - video: Xvid MPEG-4 + - fps: 29.97 + - audio: N/A + +- v_SoccerJuggling_g24_c01.avi + - source: ucf101 + - video: Xvid MPEG-4 + - fps: 29.97 + - audio: N/A + +- R6llTwEh07w.mp4 + - source: kinetics-400 + - video: H-264 - MPEG-4 AVC (part 10) (avc1) + - fps: 30 + - audio: MPEG AAC audio (mp4a) + - sample rate: 44.1K Hz + +- SOX5yA1l24A.mp4 + - source: kinetics-400 + - video: H-264 - MPEG-4 AVC (part 10) (avc1) + - fps: 29.97 + - audio: MPEG AAC audio (mp4a) + - sample rate: 48K Hz + +- WUzgd7C1pWA.mp4 + - source: kinetics-400 + - video: H-264 - MPEG-4 AVC (part 10) (avc1) + - fps: 29.97 + - audio: MPEG AAC audio (mp4a) + - sample rate: 48K Hz diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/SOX5yA1l24A.mp4 b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/SOX5yA1l24A.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/TrumanShow_wave_f_nm_np1_fr_med_26.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/TrumanShow_wave_f_nm_np1_fr_med_26.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/WUzgd7C1pWA.mp4 b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/WUzgd7C1pWA.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/hmdb51_Turnk_r_Pippi_Michel_cartwheel_f_cm_np2_le_med_6.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/hmdb51_Turnk_r_Pippi_Michel_cartwheel_f_cm_np2_le_med_6.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/v_SoccerJuggling_g23_c01.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/v_SoccerJuggling_g23_c01.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/v_SoccerJuggling_g24_c01.avi b/pretrained_model/pytorch_vision_v0.10.0/test/assets/videos/v_SoccerJuggling_g24_c01.avi new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/common_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/common_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..44c4a1fca77a9d569bfdb18acf4dbde72f5327d8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/common_utils.py @@ -0,0 +1,459 @@ +import os +import shutil +import tempfile +import contextlib +import unittest +import argparse +import sys +import io +import torch +import warnings +import __main__ +import random +import inspect + +from numbers import Number +from torch._six import string_classes +from collections import OrderedDict +from _utils_internal import get_relative_path + +import numpy as np +from PIL import Image + +from _assert_utils import assert_equal + +IS_PY39 = sys.version_info.major == 3 and sys.version_info.minor == 9 +PY39_SEGFAULT_SKIP_MSG = "Segmentation fault with Python 3.9, see https://github.com/pytorch/vision/issues/3367" +PY39_SKIP = unittest.skipIf(IS_PY39, PY39_SEGFAULT_SKIP_MSG) +IN_CIRCLE_CI = os.getenv("CIRCLECI", False) == 'true' +IN_RE_WORKER = os.environ.get("INSIDE_RE_WORKER") is not None +IN_FBCODE = os.environ.get("IN_FBCODE_TORCHVISION") == "1" +CUDA_NOT_AVAILABLE_MSG = 'CUDA device not available' + + +@contextlib.contextmanager +def get_tmp_dir(src=None, **kwargs): + tmp_dir = tempfile.mkdtemp(**kwargs) + if src is not None: + os.rmdir(tmp_dir) + shutil.copytree(src, tmp_dir) + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + + +def set_rng_seed(seed): + torch.manual_seed(seed) + random.seed(seed) + np.random.seed(seed) + + +ACCEPT = os.getenv('EXPECTTEST_ACCEPT', '0') == '1' +TEST_WITH_SLOW = os.getenv('PYTORCH_TEST_WITH_SLOW', '0') == '1' + + +class MapNestedTensorObjectImpl(object): + def __init__(self, tensor_map_fn): + self.tensor_map_fn = tensor_map_fn + + def __call__(self, object): + if isinstance(object, torch.Tensor): + return self.tensor_map_fn(object) + + elif isinstance(object, dict): + mapped_dict = {} + for key, value in object.items(): + mapped_dict[self(key)] = self(value) + return mapped_dict + + elif isinstance(object, (list, tuple)): + mapped_iter = [] + for iter in object: + mapped_iter.append(self(iter)) + return mapped_iter if not isinstance(object, tuple) else tuple(mapped_iter) + + else: + return 
object + + +def map_nested_tensor_object(object, tensor_map_fn): + impl = MapNestedTensorObjectImpl(tensor_map_fn) + return impl(object) + + +def is_iterable(obj): + try: + iter(obj) + return True + except TypeError: + return False + + +# adapted from TestCase in torch/test/common_utils to accept non-string +# inputs and set maximum binary size +class TestCase(unittest.TestCase): + precision = 1e-5 + + def _get_expected_file(self, name=None): + # NB: we take __file__ from the module that defined the test + # class, so we place the expect directory where the test script + # lives, NOT where test/common_utils.py lives. + module_id = self.__class__.__module__ + + # Determine expected file based on environment + expected_file_base = get_relative_path( + os.path.realpath(sys.modules[module_id].__file__), + "expect") + + # Note: for legacy reasons, the reference file names all had "ModelTester.test_" in their names + # We hardcode it here to avoid having to re-generate the reference files + expected_file = os.path.join(expected_file_base, 'ModelTester.test_' + name) + expected_file += "_expect.pkl" + + if not ACCEPT and not os.path.exists(expected_file): + raise RuntimeError( + f"No expect file exists for {os.path.basename(expected_file)} in {expected_file}; " + "to accept the current output, re-run the failing test after setting the EXPECTTEST_ACCEPT " + "env variable. For example: EXPECTTEST_ACCEPT=1 pytest test/test_models.py -k alexnet" + ) + + return expected_file + + def assertExpected(self, output, name, prec=None): + r""" + Test that a Python value matches the recorded contents of a file + based on a "check" name. The value must be + picklable with `torch.save`. This file + is placed in the 'expect' directory in the same directory + as the test script. You can automatically update the recorded test + output using an EXPECTTEST_ACCEPT=1 env variable. 
+ """ + expected_file = self._get_expected_file(name) + + if ACCEPT: + filename = {os.path.basename(expected_file)} + print("Accepting updated output for {}:\n\n{}".format(filename, output)) + torch.save(output, expected_file) + MAX_PICKLE_SIZE = 50 * 1000 # 50 KB + binary_size = os.path.getsize(expected_file) + if binary_size > MAX_PICKLE_SIZE: + raise RuntimeError("The output for {}, is larger than 50kb".format(filename)) + else: + expected = torch.load(expected_file) + rtol = atol = prec or self.precision + torch.testing.assert_close(output, expected, rtol=rtol, atol=atol, check_dtype=False) + + def assertEqual(self, x, y, prec=None, message='', allow_inf=False): + """ + This is copied from pytorch/test/common_utils.py's TestCase.assertEqual + """ + if isinstance(prec, str) and message == '': + message = prec + prec = None + if prec is None: + prec = self.precision + + if isinstance(x, torch.Tensor) and isinstance(y, Number): + self.assertEqual(x.item(), y, prec=prec, message=message, + allow_inf=allow_inf) + elif isinstance(y, torch.Tensor) and isinstance(x, Number): + self.assertEqual(x, y.item(), prec=prec, message=message, + allow_inf=allow_inf) + elif isinstance(x, torch.Tensor) and isinstance(y, torch.Tensor): + def assertTensorsEqual(a, b): + super(TestCase, self).assertEqual(a.size(), b.size(), message) + if a.numel() > 0: + if (a.device.type == 'cpu' and (a.dtype == torch.float16 or a.dtype == torch.bfloat16)): + # CPU half and bfloat16 tensors don't have the methods we need below + a = a.to(torch.float32) + b = b.to(a) + + if (a.dtype == torch.bool) != (b.dtype == torch.bool): + raise TypeError("Was expecting both tensors to be bool type.") + else: + if a.dtype == torch.bool and b.dtype == torch.bool: + # we want to respect precision but as bool doesn't support substraction, + # boolean tensor has to be converted to int + a = a.to(torch.int) + b = b.to(torch.int) + + diff = a - b + if a.is_floating_point(): + # check that NaNs are in the same locations + nan_mask = torch.isnan(a) + self.assertTrue(torch.equal(nan_mask, torch.isnan(b)), message) + diff[nan_mask] = 0 + # inf check if allow_inf=True + if allow_inf: + inf_mask = torch.isinf(a) + inf_sign = inf_mask.sign() + self.assertTrue(torch.equal(inf_sign, torch.isinf(b).sign()), message) + diff[inf_mask] = 0 + # TODO: implement abs on CharTensor (int8) + if diff.is_signed() and diff.dtype != torch.int8: + diff = diff.abs() + max_err = diff.max() + tolerance = prec + prec * abs(a.max()) + self.assertLessEqual(max_err, tolerance, message) + super(TestCase, self).assertEqual(x.is_sparse, y.is_sparse, message) + super(TestCase, self).assertEqual(x.is_quantized, y.is_quantized, message) + if x.is_sparse: + x = self.safeCoalesce(x) + y = self.safeCoalesce(y) + assertTensorsEqual(x._indices(), y._indices()) + assertTensorsEqual(x._values(), y._values()) + elif x.is_quantized and y.is_quantized: + self.assertEqual(x.qscheme(), y.qscheme(), prec=prec, + message=message, allow_inf=allow_inf) + if x.qscheme() == torch.per_tensor_affine: + self.assertEqual(x.q_scale(), y.q_scale(), prec=prec, + message=message, allow_inf=allow_inf) + self.assertEqual(x.q_zero_point(), y.q_zero_point(), + prec=prec, message=message, + allow_inf=allow_inf) + elif x.qscheme() == torch.per_channel_affine: + self.assertEqual(x.q_per_channel_scales(), y.q_per_channel_scales(), prec=prec, + message=message, allow_inf=allow_inf) + self.assertEqual(x.q_per_channel_zero_points(), y.q_per_channel_zero_points(), + prec=prec, message=message, + allow_inf=allow_inf) 
+ self.assertEqual(x.q_per_channel_axis(), y.q_per_channel_axis(), + prec=prec, message=message) + self.assertEqual(x.dtype, y.dtype) + self.assertEqual(x.int_repr().to(torch.int32), + y.int_repr().to(torch.int32), prec=prec, + message=message, allow_inf=allow_inf) + else: + assertTensorsEqual(x, y) + elif isinstance(x, string_classes) and isinstance(y, string_classes): + super(TestCase, self).assertEqual(x, y, message) + elif type(x) == set and type(y) == set: + super(TestCase, self).assertEqual(x, y, message) + elif isinstance(x, dict) and isinstance(y, dict): + if isinstance(x, OrderedDict) and isinstance(y, OrderedDict): + self.assertEqual(x.items(), y.items(), prec=prec, + message=message, allow_inf=allow_inf) + else: + self.assertEqual(set(x.keys()), set(y.keys()), prec=prec, + message=message, allow_inf=allow_inf) + key_list = list(x.keys()) + self.assertEqual([x[k] for k in key_list], + [y[k] for k in key_list], + prec=prec, message=message, + allow_inf=allow_inf) + elif is_iterable(x) and is_iterable(y): + super(TestCase, self).assertEqual(len(x), len(y), message) + for x_, y_ in zip(x, y): + self.assertEqual(x_, y_, prec=prec, message=message, + allow_inf=allow_inf) + elif isinstance(x, bool) and isinstance(y, bool): + super(TestCase, self).assertEqual(x, y, message) + elif isinstance(x, Number) and isinstance(y, Number): + inf = float("inf") + if abs(x) == inf or abs(y) == inf: + if allow_inf: + super(TestCase, self).assertEqual(x, y, message) + else: + self.fail("Expected finite numeric values - x={}, y={}".format(x, y)) + return + super(TestCase, self).assertLessEqual(abs(x - y), prec, message) + else: + super(TestCase, self).assertEqual(x, y, message) + + def check_jit_scriptable(self, nn_module, args, unwrapper=None, skip=False): + """ + Check that a nn.Module's results in TorchScript match eager and that it + can be exported + """ + if not TEST_WITH_SLOW or skip: + # TorchScript is not enabled, skip these tests + msg = "The check_jit_scriptable test for {} was skipped. " \ + "This test checks if the module's results in TorchScript " \ + "match eager and that it can be exported. 
To run these " \ + "tests make sure you set the environment variable " \ + "PYTORCH_TEST_WITH_SLOW=1 and that the test is not " \ + "manually skipped.".format(nn_module.__class__.__name__) + warnings.warn(msg, RuntimeWarning) + return None + + sm = torch.jit.script(nn_module) + + with freeze_rng_state(): + eager_out = nn_module(*args) + + with freeze_rng_state(): + script_out = sm(*args) + if unwrapper: + script_out = unwrapper(script_out) + + self.assertEqual(eager_out, script_out, prec=1e-4) + self.assertExportImportModule(sm, args) + + return sm + + def getExportImportCopy(self, m): + """ + Save and load a TorchScript model + """ + buffer = io.BytesIO() + torch.jit.save(m, buffer) + buffer.seek(0) + imported = torch.jit.load(buffer) + return imported + + def assertExportImportModule(self, m, args): + """ + Check that the results of a model are the same after saving and loading + """ + m_import = self.getExportImportCopy(m) + with freeze_rng_state(): + results = m(*args) + with freeze_rng_state(): + results_from_imported = m_import(*args) + self.assertEqual(results, results_from_imported, prec=3e-5) + + +@contextlib.contextmanager +def freeze_rng_state(): + rng_state = torch.get_rng_state() + if torch.cuda.is_available(): + cuda_rng_state = torch.cuda.get_rng_state() + yield + if torch.cuda.is_available(): + torch.cuda.set_rng_state(cuda_rng_state) + torch.set_rng_state(rng_state) + + +class TransformsTester(unittest.TestCase): + + def _create_data(self, height=3, width=3, channels=3, device="cpu"): + tensor = torch.randint(0, 256, (channels, height, width), dtype=torch.uint8, device=device) + pil_img = Image.fromarray(tensor.permute(1, 2, 0).contiguous().cpu().numpy()) + return tensor, pil_img + + def _create_data_batch(self, height=3, width=3, channels=3, num_samples=4, device="cpu"): + batch_tensor = torch.randint( + 0, 256, + (num_samples, channels, height, width), + dtype=torch.uint8, + device=device + ) + return batch_tensor + + def compareTensorToPIL(self, tensor, pil_image, msg=None): + np_pil_image = np.array(pil_image) + if np_pil_image.ndim == 2: + np_pil_image = np_pil_image[:, :, None] + pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))) + if msg is None: + msg = "tensor:\n{} \ndid not equal PIL tensor:\n{}".format(tensor, pil_tensor) + assert_equal(tensor.cpu(), pil_tensor, check_stride=False, msg=msg) + + def approxEqualTensorToPIL(self, tensor, pil_image, tol=1e-5, msg=None, agg_method="mean", + allowed_percentage_diff=None): + np_pil_image = np.array(pil_image) + if np_pil_image.ndim == 2: + np_pil_image = np_pil_image[:, :, None] + pil_tensor = torch.as_tensor(np_pil_image.transpose((2, 0, 1))).to(tensor) + + if allowed_percentage_diff is not None: + # Assert that less than a given %age of pixels are different + self.assertTrue( + (tensor != pil_tensor).to(torch.float).mean() <= allowed_percentage_diff + ) + # error value can be mean absolute error, max abs error + # Convert to float to avoid underflow when computing absolute difference + tensor = tensor.to(torch.float) + pil_tensor = pil_tensor.to(torch.float) + err = getattr(torch, agg_method)(torch.abs(tensor - pil_tensor)).item() + self.assertTrue( + err < tol, + msg="{}: err={}, tol={}: \n{}\nvs\n{}".format(msg, err, tol, tensor[0, :10, :10], pil_tensor[0, :10, :10]) + ) + + +def cycle_over(objs): + for idx, obj in enumerate(objs): + yield obj, objs[:idx] + objs[idx + 1:] + + +def int_dtypes(): + return torch.testing.integral_types() + + +def float_dtypes(): + return torch.testing.floating_types() + + 
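As a rough illustration of how the common_utils.py helpers defined so far tend to be combined, the sketch below assumes a sibling test module that can import common_utils and _assert_utils; the class and test names are invented for the example.

# Hypothetical sibling test module (not part of the patch).
import torch

from _assert_utils import assert_equal
from common_utils import TransformsTester, freeze_rng_state


class ExampleTransformTest(TransformsTester):
    def test_tensor_matches_pil(self):
        # _create_data builds a random uint8 CHW tensor plus the equivalent PIL image,
        # so an identity "transform" must keep both representations in sync.
        tensor, pil_img = self._create_data(height=4, width=4, channels=3)
        self.compareTensorToPIL(tensor, pil_img)

    def test_freeze_rng_state_is_reproducible(self):
        # freeze_rng_state() restores the RNG state on exit, so two identical
        # blocks draw exactly the same random numbers.
        with freeze_rng_state():
            first = torch.rand(3, 4, 4)
        with freeze_rng_state():
            second = torch.rand(3, 4, 4)
        assert_equal(first, second)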
+@contextlib.contextmanager +def disable_console_output(): + with contextlib.ExitStack() as stack, open(os.devnull, "w") as devnull: + stack.enter_context(contextlib.redirect_stdout(devnull)) + stack.enter_context(contextlib.redirect_stderr(devnull)) + yield + + +def call_args_to_kwargs_only(call_args, *callable_or_arg_names): + callable_or_arg_name = callable_or_arg_names[0] + if callable(callable_or_arg_name): + argspec = inspect.getfullargspec(callable_or_arg_name) + arg_names = argspec.args + if isinstance(callable_or_arg_name, type): + # remove self + arg_names.pop(0) + else: + arg_names = callable_or_arg_names + + args, kwargs = call_args + kwargs_only = kwargs.copy() + kwargs_only.update(dict(zip(arg_names, args))) + return kwargs_only + + +def cpu_and_gpu(): + # TODO: make this properly handle CircleCI + import pytest # noqa + + # ignore CPU tests in RE as they're already covered by another contbuild + devices = [] if IN_RE_WORKER else ['cpu'] + + if torch.cuda.is_available(): + cuda_marks = () + elif IN_FBCODE: + # Dont collect cuda tests on fbcode if the machine doesnt have a GPU + # This avoids skipping the tests. More robust would be to detect if + # we're in sancastle instead of fbcode? + cuda_marks = pytest.mark.dont_collect() + else: + cuda_marks = pytest.mark.skip(reason=CUDA_NOT_AVAILABLE_MSG) + + devices.append(pytest.param('cuda', marks=cuda_marks)) + + return devices + + +def needs_cuda(test_func): + # TODO: make this properly handle CircleCI + import pytest # noqa + + if IN_FBCODE and not IN_RE_WORKER: + # We don't want to skip in fbcode, so we just don't collect + # TODO: slightly more robust way would be to detect if we're in a sandcastle instance + # so that the test will still be collected (and skipped) in the devvms. + return pytest.mark.dont_collect(test_func) + elif torch.cuda.is_available(): + return test_func + else: + return pytest.mark.skip(reason=CUDA_NOT_AVAILABLE_MSG)(test_func) + + +def cpu_only(test_func): + # TODO: make this properly handle CircleCI + import pytest # noqa + + if IN_RE_WORKER: + # The assumption is that all RE workers have GPUs. + return pytest.mark.dont_collect(test_func) + else: + return test_func diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/conftest.py b/pretrained_model/pytorch_vision_v0.10.0/test/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..6e10e4ef071417f617ecb31f2212830c61e7da3e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/conftest.py @@ -0,0 +1,14 @@ +def pytest_configure(config): + # register an additional marker (see pytest_collection_modifyitems) + config.addinivalue_line( + "markers", "dont_collect: marks a test that should not be collected (avoids skipping it)" + ) + + +def pytest_collection_modifyitems(items): + # This hook is called by pytest after it has collected the tests (google its name!) + # We can ignore some tests as we see fit here. In particular we ignore the tests that + # we have marked with the custom 'dont_collect' mark. This avoids skipping the tests, + # since the internal fb infra doesn't like skipping tests. 
+ to_keep = [item for item in items if item.get_closest_marker('dont_collect') is None] + items[:] = to_keep diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/cpp/test_custom_operators.cpp b/pretrained_model/pytorch_vision_v0.10.0/test/cpp/test_custom_operators.cpp new file mode 100644 index 0000000000000000000000000000000000000000..499683a78af2159f50272979c34dc371aa36f5c5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/cpp/test_custom_operators.cpp @@ -0,0 +1,58 @@ +#include <gtest/gtest.h> +#include <torch/script.h> +#include <torch/torch.h> + +// FIXME: the include path differs from OSS due to the extra csrc +#include <torchvision/csrc/ops/nms.h> + +TEST(test_custom_operators, nms) { + // make sure that the torchvision ops are visible to the jit interpreter + auto& ops = torch::jit::getAllOperatorsFor(torch::jit::Symbol::fromQualString("torchvision::nms")); + ASSERT_EQ(ops.size(), 1); + + auto& op = ops.front(); + ASSERT_EQ(op->schema().name(), "torchvision::nms"); + + torch::jit::Stack stack; + at::Tensor boxes = at::rand({50, 4}), scores = at::rand({50}); + double thresh = 0.7; + + torch::jit::push(stack, boxes, scores, thresh); + op->getOperation()(&stack); + at::Tensor output_jit; + torch::jit::pop(stack, output_jit); + + at::Tensor output = vision::ops::nms(boxes, scores, thresh); + ASSERT_TRUE(output_jit.allclose(output)); + +} + +TEST(test_custom_operators, roi_align_visible) { + // make sure that the torchvision ops are visible to the jit interpreter even if + // not explicitly included + auto& ops = torch::jit::getAllOperatorsFor(torch::jit::Symbol::fromQualString("torchvision::roi_align")); + ASSERT_EQ(ops.size(), 1); + + auto& op = ops.front(); + ASSERT_EQ(op->schema().name(), "torchvision::roi_align"); + + torch::jit::Stack stack; + float roi_data[] = { + 0., 0., 0., 5., 5., + 0., 5., 5., 10., 10. 
+ }; + at::Tensor input = at::rand({1, 2, 10, 10}), rois = at::from_blob(roi_data, {2, 5}); + double spatial_scale = 1.0; + int64_t pooled_height = 3, pooled_width = 3, sampling_ratio = -1; + bool aligned = true; + + torch::jit::push(stack, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned); + op->getOperation()(&stack); + at::Tensor output_jit; + torch::jit::pop(stack, output_jit); + + ASSERT_EQ(output_jit.sizes()[0], 2); + ASSERT_EQ(output_jit.sizes()[1], 2); + ASSERT_EQ(output_jit.sizes()[2], 3); + ASSERT_EQ(output_jit.sizes()[3], 3); +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/datasets_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/datasets_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8077a03b91062d73abb955b8f3fd8dcdcf8dcf60 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/datasets_utils.py @@ -0,0 +1,848 @@ +import contextlib +import functools +import importlib +import inspect +import itertools +import os +import pathlib +import random +import string +import unittest +import unittest.mock +from collections import defaultdict +from typing import Any, Callable, Dict, Iterator, List, Optional, Sequence, Tuple, Union + +import PIL +import PIL.Image +import torch +import torchvision.datasets +import torchvision.io + +from common_utils import get_tmp_dir, disable_console_output + + +__all__ = [ + "UsageError", + "lazy_importer", + "test_all_configs", + "DatasetTestCase", + "ImageDatasetTestCase", + "VideoDatasetTestCase", + "create_image_or_video_tensor", + "create_image_file", + "create_image_folder", + "create_video_file", + "create_video_folder", + "create_random_string", +] + + +class UsageError(Exception): + """Should be raised in case an error happens in the setup rather than the test.""" + + +class LazyImporter: + r"""Lazy importer for additional dependencies. + + Some datasets require additional packages that are no direct dependencies of torchvision. Instances of this class + provide modules listed in MODULES as attributes. They are only imported when accessed. + + """ + MODULES = ( + "av", + "lmdb", + "pycocotools", + "requests", + "scipy.io", + "scipy.sparse", + ) + + def __init__(self): + modules = defaultdict(list) + for module in self.MODULES: + module, *submodules = module.split(".", 1) + if submodules: + modules[module].append(submodules[0]) + else: + # This introduces the module so that it is known when we later iterate over the dictionary. + modules.__missing__(module) + + for module, submodules in modules.items(): + # We need the quirky 'module=module' and submodules=submodules arguments to the lambda since otherwise the + # lookup for these would happen at runtime rather than at definition. Thus, without it, every property + # would try to import the last item in 'modules' + setattr( + type(self), + module, + property(lambda self, module=module, submodules=submodules: LazyImporter._import(module, submodules)), + ) + + @staticmethod + def _import(package, subpackages): + try: + module = importlib.import_module(package) + except ImportError as error: + raise UsageError( + f"Failed to import module '{package}'. " + f"This probably means that the current test case needs '{package}' installed, " + f"but it is not a dependency of torchvision. " + f"You need to install it manually, for example 'pip install {package}'." 
+ ) from error + + for name in subpackages: + importlib.import_module(f".{name}", package=package) + + return module + + +lazy_importer = LazyImporter() + + +def requires_lazy_imports(*modules): + def outer_wrapper(fn): + @functools.wraps(fn) + def inner_wrapper(*args, **kwargs): + for module in modules: + getattr(lazy_importer, module.replace(".", "_")) + return fn(*args, **kwargs) + + return inner_wrapper + + return outer_wrapper + + +def test_all_configs(test): + """Decorator to run test against all configurations. + + Add this as decorator to an arbitrary test to run it against all configurations. This includes + :attr:`DatasetTestCase.DEFAULT_CONFIG` and :attr:`DatasetTestCase.ADDITIONAL_CONFIGS`. + + The current configuration is provided as the first parameter for the test: + + .. code-block:: + + @test_all_configs() + def test_foo(self, config): + pass + + .. note:: + + This will try to remove duplicate configurations. During this process it will not not preserve a potential + ordering of the configurations or an inner ordering of a configuration. + """ + + def maybe_remove_duplicates(configs): + try: + return [dict(config_) for config_ in set(tuple(sorted(config.items())) for config in configs)] + except TypeError: + # A TypeError will be raised if a value of any config is not hashable, e.g. a list. In that case duplicate + # removal would be a lot more elaborate and we simply bail out. + return configs + + @functools.wraps(test) + def wrapper(self): + configs = [] + if self.DEFAULT_CONFIG is not None: + configs.append(self.DEFAULT_CONFIG) + if self.ADDITIONAL_CONFIGS is not None: + configs.extend(self.ADDITIONAL_CONFIGS) + + if not configs: + configs = [self._KWARG_DEFAULTS.copy()] + else: + configs = maybe_remove_duplicates(configs) + + for config in configs: + with self.subTest(**config): + test(self, config) + + return wrapper + + +def combinations_grid(**kwargs): + """Creates a grid of input combinations. + + Each element in the returned sequence is a dictionary containing one possible combination as values. + + Example: + >>> combinations_grid(foo=("bar", "baz"), spam=("eggs", "ham")) + [ + {'foo': 'bar', 'spam': 'eggs'}, + {'foo': 'bar', 'spam': 'ham'}, + {'foo': 'baz', 'spam': 'eggs'}, + {'foo': 'baz', 'spam': 'ham'} + ] + """ + return [dict(zip(kwargs.keys(), values)) for values in itertools.product(*kwargs.values())] + + +class DatasetTestCase(unittest.TestCase): + """Abstract base class for all dataset testcases. + + You have to overwrite the following class attributes: + + - DATASET_CLASS (torchvision.datasets.VisionDataset): Class of dataset to be tested. + - FEATURE_TYPES (Sequence[Any]): Types of the elements returned by index access of the dataset. Instead of + providing these manually, you can instead subclass ``ImageDatasetTestCase`` or ``VideoDatasetTestCase```to + get a reasonable default, that should work for most cases. Each entry of the sequence may be a tuple, + to indicate multiple possible values. + + Optionally, you can overwrite the following class attributes: + + - DEFAULT_CONFIG (Dict[str, Any]): Config that will be used by default. If omitted, this defaults to all + keyword arguments of the dataset minus ``transform``, ``target_transform``, ``transforms``, and + ``download``. Overwrite this if you want to use a default value for a parameter for which the dataset does + not provide one. + - ADDITIONAL_CONFIGS (Sequence[Dict[str, Any]]): Additional configs that should be tested. 
Each dictionary can + contain an arbitrary combination of dataset parameters that are **not** ``transform``, ``target_transform``, + ``transforms``, or ``download``. + - REQUIRED_PACKAGES (Iterable[str]): Additional dependencies to use the dataset. If these packages are not + available, the tests are skipped. + + Additionally, you need to overwrite the ``inject_fake_data()`` method that provides the data that the tests rely on. + The fake data should resemble the original data as close as necessary, while containing only few examples. During + the creation of the dataset check-, download-, and extract-functions from ``torchvision.datasets.utils`` are + disabled. + + Without further configuration, the testcase will test if + + 1. the dataset raises a :class:`FileNotFoundError` or a :class:`RuntimeError` if the data files are not found or + corrupted, + 2. the dataset inherits from `torchvision.datasets.VisionDataset`, + 3. the dataset can be turned into a string, + 4. the feature types of a returned example matches ``FEATURE_TYPES``, + 5. the number of examples matches the injected fake data, and + 6. the dataset calls ``transform``, ``target_transform``, or ``transforms`` if available when accessing data. + + Case 3. to 6. are tested against all configurations in ``CONFIGS``. + + To add dataset-specific tests, create a new method that takes no arguments with ``test_`` as a name prefix: + + .. code-block:: + + def test_foo(self): + pass + + If you want to run the test against all configs, add the ``@test_all_configs`` decorator to the definition and + accept a single argument: + + .. code-block:: + + @test_all_configs + def test_bar(self, config): + pass + + Within the test you can use the ``create_dataset()`` method that yields the dataset as well as additional + information provided by the ``ìnject_fake_data()`` method: + + .. code-block:: + + def test_baz(self): + with self.create_dataset() as (dataset, info): + pass + """ + + DATASET_CLASS = None + FEATURE_TYPES = None + + DEFAULT_CONFIG = None + ADDITIONAL_CONFIGS = None + REQUIRED_PACKAGES = None + + # These keyword arguments are checked by test_transforms in case they are available in DATASET_CLASS. + _TRANSFORM_KWARGS = { + "transform", + "target_transform", + "transforms", + } + # These keyword arguments get a 'special' treatment and should not be set in DEFAULT_CONFIG or ADDITIONAL_CONFIGS. + _SPECIAL_KWARGS = { + *_TRANSFORM_KWARGS, + "download", + } + + # These fields are populated during setupClass() within _populate_private_class_attributes() + + # This will be a dictionary containing all keyword arguments with their respective default values extracted from + # the dataset constructor. + _KWARG_DEFAULTS = None + # This will be a set of all _SPECIAL_KWARGS that the dataset constructor takes. + _HAS_SPECIAL_KWARG = None + + # These functions are disabled during dataset creation in create_dataset(). + _CHECK_FUNCTIONS = { + "check_md5", + "check_integrity", + } + _DOWNLOAD_EXTRACT_FUNCTIONS = { + "download_url", + "download_file_from_google_drive", + "extract_archive", + "download_and_extract_archive", + } + + def dataset_args(self, tmpdir: str, config: Dict[str, Any]) -> Sequence[Any]: + """Define positional arguments passed to the dataset. + + .. note:: + + The default behavior is only valid if the dataset to be tested has ``root`` as the only required parameter. + Otherwise you need to overwrite this method. + + Args: + tmpdir (str): Path to a temporary directory. 
For most cases this acts as root directory for the dataset + to be created and in turn also for the fake data injected here. + config (Dict[str, Any]): Configuration that will be passed to the dataset constructor. It provides at least + fields for all dataset parameters with default values. + + Returns: + (Tuple[str]): ``tmpdir`` which corresponds to ``root`` for most datasets. + """ + return (tmpdir,) + + def inject_fake_data(self, tmpdir: str, config: Dict[str, Any]) -> Union[int, Dict[str, Any]]: + """Inject fake data for dataset into a temporary directory. + + During the creation of the dataset the download and extract logic is disabled. Thus, the fake data injected + here needs to resemble the raw data, i.e. the state of the dataset directly after the files are downloaded and + potentially extracted. + + Args: + tmpdir (str): Path to a temporary directory. For most cases this acts as root directory for the dataset + to be created and in turn also for the fake data injected here. + config (Dict[str, Any]): Configuration that will be passed to the dataset constructor. It provides at least + fields for all dataset parameters with default values. + + Needs to return one of the following: + + 1. (int): Number of examples in the dataset to be created, or + 2. (Dict[str, Any]): Additional information about the injected fake data. Must contain the field + ``"num_examples"`` that corresponds to the number of examples in the dataset to be created. + """ + raise NotImplementedError("You need to provide fake data in order for the tests to run.") + + @contextlib.contextmanager + def create_dataset( + self, + config: Optional[Dict[str, Any]] = None, + inject_fake_data: bool = True, + patch_checks: Optional[bool] = None, + **kwargs: Any, + ) -> Iterator[Tuple[torchvision.datasets.VisionDataset, Dict[str, Any]]]: + r"""Create the dataset in a temporary directory. + + The configuration passed to the dataset is populated to contain at least all parameters with default values. + For this the following order of precedence is used: + + 1. Parameters in :attr:`kwargs`. + 2. Configuration in :attr:`config`. + 3. Configuration in :attr:`~DatasetTestCase.DEFAULT_CONFIG`. + 4. Default parameters of the dataset. + + Args: + config (Optional[Dict[str, Any]]): Configuration that will be used to create the dataset. + inject_fake_data (bool): If ``True`` (default) inject the fake data with :meth:`.inject_fake_data` before + creating the dataset. + patch_checks (Optional[bool]): If ``True`` disable integrity check logic while creating the dataset. If + omitted defaults to the same value as ``inject_fake_data``. + **kwargs (Any): Additional parameters passed to the dataset. These parameters take precedence in case they + overlap with ``config``. + + Yields: + dataset (torchvision.dataset.VisionDataset): Dataset. + info (Dict[str, Any]): Additional information about the injected fake data. See :meth:`.inject_fake_data` + for details. 
+ """ + if patch_checks is None: + patch_checks = inject_fake_data + + special_kwargs, other_kwargs = self._split_kwargs(kwargs) + + complete_config = self._KWARG_DEFAULTS.copy() + if self.DEFAULT_CONFIG: + complete_config.update(self.DEFAULT_CONFIG) + if config: + complete_config.update(config) + if other_kwargs: + complete_config.update(other_kwargs) + + if "download" in self._HAS_SPECIAL_KWARG and special_kwargs.get("download", False): + # override download param to False param if its default is truthy + special_kwargs["download"] = False + + patchers = self._patch_download_extract() + if patch_checks: + patchers.update(self._patch_checks()) + + with get_tmp_dir() as tmpdir: + args = self.dataset_args(tmpdir, complete_config) + info = self._inject_fake_data(tmpdir, complete_config) if inject_fake_data else None + + with self._maybe_apply_patches(patchers), disable_console_output(): + dataset = self.DATASET_CLASS(*args, **complete_config, **special_kwargs) + + yield dataset, info + + @classmethod + def setUpClass(cls): + cls._verify_required_public_class_attributes() + cls._populate_private_class_attributes() + cls._process_optional_public_class_attributes() + super().setUpClass() + + @classmethod + def _verify_required_public_class_attributes(cls): + if cls.DATASET_CLASS is None: + raise UsageError( + "The class attribute 'DATASET_CLASS' needs to be overwritten. " + "It should contain the class of the dataset to be tested." + ) + if cls.FEATURE_TYPES is None: + raise UsageError( + "The class attribute 'FEATURE_TYPES' needs to be overwritten. " + "It should contain a sequence of types that the dataset returns when accessed by index." + ) + + @classmethod + def _populate_private_class_attributes(cls): + defaults = [] + for cls_ in cls.DATASET_CLASS.__mro__: + if cls_ is torchvision.datasets.VisionDataset: + break + + argspec = inspect.getfullargspec(cls_.__init__) + + if not argspec.defaults: + continue + + defaults.append( + {kwarg: default for kwarg, default in zip(argspec.args[-len(argspec.defaults):], argspec.defaults)} + ) + + if not argspec.varkw: + break + + kwarg_defaults = dict() + for config in reversed(defaults): + kwarg_defaults.update(config) + + has_special_kwargs = set() + for name in cls._SPECIAL_KWARGS: + if name not in kwarg_defaults: + continue + + del kwarg_defaults[name] + has_special_kwargs.add(name) + + cls._KWARG_DEFAULTS = kwarg_defaults + cls._HAS_SPECIAL_KWARG = has_special_kwargs + + @classmethod + def _process_optional_public_class_attributes(cls): + def check_config(config, name): + special_kwargs = tuple(f"'{name}'" for name in cls._SPECIAL_KWARGS if name in config) + if special_kwargs: + raise UsageError( + f"{name} contains a value for the parameter(s) {', '.join(special_kwargs)}. " + f"These are handled separately by the test case and should not be set here. " + f"If you need to test some custom behavior regarding these parameters, " + f"you need to write a custom test (*not* test case), e.g. test_custom_transform()." 
+ ) + + if cls.DEFAULT_CONFIG is not None: + check_config(cls.DEFAULT_CONFIG, "DEFAULT_CONFIG") + + if cls.ADDITIONAL_CONFIGS is not None: + for idx, config in enumerate(cls.ADDITIONAL_CONFIGS): + check_config(config, f"CONFIGS[{idx}]") + + if cls.REQUIRED_PACKAGES: + missing_pkgs = [] + for pkg in cls.REQUIRED_PACKAGES: + try: + importlib.import_module(pkg) + except ImportError: + missing_pkgs.append(f"'{pkg}'") + + if missing_pkgs: + raise unittest.SkipTest( + f"The package(s) {', '.join(missing_pkgs)} are required to load the dataset " + f"'{cls.DATASET_CLASS.__name__}', but are not installed." + ) + + def _split_kwargs(self, kwargs): + special_kwargs = kwargs.copy() + other_kwargs = {key: special_kwargs.pop(key) for key in set(special_kwargs.keys()) - self._SPECIAL_KWARGS} + return special_kwargs, other_kwargs + + def _inject_fake_data(self, tmpdir, config): + info = self.inject_fake_data(tmpdir, config) + if info is None: + raise UsageError( + "The method 'inject_fake_data' needs to return at least an integer indicating the number of " + "examples for the current configuration." + ) + elif isinstance(info, int): + info = dict(num_examples=info) + elif not isinstance(info, dict): + raise UsageError( + f"The additional information returned by the method 'inject_fake_data' must be either an " + f"integer indicating the number of examples for the current configuration or a dictionary with " + f"the same content. Got {type(info)} instead." + ) + elif "num_examples" not in info: + raise UsageError( + "The information dictionary returned by the method 'inject_fake_data' must contain a " + "'num_examples' field that holds the number of examples for the current configuration." + ) + return info + + def _patch_download_extract(self): + module = inspect.getmodule(self.DATASET_CLASS).__name__ + return {unittest.mock.patch(f"{module}.{function}") for function in self._DOWNLOAD_EXTRACT_FUNCTIONS} + + def _patch_checks(self): + module = inspect.getmodule(self.DATASET_CLASS).__name__ + return {unittest.mock.patch(f"{module}.{function}", return_value=True) for function in self._CHECK_FUNCTIONS} + + @contextlib.contextmanager + def _maybe_apply_patches(self, patchers): + with contextlib.ExitStack() as stack: + mocks = {} + for patcher in patchers: + with contextlib.suppress(AttributeError): + mocks[patcher.target] = stack.enter_context(patcher) + yield mocks + + def test_not_found_or_corrupted(self): + with self.assertRaises((FileNotFoundError, RuntimeError)): + with self.create_dataset(inject_fake_data=False): + pass + + def test_smoke(self): + with self.create_dataset() as (dataset, _): + self.assertIsInstance(dataset, torchvision.datasets.VisionDataset) + + @test_all_configs + def test_str_smoke(self, config): + with self.create_dataset(config) as (dataset, _): + self.assertIsInstance(str(dataset), str) + + @test_all_configs + def test_feature_types(self, config): + with self.create_dataset(config) as (dataset, _): + example = dataset[0] + + if len(self.FEATURE_TYPES) > 1: + actual = len(example) + expected = len(self.FEATURE_TYPES) + self.assertEqual( + actual, + expected, + f"The number of the returned features does not match the the number of elements in FEATURE_TYPES: " + f"{actual} != {expected}", + ) + else: + example = (example,) + + for idx, (feature, expected_feature_type) in enumerate(zip(example, self.FEATURE_TYPES)): + with self.subTest(idx=idx): + self.assertIsInstance(feature, expected_feature_type) + + @test_all_configs + def test_num_examples(self, config): + with 
self.create_dataset(config) as (dataset, info): + self.assertEqual(len(dataset), info["num_examples"]) + + @test_all_configs + def test_transforms(self, config): + mock = unittest.mock.Mock(wraps=lambda *args: args[0] if len(args) == 1 else args) + for kwarg in self._TRANSFORM_KWARGS: + if kwarg not in self._HAS_SPECIAL_KWARG: + continue + + mock.reset_mock() + + with self.subTest(kwarg=kwarg): + with self.create_dataset(config, **{kwarg: mock}) as (dataset, _): + dataset[0] + + mock.assert_called() + + +class ImageDatasetTestCase(DatasetTestCase): + """Abstract base class for image dataset testcases. + + - Overwrites the FEATURE_TYPES class attribute to expect a :class:`PIL.Image.Image` and an integer label. + """ + + FEATURE_TYPES = (PIL.Image.Image, int) + + @contextlib.contextmanager + def create_dataset( + self, + config: Optional[Dict[str, Any]] = None, + inject_fake_data: bool = True, + patch_checks: Optional[bool] = None, + **kwargs: Any, + ) -> Iterator[Tuple[torchvision.datasets.VisionDataset, Dict[str, Any]]]: + with super().create_dataset( + config=config, + inject_fake_data=inject_fake_data, + patch_checks=patch_checks, + **kwargs, + ) as (dataset, info): + # PIL.Image.open() only loads the image meta data upfront and keeps the file open until the first access + # to the pixel data occurs. Trying to delete such a file results in an PermissionError on Windows. Thus, we + # force-load opened images. + # This problem only occurs during testing since some tests, e.g. DatasetTestCase.test_feature_types open an + # image, but never use the underlying data. During normal operation it is reasonable to assume that the + # user wants to work with the image he just opened rather than deleting the underlying file. + with self._force_load_images(): + yield dataset, info + + @contextlib.contextmanager + def _force_load_images(self): + open = PIL.Image.open + + def new(fp, *args, **kwargs): + image = open(fp, *args, **kwargs) + if isinstance(fp, (str, pathlib.Path)): + image.load() + return image + + with unittest.mock.patch("PIL.Image.open", new=new): + yield + + +class VideoDatasetTestCase(DatasetTestCase): + """Abstract base class for video dataset testcases. + + - Overwrites the 'FEATURE_TYPES' class attribute to expect two :class:`torch.Tensor` s for the video and audio as + well as an integer label. + - Overwrites the 'REQUIRED_PACKAGES' class attribute to require PyAV (``av``). + - Adds the 'DEFAULT_FRAMES_PER_CLIP' class attribute. If no 'frames_per_clip' is provided by 'inject_fake_data()' + and it is the last parameter without a default value in the dataset constructor, the value of the + 'DEFAULT_FRAMES_PER_CLIP' class attribute is appended to the output. 
+ """ + + FEATURE_TYPES = (torch.Tensor, torch.Tensor, int) + REQUIRED_PACKAGES = ("av",) + + DEFAULT_FRAMES_PER_CLIP = 1 + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.dataset_args = self._set_default_frames_per_clip(self.dataset_args) + + def _set_default_frames_per_clip(self, inject_fake_data): + argspec = inspect.getfullargspec(self.DATASET_CLASS.__init__) + args_without_default = argspec.args[1:-len(argspec.defaults)] + frames_per_clip_last = args_without_default[-1] == "frames_per_clip" + + @functools.wraps(inject_fake_data) + def wrapper(tmpdir, config): + args = inject_fake_data(tmpdir, config) + if frames_per_clip_last and len(args) == len(args_without_default) - 1: + args = (*args, self.DEFAULT_FRAMES_PER_CLIP) + + return args + + return wrapper + + +def create_image_or_video_tensor(size: Sequence[int]) -> torch.Tensor: + r"""Create a random uint8 tensor. + + Args: + size (Sequence[int]): Size of the tensor. + """ + return torch.randint(0, 256, size, dtype=torch.uint8) + + +def create_image_file( + root: Union[pathlib.Path, str], name: Union[pathlib.Path, str], size: Union[Sequence[int], int] = 10, **kwargs: Any +) -> pathlib.Path: + """Create an image file from random data. + + Args: + root (Union[str, pathlib.Path]): Root directory the image file will be placed in. + name (Union[str, pathlib.Path]): Name of the image file. + size (Union[Sequence[int], int]): Size of the image that represents the ``(num_channels, height, width)``. If + scalar, the value is used for the height and width. If not provided, three channels are assumed. + kwargs (Any): Additional parameters passed to :meth:`PIL.Image.Image.save`. + + Returns: + pathlib.Path: Path to the created image file. + """ + if isinstance(size, int): + size = (size, size) + if len(size) == 2: + size = (3, *size) + if len(size) != 3: + raise UsageError( + f"The 'size' argument should either be an int or a sequence of length 2 or 3. Got {len(size)} instead" + ) + + image = create_image_or_video_tensor(size) + file = pathlib.Path(root) / name + + # torch (num_channels x height x width) -> PIL (width x height x num_channels) + image = image.permute(2, 1, 0) + # For grayscale images PIL doesn't use a channel dimension + if image.shape[2] == 1: + image = torch.squeeze(image, 2) + PIL.Image.fromarray(image.numpy()).save(file, **kwargs) + return file + + +def create_image_folder( + root: Union[pathlib.Path, str], + name: Union[pathlib.Path, str], + file_name_fn: Callable[[int], str], + num_examples: int, + size: Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]] = None, + **kwargs: Any, +) -> List[pathlib.Path]: + """Create a folder of random images. + + Args: + root (Union[str, pathlib.Path]): Root directory the image folder will be placed in. + name (Union[str, pathlib.Path]): Name of the image folder. + file_name_fn (Callable[[int], str]): Should return a file name if called with the file index. + num_examples (int): Number of images to create. + size (Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]]): Size of the images. If + callable, will be called with the index of the corresponding file. If omitted, a random height and width + between 3 and 10 pixels is selected on a per-image basis. + kwargs (Any): Additional parameters passed to :func:`create_image_file`. + + Returns: + List[pathlib.Path]: Paths to all created image files. + + .. 
seealso:: + + - :func:`create_image_file` + """ + if size is None: + + def size(idx: int) -> Tuple[int, int, int]: + num_channels = 3 + height, width = torch.randint(3, 11, size=(2,), dtype=torch.int).tolist() + return (num_channels, height, width) + + root = pathlib.Path(root) / name + os.makedirs(root, exist_ok=True) + + return [ + create_image_file(root, file_name_fn(idx), size=size(idx) if callable(size) else size, **kwargs) + for idx in range(num_examples) + ] + + +@requires_lazy_imports("av") +def create_video_file( + root: Union[pathlib.Path, str], + name: Union[pathlib.Path, str], + size: Union[Sequence[int], int] = (1, 3, 10, 10), + fps: float = 25, + **kwargs: Any, +) -> pathlib.Path: + """Create an video file from random data. + + Args: + root (Union[str, pathlib.Path]): Root directory the video file will be placed in. + name (Union[str, pathlib.Path]): Name of the video file. + size (Union[Sequence[int], int]): Size of the video that represents the + ``(num_frames, num_channels, height, width)``. If scalar, the value is used for the height and width. + If not provided, ``num_frames=1`` and ``num_channels=3`` are assumed. + fps (float): Frame rate in frames per second. + kwargs (Any): Additional parameters passed to :func:`torchvision.io.write_video`. + + Returns: + pathlib.Path: Path to the created image file. + + Raises: + UsageError: If PyAV is not available. + """ + if isinstance(size, int): + size = (size, size) + if len(size) == 2: + size = (3, *size) + if len(size) == 3: + size = (1, *size) + if len(size) != 4: + raise UsageError( + f"The 'size' argument should either be an int or a sequence of length 2, 3, or 4. Got {len(size)} instead" + ) + + video = create_image_or_video_tensor(size) + file = pathlib.Path(root) / name + torchvision.io.write_video(str(file), video.permute(0, 2, 3, 1), fps, **kwargs) + return file + + +@requires_lazy_imports("av") +def create_video_folder( + root: Union[str, pathlib.Path], + name: Union[str, pathlib.Path], + file_name_fn: Callable[[int], str], + num_examples: int, + size: Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]] = None, + fps=25, + **kwargs, +) -> List[pathlib.Path]: + """Create a folder of random videos. + + Args: + root (Union[str, pathlib.Path]): Root directory the video folder will be placed in. + name (Union[str, pathlib.Path]): Name of the video folder. + file_name_fn (Callable[[int], str]): Should return a file name if called with the file index. + num_examples (int): Number of videos to create. + size (Optional[Union[Sequence[int], int, Callable[[int], Union[Sequence[int], int]]]]): Size of the videos. If + callable, will be called with the index of the corresponding file. If omitted, a random even height and + width between 4 and 10 pixels is selected on a per-video basis. + fps (float): Frame rate in frames per second. + kwargs (Any): Additional parameters passed to :func:`create_video_file`. + + Returns: + List[pathlib.Path]: Paths to all created video files. + + Raises: + UsageError: If PyAV is not available. + + .. seealso:: + + - :func:`create_video_file` + """ + if size is None: + + def size(idx): + num_frames = 1 + num_channels = 3 + # The 'libx264' video codec, which is the default of torchvision.io.write_video, requires the height and + # width of the video to be divisible by 2. 
+ height, width = (torch.randint(2, 6, size=(2,), dtype=torch.int) * 2).tolist() + return (num_frames, num_channels, height, width) + + root = pathlib.Path(root) / name + os.makedirs(root, exist_ok=True) + + return [ + create_video_file(root, file_name_fn(idx), size=size(idx) if callable(size) else size, **kwargs) + for idx in range(num_examples) + ] + + +def create_random_string(length: int, *digits: str) -> str: + """Create a random string. + + Args: + length (int): Number of characters in the generated string. + *characters (str): Characters to sample from. If omitted defaults to :attr:`string.ascii_lowercase`. + """ + if not digits: + digits = string.ascii_lowercase + else: + digits = "".join(itertools.chain(*digits)) + + return "".join(random.choice(digits) for _ in range(length)) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_alexnet_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_alexnet_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_mobilenet_v3_large_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_resnet101_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_resnet101_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_resnet50_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_deeplabv3_resnet50_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet121_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet121_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet161_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet161_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet169_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet169_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet201_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_densenet201_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_mobilenet_v3_large_320_fpn_expect.pkl 
b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_mobilenet_v3_large_320_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_mobilenet_v3_large_fpn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_mobilenet_v3_large_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_resnet50_fpn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fasterrcnn_resnet50_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fcn_resnet101_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fcn_resnet101_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fcn_resnet50_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_fcn_resnet50_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_googlenet_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_googlenet_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_inception_v3_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_inception_v3_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_keypointrcnn_resnet50_fpn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_keypointrcnn_resnet50_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_lraspp_mobilenet_v3_large_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_lraspp_mobilenet_v3_large_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_maskrcnn_resnet50_fpn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_maskrcnn_resnet50_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet0_5_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet0_5_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet0_75_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet0_75_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet1_0_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet1_0_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet1_3_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mnasnet1_3_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v2_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v2_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v3_large_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v3_large_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v3_small_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenet_v3_small_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenetv2_residual_setting_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_mobilenetv2_residual_setting_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet101_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet101_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet152_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet152_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet18_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet18_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet34_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet34_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet50_expect.pkl 
b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnet50_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnext101_32x8d_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnext101_32x8d_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnext50_32x4d_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_resnext50_32x4d_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_retinanet_resnet50_fpn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_retinanet_resnet50_fpn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x0_5_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x0_5_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x1_0_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x1_0_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x1_5_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x1_5_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x2_0_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_shufflenet_v2_x2_0_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_squeezenet1_0_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_squeezenet1_0_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_squeezenet1_1_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_squeezenet1_1_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_ssd300_vgg16_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_ssd300_vgg16_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_ssdlite320_mobilenet_v3_large_expect.pkl 
b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_ssdlite320_mobilenet_v3_large_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg11_bn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg11_bn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg11_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg11_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg13_bn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg13_bn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg13_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg13_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg16_bn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg16_bn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg16_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg16_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg19_bn_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg19_bn_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg19_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_vgg19_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_wide_resnet101_2_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_wide_resnet101_2_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_wide_resnet50_2_expect.pkl b/pretrained_model/pytorch_vision_v0.10.0/test/expect/ModelTester.test_wide_resnet50_2_expect.pkl new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/fakedata_generation.py b/pretrained_model/pytorch_vision_v0.10.0/test/fakedata_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..8540410351ee5f6c01bf614f91bc6a6546067e23 --- /dev/null +++ 
b/pretrained_model/pytorch_vision_v0.10.0/test/fakedata_generation.py @@ -0,0 +1,208 @@ +import os +import contextlib +import hashlib +import pickle +import re +import tarfile +import unittest.mock +from distutils import dir_util + +import numpy as np +import PIL +import torch + +from common_utils import get_tmp_dir + + +def mock_class_attribute(stack, target, new): + mock = unittest.mock.patch(target, new_callable=unittest.mock.PropertyMock, return_value=new) + stack.enter_context(mock) + return mock + + +def compute_md5(file): + with open(file, "rb") as fh: + return hashlib.md5(fh.read()).hexdigest() + + +def make_tar(root, name, *files, compression=None): + ext = ".tar" + mode = "w" + if compression is not None: + ext = f"{ext}.{compression}" + mode = f"{mode}:{compression}" + + name = os.path.splitext(name)[0] + ext + archive = os.path.join(root, name) + + with tarfile.open(archive, mode) as fh: + for file in files: + fh.add(os.path.join(root, file), arcname=file) + + return name, compute_md5(archive) + + +def clean_dir(root, *keep): + pattern = re.compile(f"({f')|('.join(keep)})") + for file_or_dir in os.listdir(root): + if pattern.search(file_or_dir): + continue + + file_or_dir = os.path.join(root, file_or_dir) + if os.path.isfile(file_or_dir): + os.remove(file_or_dir) + else: + dir_util.remove_tree(file_or_dir) + + +@contextlib.contextmanager +def mnist_root(num_images, cls_name): + def _encode(v): + return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1] + + def _make_image_file(filename, num_images): + img = torch.randint(0, 256, size=(28 * 28 * num_images,), dtype=torch.uint8) + with open(filename, "wb") as f: + f.write(_encode(2051)) # magic header + f.write(_encode(num_images)) + f.write(_encode(28)) + f.write(_encode(28)) + f.write(img.numpy().tobytes()) + + def _make_label_file(filename, num_images): + labels = torch.zeros((num_images,), dtype=torch.uint8) + with open(filename, "wb") as f: + f.write(_encode(2049)) # magic header + f.write(_encode(num_images)) + f.write(labels.numpy().tobytes()) + + with get_tmp_dir() as tmp_dir: + raw_dir = os.path.join(tmp_dir, cls_name, "raw") + os.makedirs(raw_dir) + _make_image_file(os.path.join(raw_dir, "train-images-idx3-ubyte"), num_images) + _make_label_file(os.path.join(raw_dir, "train-labels-idx1-ubyte"), num_images) + _make_image_file(os.path.join(raw_dir, "t10k-images-idx3-ubyte"), num_images) + _make_label_file(os.path.join(raw_dir, "t10k-labels-idx1-ubyte"), num_images) + yield tmp_dir + + +@contextlib.contextmanager +def cifar_root(version): + def _get_version_params(version): + if version == 'CIFAR10': + return { + 'base_folder': 'cifar-10-batches-py', + 'train_files': ['data_batch_{}'.format(batch) for batch in range(1, 6)], + 'test_file': 'test_batch', + 'target_key': 'labels', + 'meta_file': 'batches.meta', + 'classes_key': 'label_names', + } + elif version == 'CIFAR100': + return { + 'base_folder': 'cifar-100-python', + 'train_files': ['train'], + 'test_file': 'test', + 'target_key': 'fine_labels', + 'meta_file': 'meta', + 'classes_key': 'fine_label_names', + } + else: + raise ValueError + + def _make_pickled_file(obj, file): + with open(file, 'wb') as fh: + pickle.dump(obj, fh, 2) + + def _make_data_file(file, target_key): + obj = { + 'data': np.zeros((1, 32 * 32 * 3), dtype=np.uint8), + target_key: [0] + } + _make_pickled_file(obj, file) + + def _make_meta_file(file, classes_key): + obj = { + classes_key: ['fakedata'], + } + _make_pickled_file(obj, file) + + params = _get_version_params(version) + with 
get_tmp_dir() as root: + base_folder = os.path.join(root, params['base_folder']) + os.mkdir(base_folder) + + for file in list(params['train_files']) + [params['test_file']]: + _make_data_file(os.path.join(base_folder, file), params['target_key']) + + _make_meta_file(os.path.join(base_folder, params['meta_file']), + params['classes_key']) + + yield root + + +@contextlib.contextmanager +def widerface_root(): + """ + Generates a dataset with the following folder structure and returns the path root: + <root> + └── widerface + ├── wider_face_split + ├── WIDER_train + ├── WIDER_val + └── WIDER_test + + The dataset consist of + 1 image for each dataset split (train, val, test) and annotation files + for each split + """ + + def _make_image(file): + PIL.Image.fromarray(np.zeros((32, 32, 3), dtype=np.uint8)).save(file) + + def _make_train_archive(root): + extracted_dir = os.path.join(root, 'WIDER_train', 'images', '0--Parade') + os.makedirs(extracted_dir) + _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_1.jpg')) + + def _make_val_archive(root): + extracted_dir = os.path.join(root, 'WIDER_val', 'images', '0--Parade') + os.makedirs(extracted_dir) + _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_2.jpg')) + + def _make_test_archive(root): + extracted_dir = os.path.join(root, 'WIDER_test', 'images', '0--Parade') + os.makedirs(extracted_dir) + _make_image(os.path.join(extracted_dir, '0_Parade_marchingband_1_3.jpg')) + + def _make_annotations_archive(root): + train_bbox_contents = '0--Parade/0_Parade_marchingband_1_1.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n' + val_bbox_contents = '0--Parade/0_Parade_marchingband_1_2.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n' + test_filelist_contents = '0--Parade/0_Parade_marchingband_1_3.jpg\n' + extracted_dir = os.path.join(root, 'wider_face_split') + os.mkdir(extracted_dir) + + # bbox training file + bbox_file = os.path.join(extracted_dir, "wider_face_train_bbx_gt.txt") + with open(bbox_file, "w") as txt_file: + txt_file.write(train_bbox_contents) + + # bbox validation file + bbox_file = os.path.join(extracted_dir, "wider_face_val_bbx_gt.txt") + with open(bbox_file, "w") as txt_file: + txt_file.write(val_bbox_contents) + + # test filelist file + filelist_file = os.path.join(extracted_dir, "wider_face_test_filelist.txt") + with open(filelist_file, "w") as txt_file: + txt_file.write(test_filelist_contents) + + with get_tmp_dir() as root: + root_base = os.path.join(root, "widerface") + os.mkdir(root_base) + _make_train_archive(root_base) + _make_val_archive(root_base) + _make_test_archive(root_base) + _make_annotations_archive(root_base) + + yield root diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/preprocess-bench.py b/pretrained_model/pytorch_vision_v0.10.0/test/preprocess-bench.py new file mode 100644 index 0000000000000000000000000000000000000000..4ba3ca46dbcf8ce0c00c7fa9025545ea02070281 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/preprocess-bench.py @@ -0,0 +1,59 @@ +import argparse +import os +from timeit import default_timer as timer +from torch.utils.model_zoo import tqdm +import torch +import torch.utils.data +import torchvision +import torchvision.transforms as transforms +import torchvision.datasets as datasets + + +parser = argparse.ArgumentParser(description='PyTorch ImageNet Training') +parser.add_argument('--data', metavar='PATH', required=True, + help='path to dataset') +parser.add_argument('--nThreads', '-j', default=2, type=int, metavar='N', + help='number of data loading threads (default: 2)') 
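# Usage sketch (not part of the upstream diff; the dataset path below is a placeholder and
# assumes an ImageNet-style layout with train/ and val/ subfolders, as the script expects):
#
#   python test/preprocess-bench.py --data /path/to/imagenet -j 4 -b 256
#   python test/preprocess-bench.py --data /path/to/imagenet --accimage   # accimage backend
#
# The script iterates over 20 * nThreads batches from the train split and reports
# minutes/dataset, ms/batch, ms/image and images/sec, so -j and -b directly control
# how much work the timing loop does.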
+parser.add_argument('--batchSize', '-b', default=256, type=int, metavar='N', + help='mini-batch size (1 = pure stochastic) Default: 256') +parser.add_argument('--accimage', action='store_true', + help='use accimage') + + +if __name__ == "__main__": + args = parser.parse_args() + + if args.accimage: + torchvision.set_image_backend('accimage') + print('Using {}'.format(torchvision.get_image_backend())) + + # Data loading code + transform = transforms.Compose([ + transforms.RandomSizedCrop(224), + transforms.RandomHorizontalFlip(), + transforms.ToTensor(), + transforms.Normalize(mean=[0.485, 0.456, 0.406], + std=[0.229, 0.224, 0.225]), + ]) + + traindir = os.path.join(args.data, 'train') + valdir = os.path.join(args.data, 'val') + train = datasets.ImageFolder(traindir, transform) + val = datasets.ImageFolder(valdir, transform) + train_loader = torch.utils.data.DataLoader( + train, batch_size=args.batchSize, shuffle=True, num_workers=args.nThreads) + train_iter = iter(train_loader) + + start_time = timer() + batch_count = 20 * args.nThreads + with tqdm(total=batch_count) as pbar: + for _ in tqdm(range(batch_count)): + pbar.update(1) + batch = next(train_iter) + end_time = timer() + print("Performance: {dataset:.0f} minutes/dataset, {batch:.1f} ms/batch," + " {image:.2f} ms/image {rate:.0f} images/sec" + .format(dataset=(end_time - start_time) * (float(len(train_loader)) / batch_count / 60.0), + batch=(end_time - start_time) / float(batch_count) * 1.0e+3, + image=(end_time - start_time) / (batch_count * args.batchSize) * 1.0e+3, + rate=(batch_count * args.batchSize) / (end_time - start_time))) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/sanity_checks.ipynb b/pretrained_model/pytorch_vision_v0.10.0/test/sanity_checks.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/smoke_test.py b/pretrained_model/pytorch_vision_v0.10.0/test/smoke_test.py new file mode 100644 index 0000000000000000000000000000000000000000..c3a4bdd19d6431250591c8376bf1d2c785c2cb10 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/smoke_test.py @@ -0,0 +1,4 @@ +import torch +import torchvision +import torchvision.datasets as dset +import torchvision.transforms diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_backbone_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_backbone_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..7ee1aed1459ba37f12fcc64b30f97de38a3d2ce3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_backbone_utils.py @@ -0,0 +1,25 @@ +import unittest + + +import torch +from torchvision.models.detection.backbone_utils import resnet_fpn_backbone + + +class ResnetFPNBackboneTester(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.dtype = torch.float32 + + def test_resnet18_fpn_backbone(self): + device = torch.device('cpu') + x = torch.rand(1, 3, 300, 300, dtype=self.dtype, device=device) + resnet18_fpn = resnet_fpn_backbone(backbone_name='resnet18', pretrained=False) + y = resnet18_fpn(x) + self.assertEqual(list(y.keys()), ['0', '1', '2', '3', 'pool']) + + def test_resnet50_fpn_backbone(self): + device = torch.device('cpu') + x = torch.rand(1, 3, 300, 300, dtype=self.dtype, device=device) + resnet50_fpn = resnet_fpn_backbone(backbone_name='resnet50', pretrained=False) + y = resnet50_fpn(x) + self.assertEqual(list(y.keys()), ['0', '1', '2', '3', 'pool']) diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/test_cpp_models.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_cpp_models.py new file mode 100644 index 0000000000000000000000000000000000000000..6deb5d79739a31636865f5bdb783427584a5b54c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_cpp_models.py @@ -0,0 +1,152 @@ +import torch +import os +import unittest +from torchvision import models, transforms +import sys + +from PIL import Image +import torchvision.transforms.functional as F + +try: + from torchvision import _C_tests +except ImportError: + _C_tests = None + + +def process_model(model, tensor, func, name): + model.eval() + traced_script_module = torch.jit.trace(model, tensor) + traced_script_module.save("model.pt") + + py_output = model.forward(tensor) + cpp_output = func("model.pt", tensor) + + assert torch.allclose(py_output, cpp_output), 'Output mismatch of ' + name + ' models' + + +def read_image1(): + image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', + 'grace_hopper_517x606.jpg') + image = Image.open(image_path) + image = image.resize((224, 224)) + x = F.to_tensor(image) + return x.view(1, 3, 224, 224) + + +def read_image2(): + image_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', + 'grace_hopper_517x606.jpg') + image = Image.open(image_path) + image = image.resize((299, 299)) + x = F.to_tensor(image) + x = x.view(1, 3, 299, 299) + return torch.cat([x, x], 0) + + +@unittest.skipIf( + sys.platform == "darwin" or True, + "C++ models are broken on OS X at the moment, and there's a BC breakage on master; " + "see https://github.com/pytorch/vision/issues/1191") +class Tester(unittest.TestCase): + pretrained = False + image = read_image1() + + def test_alexnet(self): + process_model(models.alexnet(self.pretrained), self.image, _C_tests.forward_alexnet, 'Alexnet') + + def test_vgg11(self): + process_model(models.vgg11(self.pretrained), self.image, _C_tests.forward_vgg11, 'VGG11') + + def test_vgg13(self): + process_model(models.vgg13(self.pretrained), self.image, _C_tests.forward_vgg13, 'VGG13') + + def test_vgg16(self): + process_model(models.vgg16(self.pretrained), self.image, _C_tests.forward_vgg16, 'VGG16') + + def test_vgg19(self): + process_model(models.vgg19(self.pretrained), self.image, _C_tests.forward_vgg19, 'VGG19') + + def test_vgg11_bn(self): + process_model(models.vgg11_bn(self.pretrained), self.image, _C_tests.forward_vgg11bn, 'VGG11BN') + + def test_vgg13_bn(self): + process_model(models.vgg13_bn(self.pretrained), self.image, _C_tests.forward_vgg13bn, 'VGG13BN') + + def test_vgg16_bn(self): + process_model(models.vgg16_bn(self.pretrained), self.image, _C_tests.forward_vgg16bn, 'VGG16BN') + + def test_vgg19_bn(self): + process_model(models.vgg19_bn(self.pretrained), self.image, _C_tests.forward_vgg19bn, 'VGG19BN') + + def test_resnet18(self): + process_model(models.resnet18(self.pretrained), self.image, _C_tests.forward_resnet18, 'Resnet18') + + def test_resnet34(self): + process_model(models.resnet34(self.pretrained), self.image, _C_tests.forward_resnet34, 'Resnet34') + + def test_resnet50(self): + process_model(models.resnet50(self.pretrained), self.image, _C_tests.forward_resnet50, 'Resnet50') + + def test_resnet101(self): + process_model(models.resnet101(self.pretrained), self.image, _C_tests.forward_resnet101, 'Resnet101') + + def test_resnet152(self): + process_model(models.resnet152(self.pretrained), self.image, _C_tests.forward_resnet152, 'Resnet152') 
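# Note on the tests above and below (annotation, not upstream code): each one follows the
# pattern implemented by process_model(): the model is put in eval mode, traced with
# torch.jit.trace, serialized to "model.pt", and the matching C++ entry point
# (e.g. _C_tests.forward_resnet50) must reproduce the Python forward() output within
# torch.allclose tolerance. The entire Tester class is currently skipped unconditionally,
# since the skipIf condition above is `sys.platform == "darwin" or True`.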
+ + def test_resnext50_32x4d(self): + process_model(models.resnext50_32x4d(), self.image, _C_tests.forward_resnext50_32x4d, 'ResNext50_32x4d') + + def test_resnext101_32x8d(self): + process_model(models.resnext101_32x8d(), self.image, _C_tests.forward_resnext101_32x8d, 'ResNext101_32x8d') + + def test_wide_resnet50_2(self): + process_model(models.wide_resnet50_2(), self.image, _C_tests.forward_wide_resnet50_2, 'WideResNet50_2') + + def test_wide_resnet101_2(self): + process_model(models.wide_resnet101_2(), self.image, _C_tests.forward_wide_resnet101_2, 'WideResNet101_2') + + def test_squeezenet1_0(self): + process_model(models.squeezenet1_0(self.pretrained), self.image, + _C_tests.forward_squeezenet1_0, 'Squeezenet1.0') + + def test_squeezenet1_1(self): + process_model(models.squeezenet1_1(self.pretrained), self.image, + _C_tests.forward_squeezenet1_1, 'Squeezenet1.1') + + def test_densenet121(self): + process_model(models.densenet121(self.pretrained), self.image, _C_tests.forward_densenet121, 'Densenet121') + + def test_densenet169(self): + process_model(models.densenet169(self.pretrained), self.image, _C_tests.forward_densenet169, 'Densenet169') + + def test_densenet201(self): + process_model(models.densenet201(self.pretrained), self.image, _C_tests.forward_densenet201, 'Densenet201') + + def test_densenet161(self): + process_model(models.densenet161(self.pretrained), self.image, _C_tests.forward_densenet161, 'Densenet161') + + def test_mobilenet_v2(self): + process_model(models.mobilenet_v2(self.pretrained), self.image, _C_tests.forward_mobilenetv2, 'MobileNet') + + def test_googlenet(self): + process_model(models.googlenet(self.pretrained), self.image, _C_tests.forward_googlenet, 'GoogLeNet') + + def test_mnasnet0_5(self): + process_model(models.mnasnet0_5(self.pretrained), self.image, _C_tests.forward_mnasnet0_5, 'MNASNet0_5') + + def test_mnasnet0_75(self): + process_model(models.mnasnet0_75(self.pretrained), self.image, _C_tests.forward_mnasnet0_75, 'MNASNet0_75') + + def test_mnasnet1_0(self): + process_model(models.mnasnet1_0(self.pretrained), self.image, _C_tests.forward_mnasnet1_0, 'MNASNet1_0') + + def test_mnasnet1_3(self): + process_model(models.mnasnet1_3(self.pretrained), self.image, _C_tests.forward_mnasnet1_3, 'MNASNet1_3') + + def test_inception_v3(self): + self.image = read_image2() + process_model(models.inception_v3(self.pretrained), self.image, _C_tests.forward_inceptionv3, 'Inceptionv3') + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..a076b843fa86239349824a839c2308cfb889a54b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets.py @@ -0,0 +1,1738 @@ +import bz2 +import contextlib +import io +import itertools +import os +import pathlib +import pickle +import json +import random +import shutil +import string +import unittest +import xml.etree.ElementTree as ET +import zipfile + +import PIL +import datasets_utils +import numpy as np +import torch +import torch.nn.functional as F +from torchvision import datasets + + +class STL10TestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.STL10 + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + split=("train", "test", "unlabeled", "train+unlabeled")) + + @staticmethod + def _make_binary_file(num_elements, root, name): + file_name = os.path.join(root, name) + 
np.zeros(num_elements, dtype=np.uint8).tofile(file_name) + + @staticmethod + def _make_image_file(num_images, root, name, num_channels=3, height=96, width=96): + STL10TestCase._make_binary_file(num_images * num_channels * height * width, root, name) + + @staticmethod + def _make_label_file(num_images, root, name): + STL10TestCase._make_binary_file(num_images, root, name) + + @staticmethod + def _make_class_names_file(root, name="class_names.txt"): + with open(os.path.join(root, name), "w") as fh: + for cname in ("airplane", "bird"): + fh.write(f"{cname}\n") + + @staticmethod + def _make_fold_indices_file(root): + num_folds = 10 + offset = 0 + with open(os.path.join(root, "fold_indices.txt"), "w") as fh: + for fold in range(num_folds): + line = " ".join([str(idx) for idx in range(offset, offset + fold + 1)]) + fh.write(f"{line}\n") + offset += fold + 1 + + return tuple(range(1, num_folds + 1)) + + @staticmethod + def _make_train_files(root, num_unlabeled_images=1): + num_images_in_fold = STL10TestCase._make_fold_indices_file(root) + num_train_images = sum(num_images_in_fold) + + STL10TestCase._make_image_file(num_train_images, root, "train_X.bin") + STL10TestCase._make_label_file(num_train_images, root, "train_y.bin") + STL10TestCase._make_image_file(1, root, "unlabeled_X.bin") + + return dict(train=num_train_images, unlabeled=num_unlabeled_images) + + @staticmethod + def _make_test_files(root, num_images=2): + STL10TestCase._make_image_file(num_images, root, "test_X.bin") + STL10TestCase._make_label_file(num_images, root, "test_y.bin") + + return dict(test=num_images) + + def inject_fake_data(self, tmpdir, config): + root_folder = os.path.join(tmpdir, "stl10_binary") + os.mkdir(root_folder) + + num_images_in_split = self._make_train_files(root_folder) + num_images_in_split.update(self._make_test_files(root_folder)) + self._make_class_names_file(root_folder) + + return sum(num_images_in_split[part] for part in config["split"].split("+")) + + def test_folds(self): + for fold in range(10): + with self.create_dataset(split="train", folds=fold) as (dataset, _): + self.assertEqual(len(dataset), fold + 1) + + def test_unlabeled(self): + with self.create_dataset(split="unlabeled") as (dataset, _): + labels = [dataset[idx][1] for idx in range(len(dataset))] + self.assertTrue(all(label == -1 for label in labels)) + + def test_invalid_folds1(self): + with self.assertRaises(ValueError): + with self.create_dataset(folds=10): + pass + + def test_invalid_folds2(self): + with self.assertRaises(ValueError): + with self.create_dataset(folds="0"): + pass + + +class Caltech101TestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Caltech101 + FEATURE_TYPES = (PIL.Image.Image, (int, np.ndarray, tuple)) + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + target_type=("category", "annotation", ["category", "annotation"]) + ) + REQUIRED_PACKAGES = ("scipy",) + + def inject_fake_data(self, tmpdir, config): + root = pathlib.Path(tmpdir) / "caltech101" + images = root / "101_ObjectCategories" + annotations = root / "Annotations" + + categories = (("Faces", "Faces_2"), ("helicopter", "helicopter"), ("ying_yang", "ying_yang")) + num_images_per_category = 2 + + for image_category, annotation_category in categories: + datasets_utils.create_image_folder( + root=images, + name=image_category, + file_name_fn=lambda idx: f"image_{idx + 1:04d}.jpg", + num_examples=num_images_per_category, + ) + self._create_annotation_folder( + root=annotations, + name=annotation_category, + file_name_fn=lambda 
idx: f"annotation_{idx + 1:04d}.mat", + num_examples=num_images_per_category, + ) + + # This is included in the original archive, but is removed by the dataset. Thus, an empty directory suffices. + os.makedirs(images / "BACKGROUND_Google") + + return num_images_per_category * len(categories) + + def _create_annotation_folder(self, root, name, file_name_fn, num_examples): + root = pathlib.Path(root) / name + os.makedirs(root) + + for idx in range(num_examples): + self._create_annotation_file(root, file_name_fn(idx)) + + def _create_annotation_file(self, root, name): + mdict = dict(obj_contour=torch.rand((2, torch.randint(3, 6, size=())), dtype=torch.float64).numpy()) + datasets_utils.lazy_importer.scipy.io.savemat(str(pathlib.Path(root) / name), mdict) + + def test_combined_targets(self): + target_types = ["category", "annotation"] + + individual_targets = [] + for target_type in target_types: + with self.create_dataset(target_type=target_type) as (dataset, _): + _, target = dataset[0] + individual_targets.append(target) + + with self.create_dataset(target_type=target_types) as (dataset, _): + _, combined_targets = dataset[0] + + actual = len(individual_targets) + expected = len(combined_targets) + self.assertEqual( + actual, + expected, + f"The number of the returned combined targets does not match the the number targets if requested " + f"individually: {actual} != {expected}", + ) + + for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets): + with self.subTest(target_type=target_type): + actual = type(combined_target) + expected = type(individual_target) + self.assertIs( + actual, + expected, + f"Type of the combined target does not match the type of the corresponding individual target: " + f"{actual} is not {expected}", + ) + + +class Caltech256TestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Caltech256 + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) / "caltech256" / "256_ObjectCategories" + + categories = ((1, "ak47"), (127, "laptop-101"), (257, "clutter")) + num_images_per_category = 2 + + for idx, category in categories: + datasets_utils.create_image_folder( + tmpdir, + name=f"{idx:03d}.{category}", + file_name_fn=lambda image_idx: f"{idx:03d}_{image_idx + 1:04d}.jpg", + num_examples=num_images_per_category, + ) + + return num_images_per_category * len(categories) + + +class WIDERFaceTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.WIDERFace + FEATURE_TYPES = (PIL.Image.Image, (dict, type(None))) # test split returns None as target + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val', 'test')) + + def inject_fake_data(self, tmpdir, config): + widerface_dir = pathlib.Path(tmpdir) / 'widerface' + annotations_dir = widerface_dir / 'wider_face_split' + os.makedirs(annotations_dir) + + split_to_idx = split_to_num_examples = { + "train": 1, + "val": 2, + "test": 3, + } + + # We need to create all folders regardless of the split in config + for split in ('train', 'val', 'test'): + split_idx = split_to_idx[split] + num_examples = split_to_num_examples[split] + + datasets_utils.create_image_folder( + root=tmpdir, + name=widerface_dir / f'WIDER_{split}' / 'images' / '0--Parade', + file_name_fn=lambda image_idx: f"0_Parade_marchingband_1_{split_idx + image_idx}.jpg", + num_examples=num_examples, + ) + + annotation_file_name = { + 'train': annotations_dir / 'wider_face_train_bbx_gt.txt', + 'val': annotations_dir / 
'wider_face_val_bbx_gt.txt', + 'test': annotations_dir / 'wider_face_test_filelist.txt', + }[split] + + annotation_content = { + "train": "".join( + f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n449 330 122 149 0 0 0 0 0 0\n" + for image_idx in range(num_examples) + ), + "val": "".join( + f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n1\n501 160 285 443 0 0 0 0 0 0\n" + for image_idx in range(num_examples) + ), + "test": "".join( + f"0--Parade/0_Parade_marchingband_1_{split_idx + image_idx}.jpg\n" + for image_idx in range(num_examples) + ), + }[split] + + with open(annotation_file_name, "w") as annotation_file: + annotation_file.write(annotation_content) + + return split_to_num_examples[config["split"]] + + +class CityScapesTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Cityscapes + TARGET_TYPES = ( + "instance", + "semantic", + "polygon", + "color", + ) + ADDITIONAL_CONFIGS = ( + *datasets_utils.combinations_grid( + mode=("fine",), split=("train", "test", "val"), target_type=TARGET_TYPES + ), + *datasets_utils.combinations_grid( + mode=("coarse",), + split=("train", "train_extra", "val"), + target_type=TARGET_TYPES, + ), + ) + FEATURE_TYPES = (PIL.Image.Image, (dict, PIL.Image.Image)) + + def inject_fake_data(self, tmpdir, config): + + tmpdir = pathlib.Path(tmpdir) + + mode_to_splits = { + "Coarse": ["train", "train_extra", "val"], + "Fine": ["train", "test", "val"], + } + + if config["split"] == "train": # just for coverage of the number of samples + cities = ["bochum", "bremen"] + else: + cities = ["bochum"] + + polygon_target = { + "imgHeight": 1024, + "imgWidth": 2048, + "objects": [ + { + "label": "sky", + "polygon": [ + [1241, 0], + [1234, 156], + [1478, 197], + [1611, 172], + [1606, 0], + ], + }, + { + "label": "road", + "polygon": [ + [0, 448], + [1331, 274], + [1473, 265], + [2047, 605], + [2047, 1023], + [0, 1023], + ], + }, + ], + } + + for mode in ["Coarse", "Fine"]: + gt_dir = tmpdir / f"gt{mode}" + for split in mode_to_splits[mode]: + for city in cities: + def make_image(name, size=10): + datasets_utils.create_image_folder( + root=gt_dir / split, + name=city, + file_name_fn=lambda _: name, + size=size, + num_examples=1, + ) + make_image(f"{city}_000000_000000_gt{mode}_instanceIds.png") + make_image(f"{city}_000000_000000_gt{mode}_labelIds.png") + make_image(f"{city}_000000_000000_gt{mode}_color.png", size=(4, 10, 10)) + + polygon_target_name = gt_dir / split / city / f"{city}_000000_000000_gt{mode}_polygons.json" + with open(polygon_target_name, "w") as outfile: + json.dump(polygon_target, outfile) + + # Create leftImg8bit folder + for split in ['test', 'train_extra', 'train', 'val']: + for city in cities: + datasets_utils.create_image_folder( + root=tmpdir / "leftImg8bit" / split, + name=city, + file_name_fn=lambda _: f"{city}_000000_000000_leftImg8bit.png", + num_examples=1, + ) + + info = {'num_examples': len(cities)} + if config['target_type'] == 'polygon': + info['expected_polygon_target'] = polygon_target + return info + + def test_combined_targets(self): + target_types = ['semantic', 'polygon', 'color'] + + with self.create_dataset(target_type=target_types) as (dataset, _): + output = dataset[0] + self.assertTrue(isinstance(output, tuple)) + self.assertTrue(len(output) == 2) + self.assertTrue(isinstance(output[0], PIL.Image.Image)) + self.assertTrue(isinstance(output[1], tuple)) + self.assertTrue(len(output[1]) == 3) + self.assertTrue(isinstance(output[1][0], PIL.Image.Image)) # semantic + 
self.assertTrue(isinstance(output[1][1], dict)) # polygon + self.assertTrue(isinstance(output[1][2], PIL.Image.Image)) # color + + def test_feature_types_target_color(self): + with self.create_dataset(target_type='color') as (dataset, _): + color_img, color_target = dataset[0] + self.assertTrue(isinstance(color_img, PIL.Image.Image)) + self.assertTrue(np.array(color_target).shape[2] == 4) + + def test_feature_types_target_polygon(self): + with self.create_dataset(target_type='polygon') as (dataset, info): + polygon_img, polygon_target = dataset[0] + self.assertTrue(isinstance(polygon_img, PIL.Image.Image)) + self.assertEqual(polygon_target, info['expected_polygon_target']) + + +class ImageNetTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.ImageNet + REQUIRED_PACKAGES = ('scipy',) + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=('train', 'val')) + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + + wnid = 'n01234567' + if config['split'] == 'train': + num_examples = 3 + datasets_utils.create_image_folder( + root=tmpdir, + name=tmpdir / 'train' / wnid / wnid, + file_name_fn=lambda image_idx: f"{wnid}_{image_idx}.JPEG", + num_examples=num_examples, + ) + else: + num_examples = 1 + datasets_utils.create_image_folder( + root=tmpdir, + name=tmpdir / 'val' / wnid, + file_name_fn=lambda image_ifx: "ILSVRC2012_val_0000000{image_idx}.JPEG", + num_examples=num_examples, + ) + + wnid_to_classes = {wnid: [1]} + torch.save((wnid_to_classes, None), tmpdir / 'meta.bin') + return num_examples + + +class CIFAR10TestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.CIFAR10 + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False)) + + _VERSION_CONFIG = dict( + base_folder="cifar-10-batches-py", + train_files=tuple(f"data_batch_{idx}" for idx in range(1, 6)), + test_files=("test_batch",), + labels_key="labels", + meta_file="batches.meta", + num_categories=10, + categories_key="label_names", + ) + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) / self._VERSION_CONFIG["base_folder"] + os.makedirs(tmpdir) + + num_images_per_file = 1 + for name in itertools.chain(self._VERSION_CONFIG["train_files"], self._VERSION_CONFIG["test_files"]): + self._create_batch_file(tmpdir, name, num_images_per_file) + + categories = self._create_meta_file(tmpdir) + + return dict( + num_examples=num_images_per_file + * len(self._VERSION_CONFIG["train_files"] if config["train"] else self._VERSION_CONFIG["test_files"]), + categories=categories, + ) + + def _create_batch_file(self, root, name, num_images): + data = datasets_utils.create_image_or_video_tensor((num_images, 32 * 32 * 3)) + labels = np.random.randint(0, self._VERSION_CONFIG["num_categories"], size=num_images).tolist() + self._create_binary_file(root, name, {"data": data, self._VERSION_CONFIG["labels_key"]: labels}) + + def _create_meta_file(self, root): + categories = [ + f"{idx:0{len(str(self._VERSION_CONFIG['num_categories'] - 1))}d}" + for idx in range(self._VERSION_CONFIG["num_categories"]) + ] + self._create_binary_file( + root, self._VERSION_CONFIG["meta_file"], {self._VERSION_CONFIG["categories_key"]: categories} + ) + return categories + + def _create_binary_file(self, root, name, content): + with open(pathlib.Path(root) / name, "wb") as fh: + pickle.dump(content, fh) + + def test_class_to_idx(self): + with self.create_dataset() as (dataset, info): + expected = {category: label for label, category in enumerate(info["categories"])} 
+ actual = dataset.class_to_idx + self.assertEqual(actual, expected) + + +class CIFAR100(CIFAR10TestCase): + DATASET_CLASS = datasets.CIFAR100 + + _VERSION_CONFIG = dict( + base_folder="cifar-100-python", + train_files=("train",), + test_files=("test",), + labels_key="fine_labels", + meta_file="meta", + num_categories=100, + categories_key="fine_label_names", + ) + + +class CelebATestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.CelebA + FEATURE_TYPES = (PIL.Image.Image, (torch.Tensor, int, tuple, type(None))) + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + split=("train", "valid", "test", "all"), + target_type=("attr", "identity", "bbox", "landmarks", ["attr", "identity"]), + ) + + _SPLIT_TO_IDX = dict(train=0, valid=1, test=2) + + def inject_fake_data(self, tmpdir, config): + base_folder = pathlib.Path(tmpdir) / "celeba" + os.makedirs(base_folder) + + num_images, num_images_per_split = self._create_split_txt(base_folder) + + datasets_utils.create_image_folder( + base_folder, "img_align_celeba", lambda idx: f"{idx + 1:06d}.jpg", num_images + ) + attr_names = self._create_attr_txt(base_folder, num_images) + self._create_identity_txt(base_folder, num_images) + self._create_bbox_txt(base_folder, num_images) + self._create_landmarks_txt(base_folder, num_images) + + return dict(num_examples=num_images_per_split[config["split"]], attr_names=attr_names) + + def _create_split_txt(self, root): + num_images_per_split = dict(train=3, valid=2, test=1) + + data = [ + [self._SPLIT_TO_IDX[split]] for split, num_images in num_images_per_split.items() for _ in range(num_images) + ] + self._create_txt(root, "list_eval_partition.txt", data) + + num_images_per_split["all"] = num_images = sum(num_images_per_split.values()) + return num_images, num_images_per_split + + def _create_attr_txt(self, root, num_images): + header = ("5_o_Clock_Shadow", "Young") + data = torch.rand((num_images, len(header))).ge(0.5).int().mul(2).sub(1).tolist() + self._create_txt(root, "list_attr_celeba.txt", data, header=header, add_num_examples=True) + return header + + def _create_identity_txt(self, root, num_images): + data = torch.randint(1, 4, size=(num_images, 1)).tolist() + self._create_txt(root, "identity_CelebA.txt", data) + + def _create_bbox_txt(self, root, num_images): + header = ("x_1", "y_1", "width", "height") + data = torch.randint(10, size=(num_images, len(header))).tolist() + self._create_txt( + root, "list_bbox_celeba.txt", data, header=header, add_num_examples=True, add_image_id_to_header=True + ) + + def _create_landmarks_txt(self, root, num_images): + header = ("lefteye_x", "rightmouth_y") + data = torch.randint(10, size=(num_images, len(header))).tolist() + self._create_txt(root, "list_landmarks_align_celeba.txt", data, header=header, add_num_examples=True) + + def _create_txt(self, root, name, data, header=None, add_num_examples=False, add_image_id_to_header=False): + with open(pathlib.Path(root) / name, "w") as fh: + if add_num_examples: + fh.write(f"{len(data)}\n") + + if header: + if add_image_id_to_header: + header = ("image_id", *header) + fh.write(f"{' '.join(header)}\n") + + for idx, line in enumerate(data, 1): + fh.write(f"{' '.join((f'{idx:06d}.jpg', *[str(value) for value in line]))}\n") + + def test_combined_targets(self): + target_types = ["attr", "identity", "bbox", "landmarks"] + + individual_targets = [] + for target_type in target_types: + with self.create_dataset(target_type=target_type) as (dataset, _): + _, target = dataset[0] + 
individual_targets.append(target) + + with self.create_dataset(target_type=target_types) as (dataset, _): + _, combined_targets = dataset[0] + + actual = len(individual_targets) + expected = len(combined_targets) + self.assertEqual( + actual, + expected, + f"The number of the returned combined targets does not match the the number targets if requested " + f"individually: {actual} != {expected}", + ) + + for target_type, combined_target, individual_target in zip(target_types, combined_targets, individual_targets): + with self.subTest(target_type=target_type): + actual = type(combined_target) + expected = type(individual_target) + self.assertIs( + actual, + expected, + f"Type of the combined target does not match the type of the corresponding individual target: " + f"{actual} is not {expected}", + ) + + def test_no_target(self): + with self.create_dataset(target_type=[]) as (dataset, _): + _, target = dataset[0] + + self.assertIsNone(target) + + def test_attr_names(self): + with self.create_dataset() as (dataset, info): + self.assertEqual(tuple(dataset.attr_names), info["attr_names"]) + + +class VOCSegmentationTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.VOCSegmentation + FEATURE_TYPES = (PIL.Image.Image, PIL.Image.Image) + + ADDITIONAL_CONFIGS = ( + *datasets_utils.combinations_grid( + year=[f"20{year:02d}" for year in range(7, 13)], image_set=("train", "val", "trainval") + ), + dict(year="2007", image_set="test"), + dict(year="2007-test", image_set="test"), + ) + + def inject_fake_data(self, tmpdir, config): + year, is_test_set = ( + ("2007", True) + if config["year"] == "2007-test" or config["image_set"] == "test" + else (config["year"], False) + ) + image_set = config["image_set"] + + base_dir = pathlib.Path(tmpdir) + if year == "2011": + base_dir /= "TrainVal" + base_dir = base_dir / "VOCdevkit" / f"VOC{year}" + os.makedirs(base_dir) + + num_images, num_images_per_image_set = self._create_image_set_files(base_dir, "ImageSets", is_test_set) + datasets_utils.create_image_folder(base_dir, "JPEGImages", lambda idx: f"{idx:06d}.jpg", num_images) + + datasets_utils.create_image_folder(base_dir, "SegmentationClass", lambda idx: f"{idx:06d}.png", num_images) + annotation = self._create_annotation_files(base_dir, "Annotations", num_images) + + return dict(num_examples=num_images_per_image_set[image_set], annotation=annotation) + + def _create_image_set_files(self, root, name, is_test_set): + root = pathlib.Path(root) / name + src = pathlib.Path(root) / "Main" + os.makedirs(src, exist_ok=True) + + idcs = dict(train=(0, 1, 2), val=(3, 4), test=(5,)) + idcs["trainval"] = (*idcs["train"], *idcs["val"]) + + for image_set in ("test",) if is_test_set else ("train", "val", "trainval"): + self._create_image_set_file(src, image_set, idcs[image_set]) + + shutil.copytree(src, root / "Segmentation") + + num_images = max(itertools.chain(*idcs.values())) + 1 + num_images_per_image_set = dict([(image_set, len(idcs_)) for image_set, idcs_ in idcs.items()]) + return num_images, num_images_per_image_set + + def _create_image_set_file(self, root, image_set, idcs): + with open(pathlib.Path(root) / f"{image_set}.txt", "w") as fh: + fh.writelines([f"{idx:06d}\n" for idx in idcs]) + + def _create_annotation_files(self, root, name, num_images): + root = pathlib.Path(root) / name + os.makedirs(root) + + for idx in range(num_images): + annotation = self._create_annotation_file(root, f"{idx:06d}.xml") + + return annotation + + def _create_annotation_file(self, root, name): + def 
add_child(parent, name, text=None): + child = ET.SubElement(parent, name) + child.text = text + return child + + def add_name(obj, name="dog"): + add_child(obj, "name", name) + return name + + def add_bndbox(obj, bndbox=None): + if bndbox is None: + bndbox = {"xmin": "1", "xmax": "2", "ymin": "3", "ymax": "4"} + + obj = add_child(obj, "bndbox") + for name, text in bndbox.items(): + add_child(obj, name, text) + + return bndbox + + annotation = ET.Element("annotation") + obj = add_child(annotation, "object") + data = dict(name=add_name(obj), bndbox=add_bndbox(obj)) + + with open(pathlib.Path(root) / name, "wb") as fh: + fh.write(ET.tostring(annotation)) + + return data + + +class VOCDetectionTestCase(VOCSegmentationTestCase): + DATASET_CLASS = datasets.VOCDetection + FEATURE_TYPES = (PIL.Image.Image, dict) + + def test_annotations(self): + with self.create_dataset() as (dataset, info): + _, target = dataset[0] + + self.assertIn("annotation", target) + annotation = target["annotation"] + + self.assertIn("object", annotation) + objects = annotation["object"] + + self.assertEqual(len(objects), 1) + object = objects[0] + + self.assertEqual(object, info["annotation"]) + + +class CocoDetectionTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.CocoDetection + FEATURE_TYPES = (PIL.Image.Image, list) + + REQUIRED_PACKAGES = ("pycocotools",) + + _IMAGE_FOLDER = "images" + _ANNOTATIONS_FOLDER = "annotations" + _ANNOTATIONS_FILE = "annotations.json" + + def dataset_args(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + root = tmpdir / self._IMAGE_FOLDER + annotation_file = tmpdir / self._ANNOTATIONS_FOLDER / self._ANNOTATIONS_FILE + return root, annotation_file + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + + num_images = 3 + num_annotations_per_image = 2 + + files = datasets_utils.create_image_folder( + tmpdir, name=self._IMAGE_FOLDER, file_name_fn=lambda idx: f"{idx:012d}.jpg", num_examples=num_images + ) + file_names = [file.relative_to(tmpdir / self._IMAGE_FOLDER) for file in files] + + annotation_folder = tmpdir / self._ANNOTATIONS_FOLDER + os.makedirs(annotation_folder) + info = self._create_annotation_file( + annotation_folder, self._ANNOTATIONS_FILE, file_names, num_annotations_per_image + ) + + info["num_examples"] = num_images + return info + + def _create_annotation_file(self, root, name, file_names, num_annotations_per_image): + image_ids = [int(file_name.stem) for file_name in file_names] + images = [dict(file_name=str(file_name), id=id) for file_name, id in zip(file_names, image_ids)] + + annotations, info = self._create_annotations(image_ids, num_annotations_per_image) + self._create_json(root, name, dict(images=images, annotations=annotations)) + + return info + + def _create_annotations(self, image_ids, num_annotations_per_image): + annotations = datasets_utils.combinations_grid( + image_id=image_ids, bbox=([1.0, 2.0, 3.0, 4.0],) * num_annotations_per_image + ) + for id, annotation in enumerate(annotations): + annotation["id"] = id + return annotations, dict() + + def _create_json(self, root, name, content): + file = pathlib.Path(root) / name + with open(file, "w") as fh: + json.dump(content, fh) + return file + + +class CocoCaptionsTestCase(CocoDetectionTestCase): + DATASET_CLASS = datasets.CocoCaptions + + def _create_annotations(self, image_ids, num_annotations_per_image): + captions = [str(idx) for idx in range(num_annotations_per_image)] + annotations = datasets_utils.combinations_grid(image_id=image_ids, 
caption=captions) + for id, annotation in enumerate(annotations): + annotation["id"] = id + return annotations, dict(captions=captions) + + def test_captions(self): + with self.create_dataset() as (dataset, info): + _, captions = dataset[0] + self.assertEqual(tuple(captions), tuple(info["captions"])) + + +class UCF101TestCase(datasets_utils.VideoDatasetTestCase): + DATASET_CLASS = datasets.UCF101 + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False)) + + _VIDEO_FOLDER = "videos" + _ANNOTATIONS_FOLDER = "annotations" + + def dataset_args(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + root = tmpdir / self._VIDEO_FOLDER + annotation_path = tmpdir / self._ANNOTATIONS_FOLDER + return root, annotation_path + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + + video_folder = tmpdir / self._VIDEO_FOLDER + os.makedirs(video_folder) + video_files = self._create_videos(video_folder) + + annotations_folder = tmpdir / self._ANNOTATIONS_FOLDER + os.makedirs(annotations_folder) + num_examples = self._create_annotation_files(annotations_folder, video_files, config["fold"], config["train"]) + + return num_examples + + def _create_videos(self, root, num_examples_per_class=3): + def file_name_fn(cls, idx, clips_per_group=2): + return f"v_{cls}_g{(idx // clips_per_group) + 1:02d}_c{(idx % clips_per_group) + 1:02d}.avi" + + video_files = [ + datasets_utils.create_video_folder(root, cls, lambda idx: file_name_fn(cls, idx), num_examples_per_class) + for cls in ("ApplyEyeMakeup", "YoYo") + ] + return [path.relative_to(root) for path in itertools.chain(*video_files)] + + def _create_annotation_files(self, root, video_files, fold, train): + current_videos = random.sample(video_files, random.randrange(1, len(video_files) - 1)) + current_annotation = self._annotation_file_name(fold, train) + self._create_annotation_file(root, current_annotation, current_videos) + + other_videos = set(video_files) - set(current_videos) + other_annotations = [ + self._annotation_file_name(fold, train) for fold, train in itertools.product((1, 2, 3), (True, False)) + ] + other_annotations.remove(current_annotation) + for name in other_annotations: + self._create_annotation_file(root, name, other_videos) + + return len(current_videos) + + def _annotation_file_name(self, fold, train): + return f"{'train' if train else 'test'}list{fold:02d}.txt" + + def _create_annotation_file(self, root, name, video_files): + with open(pathlib.Path(root) / name, "w") as fh: + fh.writelines(f"{file}\n" for file in sorted(video_files)) + + +class LSUNTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.LSUN + + REQUIRED_PACKAGES = ("lmdb",) + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + classes=("train", "test", "val", ["bedroom_train", "church_outdoor_train"]) + ) + + _CATEGORIES = ( + "bedroom", + "bridge", + "church_outdoor", + "classroom", + "conference_room", + "dining_room", + "kitchen", + "living_room", + "restaurant", + "tower", + ) + + def inject_fake_data(self, tmpdir, config): + root = pathlib.Path(tmpdir) + + num_images = 0 + for cls in self._parse_classes(config["classes"]): + num_images += self._create_lmdb(root, cls) + + return num_images + + @contextlib.contextmanager + def create_dataset( + self, + *args, **kwargs + ): + with super().create_dataset(*args, **kwargs) as output: + yield output + # Currently datasets.LSUN caches the keys in the current directory rather than in the root directory. 
Thus, + # this creates a number of _cache_* files in the current directory that will not be removed together + # with the temporary directory + for file in os.listdir(os.getcwd()): + if file.startswith("_cache_"): + try: + os.remove(file) + except FileNotFoundError: + # When the same test is run in parallel (in fb internal tests), a thread may remove another + # thread's file. We should be able to remove the try/except when + # https://github.com/pytorch/vision/issues/825 is fixed. + pass + + def _parse_classes(self, classes): + if not isinstance(classes, str): + return classes + + split = classes + if split == "test": + return [split] + + return [f"{category}_{split}" for category in self._CATEGORIES] + + def _create_lmdb(self, root, cls): + lmdb = datasets_utils.lazy_importer.lmdb + hexdigits_lowercase = string.digits + string.ascii_lowercase[:6] + + folder = f"{cls}_lmdb" + + num_images = torch.randint(1, 4, size=()).item() + format = "png" + files = datasets_utils.create_image_folder(root, folder, lambda idx: f"{idx}.{format}", num_images) + + with lmdb.open(str(root / folder)) as env, env.begin(write=True) as txn: + for file in files: + key = "".join(random.choice(hexdigits_lowercase) for _ in range(40)).encode() + + buffer = io.BytesIO() + PIL.Image.open(file).save(buffer, format) + buffer.seek(0) + value = buffer.read() + + txn.put(key, value) + + os.remove(file) + + return num_images + + def test_not_found_or_corrupted(self): + # LSUN does not raise built-in exception, but a custom one. It is expressive enough to not 'cast' it to + # RuntimeError or FileNotFoundError that are normally checked by this test. + with self.assertRaises(datasets_utils.lazy_importer.lmdb.Error): + super().test_not_found_or_corrupted() + + +class Kinetics400TestCase(datasets_utils.VideoDatasetTestCase): + DATASET_CLASS = datasets.Kinetics400 + + def inject_fake_data(self, tmpdir, config): + classes = ("Abseiling", "Zumba") + num_videos_per_class = 2 + + digits = string.ascii_letters + string.digits + "-_" + for cls in classes: + datasets_utils.create_video_folder( + tmpdir, + cls, + lambda _: f"{datasets_utils.create_random_string(11, digits)}.avi", + num_videos_per_class, + ) + + return num_videos_per_class * len(classes) + + +class HMDB51TestCase(datasets_utils.VideoDatasetTestCase): + DATASET_CLASS = datasets.HMDB51 + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(fold=(1, 2, 3), train=(True, False)) + + _VIDEO_FOLDER = "videos" + _SPLITS_FOLDER = "splits" + _CLASSES = ("brush_hair", "wave") + + def dataset_args(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + root = tmpdir / self._VIDEO_FOLDER + annotation_path = tmpdir / self._SPLITS_FOLDER + return root, annotation_path + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + + video_folder = tmpdir / self._VIDEO_FOLDER + os.makedirs(video_folder) + video_files = self._create_videos(video_folder) + + splits_folder = tmpdir / self._SPLITS_FOLDER + os.makedirs(splits_folder) + num_examples = self._create_split_files(splits_folder, video_files, config["fold"], config["train"]) + + return num_examples + + def _create_videos(self, root, num_examples_per_class=3): + def file_name_fn(cls, idx, clips_per_group=2): + return f"{cls}_{(idx // clips_per_group) + 1:d}_{(idx % clips_per_group) + 1:d}.avi" + + return [ + ( + cls, + datasets_utils.create_video_folder( + root, + cls, + lambda idx: file_name_fn(cls, idx), + num_examples_per_class, + ), + ) + for cls in self._CLASSES + ] + + def _create_split_files(self, root, 
video_files, fold, train): + num_videos = num_train_videos = 0 + + for cls, videos in video_files: + num_videos += len(videos) + + train_videos = set(random.sample(videos, random.randrange(1, len(videos) - 1))) + num_train_videos += len(train_videos) + + with open(pathlib.Path(root) / f"{cls}_test_split{fold}.txt", "w") as fh: + fh.writelines(f"{file.name} {1 if file in train_videos else 2}\n" for file in videos) + + return num_train_videos if train else (num_videos - num_train_videos) + + +class OmniglotTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Omniglot + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(background=(True, False)) + + def inject_fake_data(self, tmpdir, config): + target_folder = ( + pathlib.Path(tmpdir) / "omniglot-py" / f"images_{'background' if config['background'] else 'evaluation'}" + ) + os.makedirs(target_folder) + + num_images = 0 + for name in ("Alphabet_of_the_Magi", "Tifinagh"): + num_images += self._create_alphabet_folder(target_folder, name) + + return num_images + + def _create_alphabet_folder(self, root, name): + num_images_total = 0 + for idx in range(torch.randint(1, 4, size=()).item()): + num_images = torch.randint(1, 4, size=()).item() + num_images_total += num_images + + datasets_utils.create_image_folder( + root / name, f"character{idx:02d}", lambda image_idx: f"{image_idx:02d}.png", num_images + ) + + return num_images_total + + +class SBUTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.SBU + FEATURE_TYPES = (PIL.Image.Image, str) + + def inject_fake_data(self, tmpdir, config): + num_images = 3 + + dataset_folder = pathlib.Path(tmpdir) / "dataset" + images = datasets_utils.create_image_folder(tmpdir, "dataset", self._create_file_name, num_images) + + self._create_urls_txt(dataset_folder, images) + self._create_captions_txt(dataset_folder, num_images) + + return num_images + + def _create_file_name(self, idx): + part1 = datasets_utils.create_random_string(10, string.digits) + part2 = datasets_utils.create_random_string(10, string.ascii_lowercase, string.digits[:6]) + return f"{part1}_{part2}.jpg" + + def _create_urls_txt(self, root, images): + with open(root / "SBU_captioned_photo_dataset_urls.txt", "w") as fh: + for image in images: + fh.write( + f"http://static.flickr.com/{datasets_utils.create_random_string(4, string.digits)}/{image.name}\n" + ) + + def _create_captions_txt(self, root, num_images): + with open(root / "SBU_captioned_photo_dataset_captions.txt", "w") as fh: + for _ in range(num_images): + fh.write(f"{datasets_utils.create_random_string(10)}\n") + + +class SEMEIONTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.SEMEION + + def inject_fake_data(self, tmpdir, config): + num_images = 3 + + images = torch.rand(num_images, 256) + labels = F.one_hot(torch.randint(10, size=(num_images,))) + with open(pathlib.Path(tmpdir) / "semeion.data", "w") as fh: + for image, one_hot_labels in zip(images, labels): + image_columns = " ".join([f"{pixel.item():.4f}" for pixel in image]) + labels_columns = " ".join([str(label.item()) for label in one_hot_labels]) + fh.write(f"{image_columns} {labels_columns}\n") + + return num_images + + +class USPSTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.USPS + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False)) + + def inject_fake_data(self, tmpdir, config): + num_images = 2 if config["train"] else 1 + + images = torch.rand(num_images, 256) * 2 - 1 + labels = torch.randint(1, 11, 
size=(num_images,)) + + with bz2.open(pathlib.Path(tmpdir) / f"usps{'.t' if not config['train'] else ''}.bz2", "w") as fh: + for image, label in zip(images, labels): + line = " ".join((str(label.item()), *[f"{idx}:{pixel:.6f}" for idx, pixel in enumerate(image, 1)])) + fh.write(f"{line}\n".encode()) + + return num_images + + +class SBDatasetTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.SBDataset + FEATURE_TYPES = (PIL.Image.Image, (np.ndarray, PIL.Image.Image)) + + REQUIRED_PACKAGES = ("scipy.io", "scipy.sparse") + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + image_set=("train", "val", "train_noval"), mode=("boundaries", "segmentation") + ) + + _NUM_CLASSES = 20 + + def inject_fake_data(self, tmpdir, config): + num_images, num_images_per_image_set = self._create_split_files(tmpdir) + + sizes = self._create_target_folder(tmpdir, "cls", num_images) + + datasets_utils.create_image_folder( + tmpdir, "img", lambda idx: f"{self._file_stem(idx)}.jpg", num_images, size=lambda idx: sizes[idx] + ) + + return num_images_per_image_set[config["image_set"]] + + def _create_split_files(self, root): + root = pathlib.Path(root) + + splits = dict(train=(0, 1, 2), train_noval=(0, 2), val=(3,)) + + for split, idcs in splits.items(): + self._create_split_file(root, split, idcs) + + num_images = max(itertools.chain(*splits.values())) + 1 + num_images_per_split = dict([(split, len(idcs)) for split, idcs in splits.items()]) + return num_images, num_images_per_split + + def _create_split_file(self, root, name, idcs): + with open(root / f"{name}.txt", "w") as fh: + fh.writelines(f"{self._file_stem(idx)}\n" for idx in idcs) + + def _create_target_folder(self, root, name, num_images): + io = datasets_utils.lazy_importer.scipy.io + + target_folder = pathlib.Path(root) / name + os.makedirs(target_folder) + + sizes = [torch.randint(1, 4, size=(2,)).tolist() for _ in range(num_images)] + for idx, size in enumerate(sizes): + content = dict( + GTcls=dict(Boundaries=self._create_boundaries(size), Segmentation=self._create_segmentation(size)) + ) + io.savemat(target_folder / f"{self._file_stem(idx)}.mat", content) + + return sizes + + def _create_boundaries(self, size): + sparse = datasets_utils.lazy_importer.scipy.sparse + return [ + [sparse.csc_matrix(torch.randint(0, 2, size=size, dtype=torch.uint8).numpy())] + for _ in range(self._NUM_CLASSES) + ] + + def _create_segmentation(self, size): + return torch.randint(0, self._NUM_CLASSES + 1, size=size, dtype=torch.uint8).numpy() + + def _file_stem(self, idx): + return f"2008_{idx:06d}" + + +class FakeDataTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.FakeData + FEATURE_TYPES = (PIL.Image.Image, int) + + def dataset_args(self, tmpdir, config): + return () + + def inject_fake_data(self, tmpdir, config): + return config["size"] + + def test_not_found_or_corrupted(self): + self.skipTest("The data is generated at creation and thus cannot be non-existent or corrupted.") + + +class PhotoTourTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.PhotoTour + + # The PhotoTour dataset returns examples with different features with respect to the 'train' parameter. Thus, + # we overwrite 'FEATURE_TYPES' with a dummy value to satisfy the initial checks of the base class. Furthermore, we + # overwrite the 'test_feature_types()' method to select the correct feature types before the test is run. 
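+ # In 'train' mode each example is a single patch tensor, while in 'test' mode an example is a
+ # (patch1, patch2, match_label) triple, hence the two feature-type tuples below.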
+ FEATURE_TYPES = () + _TRAIN_FEATURE_TYPES = (torch.Tensor,) + _TEST_FEATURE_TYPES = (torch.Tensor, torch.Tensor, torch.Tensor) + + datasets_utils.combinations_grid(train=(True, False)) + + _NAME = "liberty" + + def dataset_args(self, tmpdir, config): + return tmpdir, self._NAME + + def inject_fake_data(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + + # In contrast to the original data, the fake images injected here comprise only a single patch. Thus, + # num_images == num_patches. + num_patches = 5 + + image_files = self._create_images(tmpdir, self._NAME, num_patches) + point_ids, info_file = self._create_info_file(tmpdir / self._NAME, num_patches) + num_matches, matches_file = self._create_matches_file(tmpdir / self._NAME, num_patches, point_ids) + + self._create_archive(tmpdir, self._NAME, *image_files, info_file, matches_file) + + return num_patches if config["train"] else num_matches + + def _create_images(self, root, name, num_images): + # The images in the PhotoTour dataset comprises of multiple grayscale patches of 64 x 64 pixels. Thus, the + # smallest fake image is 64 x 64 pixels and comprises a single patch. + return datasets_utils.create_image_folder( + root, name, lambda idx: f"patches{idx:04d}.bmp", num_images, size=(1, 64, 64) + ) + + def _create_info_file(self, root, num_images): + point_ids = torch.randint(num_images, size=(num_images,)).tolist() + + file = root / "info.txt" + with open(file, "w") as fh: + fh.writelines([f"{point_id} 0\n" for point_id in point_ids]) + + return point_ids, file + + def _create_matches_file(self, root, num_patches, point_ids): + lines = [ + f"{patch_id1} {point_ids[patch_id1]} 0 {patch_id2} {point_ids[patch_id2]} 0\n" + for patch_id1, patch_id2 in itertools.combinations(range(num_patches), 2) + ] + + file = root / "m50_100000_100000_0.txt" + with open(file, "w") as fh: + fh.writelines(lines) + + return len(lines), file + + def _create_archive(self, root, name, *files): + archive = root / f"{name}.zip" + with zipfile.ZipFile(archive, "w") as zip: + for file in files: + zip.write(file, arcname=file.relative_to(root)) + + return archive + + @datasets_utils.test_all_configs + def test_feature_types(self, config): + feature_types = self.FEATURE_TYPES + self.FEATURE_TYPES = self._TRAIN_FEATURE_TYPES if config["train"] else self._TEST_FEATURE_TYPES + try: + super().test_feature_types.__wrapped__(self, config) + finally: + self.FEATURE_TYPES = feature_types + + +class Flickr8kTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Flickr8k + + FEATURE_TYPES = (PIL.Image.Image, list) + + _IMAGES_FOLDER = "images" + _ANNOTATIONS_FILE = "captions.html" + + def dataset_args(self, tmpdir, config): + tmpdir = pathlib.Path(tmpdir) + root = tmpdir / self._IMAGES_FOLDER + ann_file = tmpdir / self._ANNOTATIONS_FILE + return str(root), str(ann_file) + + def inject_fake_data(self, tmpdir, config): + num_images = 3 + num_captions_per_image = 3 + + tmpdir = pathlib.Path(tmpdir) + + images = self._create_images(tmpdir, self._IMAGES_FOLDER, num_images) + self._create_annotations_file(tmpdir, self._ANNOTATIONS_FILE, images, num_captions_per_image) + + return dict(num_examples=num_images, captions=self._create_captions(num_captions_per_image)) + + def _create_images(self, root, name, num_images): + return datasets_utils.create_image_folder(root, name, self._image_file_name, num_images) + + def _image_file_name(self, idx): + id = datasets_utils.create_random_string(10, string.digits) + checksum = datasets_utils.create_random_string(10, 
string.digits, string.ascii_lowercase[:6]) + size = datasets_utils.create_random_string(1, "qwcko") + return f"{id}_{checksum}_{size}.jpg" + + def _create_annotations_file(self, root, name, images, num_captions_per_image): + with open(root / name, "w") as fh: + fh.write("<table>") + for image in (None, *images): + self._add_image(fh, image, num_captions_per_image) + fh.write("</table>") + + def _add_image(self, fh, image, num_captions_per_image): + fh.write("<tr>") + self._add_image_header(fh, image) + fh.write("</tr><tr><td><ul>") + self._add_image_captions(fh, num_captions_per_image) + fh.write("</ul></td></tr>") + + def _add_image_header(self, fh, image=None): + if image: + url = f"http://www.flickr.com/photos/user/{image.name.split('_')[0]}/" + data = f'<a href="{url}">{url}</a>' + else: + data = "Image Not Found" + fh.write(f"<td>{data}</td>") + + def _add_image_captions(self, fh, num_captions_per_image): + for caption in self._create_captions(num_captions_per_image): + fh.write(f"<li>{caption}") + + def _create_captions(self, num_captions_per_image): + return [str(idx) for idx in range(num_captions_per_image)] + + def test_captions(self): + with self.create_dataset() as (dataset, info): + _, captions = dataset[0] + self.assertSequenceEqual(captions, info["captions"]) + + +class Flickr30kTestCase(Flickr8kTestCase): + DATASET_CLASS = datasets.Flickr30k + + FEATURE_TYPES = (PIL.Image.Image, list) + + _ANNOTATIONS_FILE = "captions.token" + + def _image_file_name(self, idx): + return f"{idx}.jpg" + + def _create_annotations_file(self, root, name, images, num_captions_per_image): + with open(root / name, "w") as fh: + for image, (idx, caption) in itertools.product( + images, enumerate(self._create_captions(num_captions_per_image)) + ): + fh.write(f"{image.name}#{idx}\t{caption}\n") + + +class MNISTTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.MNIST + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False)) + + _MAGIC_DTYPES = { + torch.uint8: 8, + torch.int8: 9, + torch.int16: 11, + torch.int32: 12, + torch.float32: 13, + torch.float64: 14, + } + + _IMAGES_SIZE = (28, 28) + _IMAGES_DTYPE = torch.uint8 + + _LABELS_SIZE = () + _LABELS_DTYPE = torch.uint8 + + def inject_fake_data(self, tmpdir, config): + raw_dir = pathlib.Path(tmpdir) / self.DATASET_CLASS.__name__ / "raw" + os.makedirs(raw_dir, exist_ok=True) + + num_images = self._num_images(config) + self._create_binary_file( + raw_dir, self._images_file(config), (num_images, *self._IMAGES_SIZE), self._IMAGES_DTYPE + ) + self._create_binary_file( + raw_dir, self._labels_file(config), (num_images, *self._LABELS_SIZE), self._LABELS_DTYPE + ) + return num_images + + def _num_images(self, config): + return 2 if config["train"] else 1 + + def _images_file(self, config): + return f"{self._prefix(config)}-images-idx3-ubyte" + + def _labels_file(self, config): + return f"{self._prefix(config)}-labels-idx1-ubyte" + + def _prefix(self, config): + return "train" if config["train"] else "t10k" + + def _create_binary_file(self, root, filename, size, dtype): + with open(pathlib.Path(root) / filename, "wb") as fh: + for meta in (self._magic(dtype, len(size)), *size): + fh.write(self._encode(meta)) + + # If ever an MNIST variant is added that uses floating point data, this should be adapted. 
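+ # For reference, a worked example of the header written above (it follows the IDX layout of the real MNIST
+ # files): uint8 image data with 3 dimensions gives a magic number of 8 * 256 + 3 = 2051 (0x00000803), and
+ # '_encode()' byte-reverses the int32 value so that, on little-endian hosts, the header fields come out
+ # big-endian like the original files.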
+ data = torch.randint(0, torch.iinfo(dtype).max + 1, size, dtype=dtype) + fh.write(data.numpy().tobytes()) + + def _magic(self, dtype, dims): + return self._MAGIC_DTYPES[dtype] * 256 + dims + + def _encode(self, v): + return torch.tensor(v, dtype=torch.int32).numpy().tobytes()[::-1] + + +class FashionMNISTTestCase(MNISTTestCase): + DATASET_CLASS = datasets.FashionMNIST + + +class KMNISTTestCase(MNISTTestCase): + DATASET_CLASS = datasets.KMNIST + + +class EMNISTTestCase(MNISTTestCase): + DATASET_CLASS = datasets.EMNIST + + DEFAULT_CONFIG = dict(split="byclass") + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + split=("byclass", "bymerge", "balanced", "letters", "digits", "mnist"), train=(True, False) + ) + + def _prefix(self, config): + return f"emnist-{config['split']}-{'train' if config['train'] else 'test'}" + + +class QMNISTTestCase(MNISTTestCase): + DATASET_CLASS = datasets.QMNIST + + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(what=("train", "test", "test10k", "nist")) + + _LABELS_SIZE = (8,) + _LABELS_DTYPE = torch.int32 + + def _num_images(self, config): + if config["what"] == "nist": + return 3 + elif config["what"] == "train": + return 2 + elif config["what"] == "test50k": + # The split 'test50k' is defined as the last 50k images beginning at index 10000. Thus, we need to create + # more than 10000 images for the dataset to not be empty. Since this takes significantly longer than the + # creation of all other splits, this is excluded from the 'ADDITIONAL_CONFIGS' and is tested only once in + # 'test_num_examples_test50k'. + return 10001 + else: + return 1 + + def _labels_file(self, config): + return f"{self._prefix(config)}-labels-idx2-int" + + def _prefix(self, config): + if config["what"] == "nist": + return "xnist" + + if config["what"] is None: + what = "train" if config["train"] else "test" + elif config["what"].startswith("test"): + what = "test" + else: + what = config["what"] + + return f"qmnist-{what}" + + def test_num_examples_test50k(self): + with self.create_dataset(what="test50k") as (dataset, info): + # Since the split 'test50k' selects all images beginning from the index 10000, we subtract the number of + # created examples by this. + self.assertEqual(len(dataset), info["num_examples"] - 10000) + + +class DatasetFolderTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.DatasetFolder + + # The dataset has no fixed return type since it is defined by the loader parameter. For testing, we use a loader + # that simply returns the path as type 'str' instead of loading anything. See the 'dataset_args()' method. + FEATURE_TYPES = (str, int) + + _IMAGE_EXTENSIONS = ("jpg", "png") + _VIDEO_EXTENSIONS = ("avi", "mp4") + _EXTENSIONS = (*_IMAGE_EXTENSIONS, *_VIDEO_EXTENSIONS) + + # DatasetFolder has two mutually exclusive parameters: 'extensions' and 'is_valid_file'. One of both is required. + # We only iterate over different 'extensions' here and handle the tests for 'is_valid_file' in the + # 'test_is_valid_file()' method. 
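+ # For reference, 'is_valid_file' replaces the extension filter with an arbitrary predicate, e.g. (a sketch,
+ # not exercised here): datasets.DatasetFolder(root, loader, extensions=None, is_valid_file=lambda p: p.endswith(".png"))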
+ DEFAULT_CONFIG = dict(extensions=_EXTENSIONS) + ADDITIONAL_CONFIGS = ( + *datasets_utils.combinations_grid(extensions=[(ext,) for ext in _IMAGE_EXTENSIONS]), + dict(extensions=_IMAGE_EXTENSIONS), + *datasets_utils.combinations_grid(extensions=[(ext,) for ext in _VIDEO_EXTENSIONS]), + dict(extensions=_VIDEO_EXTENSIONS), + ) + + def dataset_args(self, tmpdir, config): + return tmpdir, lambda x: x + + def inject_fake_data(self, tmpdir, config): + extensions = config["extensions"] or self._is_valid_file_to_extensions(config["is_valid_file"]) + + num_examples_total = 0 + classes = [] + for ext, cls in zip(self._EXTENSIONS, string.ascii_letters): + if ext not in extensions: + continue + + create_example_folder = ( + datasets_utils.create_image_folder + if ext in self._IMAGE_EXTENSIONS + else datasets_utils.create_video_folder + ) + + num_examples = torch.randint(1, 3, size=()).item() + create_example_folder(tmpdir, cls, lambda idx: self._file_name_fn(cls, ext, idx), num_examples) + + num_examples_total += num_examples + classes.append(cls) + + return dict(num_examples=num_examples_total, classes=classes) + + def _file_name_fn(self, cls, ext, idx): + return f"{cls}_{idx}.{ext}" + + def _is_valid_file_to_extensions(self, is_valid_file): + return {ext for ext in self._EXTENSIONS if is_valid_file(f"foo.{ext}")} + + @datasets_utils.test_all_configs + def test_is_valid_file(self, config): + extensions = config.pop("extensions") + # We need to explicitly pass extensions=None here or otherwise it would be filled by the value from the + # DEFAULT_CONFIG. + with self.create_dataset( + config, extensions=None, is_valid_file=lambda file: pathlib.Path(file).suffix[1:] in extensions + ) as (dataset, info): + self.assertEqual(len(dataset), info["num_examples"]) + + @datasets_utils.test_all_configs + def test_classes(self, config): + with self.create_dataset(config) as (dataset, info): + self.assertSequenceEqual(dataset.classes, info["classes"]) + + +class ImageFolderTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.ImageFolder + + def inject_fake_data(self, tmpdir, config): + num_examples_total = 0 + classes = ("a", "b") + for cls in classes: + num_examples = torch.randint(1, 3, size=()).item() + num_examples_total += num_examples + + datasets_utils.create_image_folder(tmpdir, cls, lambda idx: f"{cls}_{idx}.png", num_examples) + + return dict(num_examples=num_examples_total, classes=classes) + + @datasets_utils.test_all_configs + def test_classes(self, config): + with self.create_dataset(config) as (dataset, info): + self.assertSequenceEqual(dataset.classes, info["classes"]) + + +class KittiTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Kitti + FEATURE_TYPES = (PIL.Image.Image, (list, type(None))) # test split returns None as target + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(train=(True, False)) + + def inject_fake_data(self, tmpdir, config): + kitti_dir = os.path.join(tmpdir, "Kitti", "raw") + os.makedirs(kitti_dir) + + split_to_num_examples = { + True: 1, + False: 2, + } + + # We need to create all folders(training and testing). 
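+ # The loop below mirrors the layout the dataset expects: Kitti/raw/{training,testing}/image_2/ for the images
+ # and Kitti/raw/training/label_2/ for the annotation files; the testing split has no targets.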
+ for is_training in (True, False): + num_examples = split_to_num_examples[is_training] + + datasets_utils.create_image_folder( + root=kitti_dir, + name=os.path.join("training" if is_training else "testing", "image_2"), + file_name_fn=lambda image_idx: f"{image_idx:06d}.png", + num_examples=num_examples, + ) + if is_training: + for image_idx in range(num_examples): + target_file_dir = os.path.join(kitti_dir, "training", "label_2") + os.makedirs(target_file_dir) + target_file_name = os.path.join(target_file_dir, f"{image_idx:06d}.txt") + target_contents = "Pedestrian 0.00 0 -0.20 712.40 143.00 810.73 307.92 1.89 0.48 1.20 1.84 1.47 8.41 0.01\n" # noqa + with open(target_file_name, "w") as target_file: + target_file.write(target_contents) + + return split_to_num_examples[config["train"]] + + +class SvhnTestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.SVHN + REQUIRED_PACKAGES = ("scipy",) + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid(split=("train", "test", "extra")) + + def inject_fake_data(self, tmpdir, config): + import scipy.io as sio + + split = config["split"] + num_examples = { + "train": 2, + "test": 3, + "extra": 4, + }.get(split) + + file = f"{split}_32x32.mat" + images = np.zeros((32, 32, 3, num_examples), dtype=np.uint8) + targets = np.zeros((num_examples,), dtype=np.uint8) + sio.savemat(os.path.join(tmpdir, file), {'X': images, 'y': targets}) + return num_examples + + +class Places365TestCase(datasets_utils.ImageDatasetTestCase): + DATASET_CLASS = datasets.Places365 + ADDITIONAL_CONFIGS = datasets_utils.combinations_grid( + split=("train-standard", "train-challenge", "val"), + small=(False, True), + ) + _CATEGORIES = "categories_places365.txt" + # {split: file} + _FILE_LISTS = { + "train-standard": "places365_train_standard.txt", + "train-challenge": "places365_train_challenge.txt", + "val": "places365_val.txt", + } + # {(split, small): folder_name} + _IMAGES = { + ("train-standard", False): "data_large_standard", + ("train-challenge", False): "data_large_challenge", + ("val", False): "val_large", + ("train-standard", True): "data_256_standard", + ("train-challenge", True): "data_256_challenge", + ("val", True): "val_256", + } + # (class, idx) + _CATEGORIES_CONTENT = ( + ("/a/airfield", 0), + ("/a/apartment_building/outdoor", 8), + ("/b/badlands", 30), + ) + # (file, idx) + _FILE_LIST_CONTENT = ( + ("Places365_val_00000001.png", 0), + *((f"{category}/Places365_train_00000001.png", idx) + for category, idx in _CATEGORIES_CONTENT), + ) + + @staticmethod + def _make_txt(root, name, seq): + file = os.path.join(root, name) + with open(file, "w") as fh: + for text, idx in seq: + fh.write(f"{text} {idx}\n") + + @staticmethod + def _make_categories_txt(root, name): + Places365TestCase._make_txt(root, name, Places365TestCase._CATEGORIES_CONTENT) + + @staticmethod + def _make_file_list_txt(root, name): + Places365TestCase._make_txt(root, name, Places365TestCase._FILE_LIST_CONTENT) + + @staticmethod + def _make_image(file_name, size): + os.makedirs(os.path.dirname(file_name), exist_ok=True) + PIL.Image.fromarray(np.zeros((*size, 3), dtype=np.uint8)).save(file_name) + + @staticmethod + def _make_devkit_archive(root, split): + Places365TestCase._make_categories_txt(root, Places365TestCase._CATEGORIES) + Places365TestCase._make_file_list_txt(root, Places365TestCase._FILE_LISTS[split]) + + @staticmethod + def _make_images_archive(root, split, small): + folder_name = Places365TestCase._IMAGES[(split, small)] + image_size = (256, 256) if small else (512, 
random.randint(512, 1024)) + files, idcs = zip(*Places365TestCase._FILE_LIST_CONTENT) + images = [f.lstrip("/").replace("/", os.sep) for f in files] + for image in images: + Places365TestCase._make_image(os.path.join(root, folder_name, image), image_size) + + return [(os.path.join(root, folder_name, image), idx) for image, idx in zip(images, idcs)] + + def inject_fake_data(self, tmpdir, config): + self._make_devkit_archive(tmpdir, config['split']) + return len(self._make_images_archive(tmpdir, config['split'], config['small'])) + + def test_classes(self): + classes = list(map(lambda x: x[0], self._CATEGORIES_CONTENT)) + with self.create_dataset() as (dataset, _): + self.assertEqual(dataset.classes, classes) + + def test_class_to_idx(self): + class_to_idx = dict(self._CATEGORIES_CONTENT) + with self.create_dataset() as (dataset, _): + self.assertEqual(dataset.class_to_idx, class_to_idx) + + def test_images_download_preexisting(self): + with self.assertRaises(RuntimeError): + with self.create_dataset({'download': True}): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_download.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_download.py new file mode 100644 index 0000000000000000000000000000000000000000..0066b76ccbe4364e32651ccaadbb5646f2003295 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_download.py @@ -0,0 +1,453 @@ +import contextlib +import itertools +import time +import unittest.mock +from datetime import datetime +from distutils import dir_util +from os import path +from urllib.error import HTTPError, URLError +from urllib.parse import urlparse +from urllib.request import urlopen, Request +import tempfile +import warnings + +import pytest + +from torchvision import datasets +from torchvision.datasets.utils import ( + download_url, + check_integrity, + download_file_from_google_drive, + _get_redirect_url, + USER_AGENT, +) + +from common_utils import get_tmp_dir + + +def limit_requests_per_time(min_secs_between_requests=2.0): + last_requests = {} + + def outer_wrapper(fn): + def inner_wrapper(request, *args, **kwargs): + url = request.full_url if isinstance(request, Request) else request + + netloc = urlparse(url).netloc + last_request = last_requests.get(netloc) + if last_request is not None: + elapsed_secs = (datetime.now() - last_request).total_seconds() + delta = min_secs_between_requests - elapsed_secs + if delta > 0: + time.sleep(delta) + + response = fn(request, *args, **kwargs) + last_requests[netloc] = datetime.now() + + return response + + return inner_wrapper + + return outer_wrapper + + +urlopen = limit_requests_per_time()(urlopen) + + +def resolve_redirects(max_hops=3): + def outer_wrapper(fn): + def inner_wrapper(request, *args, **kwargs): + initial_url = request.full_url if isinstance(request, Request) else request + url = _get_redirect_url(initial_url, max_hops=max_hops) + + if url == initial_url: + return fn(request, *args, **kwargs) + + warnings.warn(f"The URL {initial_url} ultimately redirects to {url}.") + + if not isinstance(request, Request): + return fn(url, *args, **kwargs) + + request_attrs = { + attr: getattr(request, attr) for attr in ("data", "headers", "origin_req_host", "unverifiable") + } + # the 'method' attribute does only exist if the request was created with it + if hasattr(request, "method"): + request_attrs["method"] = request.method + + return fn(Request(url, **request_attrs), *args, **kwargs) + + return inner_wrapper + + 
return outer_wrapper + + +urlopen = resolve_redirects()(urlopen) + + +@contextlib.contextmanager +def log_download_attempts( + urls_and_md5s=None, + file="utils", + patch=True, + mock_auxiliaries=None, +): + def add_mock(stack, name, file, **kwargs): + try: + return stack.enter_context(unittest.mock.patch(f"torchvision.datasets.{file}.{name}", **kwargs)) + except AttributeError as error: + if file != "utils": + return add_mock(stack, name, "utils", **kwargs) + else: + raise pytest.UsageError from error + + if urls_and_md5s is None: + urls_and_md5s = set() + if mock_auxiliaries is None: + mock_auxiliaries = patch + + with contextlib.ExitStack() as stack: + url_mock = add_mock(stack, "download_url", file, wraps=None if patch else download_url) + google_drive_mock = add_mock( + stack, "download_file_from_google_drive", file, wraps=None if patch else download_file_from_google_drive + ) + + if mock_auxiliaries: + add_mock(stack, "extract_archive", file) + + try: + yield urls_and_md5s + finally: + for args, kwargs in url_mock.call_args_list: + url = args[0] + md5 = args[-1] if len(args) == 4 else kwargs.get("md5") + urls_and_md5s.add((url, md5)) + + for args, kwargs in google_drive_mock.call_args_list: + id = args[0] + url = f"https://drive.google.com/file/d/{id}" + md5 = args[3] if len(args) == 4 else kwargs.get("md5") + urls_and_md5s.add((url, md5)) + + +def retry(fn, times=1, wait=5.0): + msgs = [] + for _ in range(times + 1): + try: + return fn() + except AssertionError as error: + msgs.append(str(error)) + time.sleep(wait) + else: + raise AssertionError( + "\n".join( + ( + f"Assertion failed {times + 1} times with {wait:.1f} seconds intermediate wait time.\n", + *(f"{idx}: {error}" for idx, error in enumerate(msgs, 1)), + ) + ) + ) + + +@contextlib.contextmanager +def assert_server_response_ok(): + try: + yield + except URLError as error: + raise AssertionError("The request timed out.") from error + except HTTPError as error: + raise AssertionError(f"The server returned {error.code}: {error.reason}.") from error + except RecursionError as error: + raise AssertionError(str(error)) from error + + +def assert_url_is_accessible(url, timeout=5.0): + request = Request(url, headers={"User-Agent": USER_AGENT}, method="HEAD") + with assert_server_response_ok(): + urlopen(request, timeout=timeout) + + +def assert_file_downloads_correctly(url, md5, timeout=5.0): + with get_tmp_dir() as root: + file = path.join(root, path.basename(url)) + with assert_server_response_ok(): + with open(file, "wb") as fh: + request = Request(url, headers={"User-Agent": USER_AGENT}) + response = urlopen(request, timeout=timeout) + fh.write(response.read()) + + assert check_integrity(file, md5=md5), "The MD5 checksums mismatch" + + +class DownloadConfig: + def __init__(self, url, md5=None, id=None): + self.url = url + self.md5 = md5 + self.id = id or url + + def __repr__(self): + return self.id + + +def make_download_configs(urls_and_md5s, name=None): + return [ + DownloadConfig(url, md5=md5, id=f"{name}, {url}" if name is not None else None) for url, md5 in urls_and_md5s + ] + + +def collect_download_configs(dataset_loader, name=None, **kwargs): + urls_and_md5s = set() + try: + with log_download_attempts(urls_and_md5s=urls_and_md5s, **kwargs): + dataset = dataset_loader() + except Exception: + dataset = None + + if name is None and dataset is not None: + name = type(dataset).__name__ + + return make_download_configs(urls_and_md5s, name) + + +# This is a workaround since fixtures, such as the built-in tmp_dir, can only be 
used within a test but not within a +# parametrization. Thus, we use a single root directory for all datasets and remove it when all download tests are run. +ROOT = tempfile.mkdtemp() + + +@pytest.fixture(scope="module", autouse=True) +def root(): + yield ROOT + dir_util.remove_tree(ROOT) + + +def places365(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.Places365(ROOT, split=split, small=small, download=True), + name=f"Places365, {split}, {'small' if small else 'large'}", + file="places365", + ) + for split, small in itertools.product(("train-standard", "train-challenge", "val"), (False, True)) + ] + ) + + +def caltech101(): + return collect_download_configs(lambda: datasets.Caltech101(ROOT, download=True), name="Caltech101") + + +def caltech256(): + return collect_download_configs(lambda: datasets.Caltech256(ROOT, download=True), name="Caltech256") + + +def cifar10(): + return collect_download_configs(lambda: datasets.CIFAR10(ROOT, download=True), name="CIFAR10") + + +def cifar100(): + return collect_download_configs(lambda: datasets.CIFAR100(ROOT, download=True), name="CIFAR100") + + +def voc(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.VOCSegmentation(ROOT, year=year, download=True), + name=f"VOC, {year}", + file="voc", + ) + for year in ("2007", "2007-test", "2008", "2009", "2010", "2011", "2012") + ] + ) + + +def mnist(): + with unittest.mock.patch.object(datasets.MNIST, "mirrors", datasets.MNIST.mirrors[-1:]): + return collect_download_configs(lambda: datasets.MNIST(ROOT, download=True), name="MNIST") + + +def fashion_mnist(): + return collect_download_configs(lambda: datasets.FashionMNIST(ROOT, download=True), name="FashionMNIST") + + +def kmnist(): + return collect_download_configs(lambda: datasets.KMNIST(ROOT, download=True), name="KMNIST") + + +def emnist(): + # the 'split' argument can be any valid one, since everything is downloaded anyway + return collect_download_configs(lambda: datasets.EMNIST(ROOT, split="byclass", download=True), name="EMNIST") + + +def qmnist(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.QMNIST(ROOT, what=what, download=True), + name=f"QMNIST, {what}", + file="mnist", + ) + for what in ("train", "test", "nist") + ] + ) + + +def omniglot(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.Omniglot(ROOT, background=background, download=True), + name=f"Omniglot, {'background' if background else 'evaluation'}", + ) + for background in (True, False) + ] + ) + + +def phototour(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.PhotoTour(ROOT, name=name, download=True), + name=f"PhotoTour, {name}", + file="phototour", + ) + # The names postfixed with '_harris' point to the domain 'matthewalunbrown.com'. For some reason all + # requests timeout from within CI. They are disabled until this is resolved. 
+ for name in ("notredame", "yosemite", "liberty") # "notredame_harris", "yosemite_harris", "liberty_harris" + ] + ) + + +def sbdataset(): + return collect_download_configs( + lambda: datasets.SBDataset(ROOT, download=True), + name="SBDataset", + file="voc", + ) + + +def sbu(): + return collect_download_configs( + lambda: datasets.SBU(ROOT, download=True), + name="SBU", + file="sbu", + ) + + +def semeion(): + return collect_download_configs( + lambda: datasets.SEMEION(ROOT, download=True), + name="SEMEION", + file="semeion", + ) + + +def stl10(): + return collect_download_configs( + lambda: datasets.STL10(ROOT, download=True), + name="STL10", + ) + + +def svhn(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.SVHN(ROOT, split=split, download=True), + name=f"SVHN, {split}", + file="svhn", + ) + for split in ("train", "test", "extra") + ] + ) + + +def usps(): + return itertools.chain( + *[ + collect_download_configs( + lambda: datasets.USPS(ROOT, train=train, download=True), + name=f"USPS, {'train' if train else 'test'}", + file="usps", + ) + for train in (True, False) + ] + ) + + +def celeba(): + return collect_download_configs( + lambda: datasets.CelebA(ROOT, download=True), + name="CelebA", + file="celeba", + ) + + +def widerface(): + return collect_download_configs( + lambda: datasets.WIDERFace(ROOT, download=True), + name="WIDERFace", + file="widerface", + ) + + +def kitti(): + return itertools.chain( + *[ + collect_download_configs( + lambda train=train: datasets.Kitti(ROOT, train=train, download=True), + name=f"Kitti, {'train' if train else 'test'}", + file="kitti", + ) + for train in (True, False) + ] + ) + + +def make_parametrize_kwargs(download_configs): + argvalues = [] + ids = [] + for config in download_configs: + argvalues.append((config.url, config.md5)) + ids.append(config.id) + + return dict(argnames=("url", "md5"), argvalues=argvalues, ids=ids) + + +@pytest.mark.parametrize( + **make_parametrize_kwargs( + itertools.chain( + places365(), + caltech101(), + caltech256(), + cifar10(), + cifar100(), + # The VOC download server is unstable. See https://github.com/pytorch/vision/issues/2953 for details. 
+ # voc(), + mnist(), + fashion_mnist(), + kmnist(), + emnist(), + qmnist(), + omniglot(), + phototour(), + sbdataset(), + sbu(), + semeion(), + stl10(), + svhn(), + usps(), + celeba(), + widerface(), + kitti(), + ) + ) +) +def test_url_is_accessible(url, md5): + retry(lambda: assert_url_is_accessible(url)) + + +@pytest.mark.parametrize(**make_parametrize_kwargs(itertools.chain())) +def test_file_downloads_correctly(url, md5): + retry(lambda: assert_file_downloads_correctly(url, md5)) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_samplers.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_samplers.py new file mode 100644 index 0000000000000000000000000000000000000000..10d8704dbb1ae9cd302f7ef68318f837d6b02a0c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_samplers.py @@ -0,0 +1,118 @@ +import contextlib +import sys +import os +import torch +import unittest + +from torchvision import io +from torchvision.datasets.samplers import ( + DistributedSampler, + RandomClipSampler, + UniformClipSampler, +) +from torchvision.datasets.video_utils import VideoClips, unfold +from torchvision import get_video_backend + +from common_utils import get_tmp_dir +from _assert_utils import assert_equal + + +@contextlib.contextmanager +def get_list_of_videos(num_videos=5, sizes=None, fps=None): + with get_tmp_dir() as tmp_dir: + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmp_dir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) + + yield names + + +@unittest.skipIf(not io.video._av_available(), "this test requires av") +class Tester(unittest.TestCase): + def test_random_clip_sampler(self): + with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + video_clips = VideoClips(video_list, 5, 5) + sampler = RandomClipSampler(video_clips, 3) + self.assertEqual(len(sampler), 3 * 3) + indices = torch.tensor(list(iter(sampler))) + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1, 2])) + assert_equal(count, torch.tensor([3, 3, 3])) + + def test_random_clip_sampler_unequal(self): + with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: + video_clips = VideoClips(video_list, 5, 5) + sampler = RandomClipSampler(video_clips, 3) + self.assertEqual(len(sampler), 2 + 3 + 3) + indices = list(iter(sampler)) + self.assertIn(0, indices) + self.assertIn(1, indices) + # remove elements of the first video, to simplify testing + indices.remove(0) + indices.remove(1) + indices = torch.tensor(indices) - 2 + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1])) + assert_equal(count, torch.tensor([3, 3])) + + def test_uniform_clip_sampler(self): + with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + video_clips = VideoClips(video_list, 5, 5) + sampler = UniformClipSampler(video_clips, 3) + self.assertEqual(len(sampler), 3 * 3) + indices = torch.tensor(list(iter(sampler))) + videos = torch.div(indices, 5, rounding_mode='floor') + v_idxs, count = torch.unique(videos, return_counts=True) + assert_equal(v_idxs, torch.tensor([0, 1, 2])) + assert_equal(count, torch.tensor([3, 3, 3])) + 
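# Each 25-frame video yields 5 clips (frames_per_clip=5, step=5), i.e. global clip indices 0-4, 5-9 and 10-14; + # picking 3 evenly spaced clips per video gives the indices checked below. + 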
assert_equal(indices, torch.tensor([0, 2, 4, 5, 7, 9, 10, 12, 14])) + + def test_uniform_clip_sampler_insufficient_clips(self): + with get_list_of_videos(num_videos=3, sizes=[10, 25, 25]) as video_list: + video_clips = VideoClips(video_list, 5, 5) + sampler = UniformClipSampler(video_clips, 3) + self.assertEqual(len(sampler), 3 * 3) + indices = torch.tensor(list(iter(sampler))) + assert_equal(indices, torch.tensor([0, 0, 1, 2, 4, 6, 7, 9, 11])) + + def test_distributed_sampler_and_uniform_clip_sampler(self): + with get_list_of_videos(num_videos=3, sizes=[25, 25, 25]) as video_list: + video_clips = VideoClips(video_list, 5, 5) + clip_sampler = UniformClipSampler(video_clips, 3) + + distributed_sampler_rank0 = DistributedSampler( + clip_sampler, + num_replicas=2, + rank=0, + group_size=3, + ) + indices = torch.tensor(list(iter(distributed_sampler_rank0))) + self.assertEqual(len(distributed_sampler_rank0), 6) + assert_equal(indices, torch.tensor([0, 2, 4, 10, 12, 14])) + + distributed_sampler_rank1 = DistributedSampler( + clip_sampler, + num_replicas=2, + rank=1, + group_size=3, + ) + indices = torch.tensor(list(iter(distributed_sampler_rank1))) + self.assertEqual(len(distributed_sampler_rank1), 6) + assert_equal(indices, torch.tensor([5, 7, 9, 0, 2, 4])) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..949026d31cb46013e901204a32f17c2f3da0928e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_utils.py @@ -0,0 +1,237 @@ +import os +import torchvision.datasets.utils as utils +import unittest +import unittest.mock +import zipfile +import tarfile +import gzip +import warnings +from torch._utils_internal import get_file_path_2 +from urllib.error import URLError +import itertools +import lzma + +from common_utils import get_tmp_dir, call_args_to_kwargs_only + + +TEST_FILE = get_file_path_2( + os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', 'grace_hopper_517x606.jpg') + + +class Tester(unittest.TestCase): + + def test_check_md5(self): + fpath = TEST_FILE + correct_md5 = '9c0bb82894bb3af7f7675ef2b3b6dcdc' + false_md5 = '' + self.assertTrue(utils.check_md5(fpath, correct_md5)) + self.assertFalse(utils.check_md5(fpath, false_md5)) + + def test_check_integrity(self): + existing_fpath = TEST_FILE + nonexisting_fpath = '' + correct_md5 = '9c0bb82894bb3af7f7675ef2b3b6dcdc' + false_md5 = '' + self.assertTrue(utils.check_integrity(existing_fpath, correct_md5)) + self.assertFalse(utils.check_integrity(existing_fpath, false_md5)) + self.assertTrue(utils.check_integrity(existing_fpath)) + self.assertFalse(utils.check_integrity(nonexisting_fpath)) + + def test_get_google_drive_file_id(self): + url = "https://drive.google.com/file/d/1hbzc_P1FuxMkcabkgn9ZKinBwW683j45/view" + expected = "1hbzc_P1FuxMkcabkgn9ZKinBwW683j45" + + actual = utils._get_google_drive_file_id(url) + assert actual == expected + + def test_get_google_drive_file_id_invalid_url(self): + url = "http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz" + + assert utils._get_google_drive_file_id(url) is None + + def test_detect_file_type(self): + for file, expected in [ + ("foo.tar.xz", (".tar.xz", ".tar", ".xz")), + ("foo.tar", (".tar", ".tar", None)), + ("foo.tar.gz", (".tar.gz", ".tar", ".gz")), + ("foo.tgz", (".tgz", ".tar", ".gz")), + ("foo.gz", (".gz", None, 
".gz")), + ("foo.zip", (".zip", ".zip", None)), + ("foo.xz", (".xz", None, ".xz")), + ]: + with self.subTest(file=file): + self.assertSequenceEqual(utils._detect_file_type(file), expected) + + def test_detect_file_type_no_ext(self): + with self.assertRaises(RuntimeError): + utils._detect_file_type("foo") + + def test_detect_file_type_to_many_exts(self): + with self.assertRaises(RuntimeError): + utils._detect_file_type("foo.bar.tar.gz") + + def test_detect_file_type_unknown_archive_type(self): + with self.assertRaises(RuntimeError): + utils._detect_file_type("foo.bar.gz") + + def test_detect_file_type_unknown_compression(self): + with self.assertRaises(RuntimeError): + utils._detect_file_type("foo.tar.baz") + + def test_detect_file_type_unknown_partial_ext(self): + with self.assertRaises(RuntimeError): + utils._detect_file_type("foo.bar") + + def test_decompress_gzip(self): + def create_compressed(root, content="this is the content"): + file = os.path.join(root, "file") + compressed = f"{file}.gz" + + with gzip.open(compressed, "wb") as fh: + fh.write(content.encode()) + + return compressed, file, content + + with get_tmp_dir() as temp_dir: + compressed, file, content = create_compressed(temp_dir) + + utils._decompress(compressed) + + self.assertTrue(os.path.exists(file)) + + with open(file, "r") as fh: + self.assertEqual(fh.read(), content) + + def test_decompress_lzma(self): + def create_compressed(root, content="this is the content"): + file = os.path.join(root, "file") + compressed = f"{file}.xz" + + with lzma.open(compressed, "wb") as fh: + fh.write(content.encode()) + + return compressed, file, content + + with get_tmp_dir() as temp_dir: + compressed, file, content = create_compressed(temp_dir) + + utils.extract_archive(compressed, temp_dir) + + self.assertTrue(os.path.exists(file)) + + with open(file, "r") as fh: + self.assertEqual(fh.read(), content) + + def test_decompress_no_compression(self): + with self.assertRaises(RuntimeError): + utils._decompress("foo.tar") + + def test_decompress_remove_finished(self): + def create_compressed(root, content="this is the content"): + file = os.path.join(root, "file") + compressed = f"{file}.gz" + + with gzip.open(compressed, "wb") as fh: + fh.write(content.encode()) + + return compressed, file, content + + with get_tmp_dir() as temp_dir: + compressed, file, content = create_compressed(temp_dir) + + utils.extract_archive(compressed, temp_dir, remove_finished=True) + + self.assertFalse(os.path.exists(compressed)) + + def test_extract_archive_defer_to_decompress(self): + filename = "foo" + for ext, remove_finished in itertools.product((".gz", ".xz"), (True, False)): + with self.subTest(ext=ext, remove_finished=remove_finished): + with unittest.mock.patch("torchvision.datasets.utils._decompress") as mock: + file = f"{filename}{ext}" + utils.extract_archive(file, remove_finished=remove_finished) + + mock.assert_called_once() + self.assertEqual( + call_args_to_kwargs_only(mock.call_args, utils._decompress), + dict(from_path=file, to_path=filename, remove_finished=remove_finished), + ) + + def test_extract_zip(self): + def create_archive(root, content="this is the content"): + file = os.path.join(root, "dst.txt") + archive = os.path.join(root, "archive.zip") + + with zipfile.ZipFile(archive, "w") as zf: + zf.writestr(os.path.basename(file), content) + + return archive, file, content + + with get_tmp_dir() as temp_dir: + archive, file, content = create_archive(temp_dir) + + utils.extract_archive(archive, temp_dir) + + 
self.assertTrue(os.path.exists(file)) + + with open(file, "r") as fh: + self.assertEqual(fh.read(), content) + + def test_extract_tar(self): + def create_archive(root, ext, mode, content="this is the content"): + src = os.path.join(root, "src.txt") + dst = os.path.join(root, "dst.txt") + archive = os.path.join(root, f"archive{ext}") + + with open(src, "w") as fh: + fh.write(content) + + with tarfile.open(archive, mode=mode) as fh: + fh.add(src, arcname=os.path.basename(dst)) + + return archive, dst, content + + for ext, mode in zip(['.tar', '.tar.gz', '.tgz'], ['w', 'w:gz', 'w:gz']): + with get_tmp_dir() as temp_dir: + archive, file, content = create_archive(temp_dir, ext, mode) + + utils.extract_archive(archive, temp_dir) + + self.assertTrue(os.path.exists(file)) + + with open(file, "r") as fh: + self.assertEqual(fh.read(), content) + + def test_extract_tar_xz(self): + def create_archive(root, ext, mode, content="this is the content"): + src = os.path.join(root, "src.txt") + dst = os.path.join(root, "dst.txt") + archive = os.path.join(root, f"archive{ext}") + + with open(src, "w") as fh: + fh.write(content) + + with tarfile.open(archive, mode=mode) as fh: + fh.add(src, arcname=os.path.basename(dst)) + + return archive, dst, content + + for ext, mode in zip(['.tar.xz'], ['w:xz']): + with get_tmp_dir() as temp_dir: + archive, file, content = create_archive(temp_dir, ext, mode) + + utils.extract_archive(archive, temp_dir) + + self.assertTrue(os.path.exists(file)) + + with open(file, "r") as fh: + self.assertEqual(fh.read(), content) + + def test_verify_str_arg(self): + self.assertEqual("a", utils.verify_str_arg("a", "arg", ("a",))) + self.assertRaises(ValueError, utils.verify_str_arg, 0, ("a",), "arg") + self.assertRaises(ValueError, utils.verify_str_arg, "b", ("a",), "arg") + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0a9d3bdfc36c14e9af6df08b65adf389af93e526 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils.py @@ -0,0 +1,135 @@ +import contextlib +import os +import torch +import unittest + +from torchvision import io +from torchvision.datasets.video_utils import VideoClips, unfold + +from common_utils import get_tmp_dir +from _assert_utils import assert_equal + + +@contextlib.contextmanager +def get_list_of_videos(num_videos=5, sizes=None, fps=None): + with get_tmp_dir() as tmp_dir: + names = [] + for i in range(num_videos): + if sizes is None: + size = 5 * (i + 1) + else: + size = sizes[i] + if fps is None: + f = 5 + else: + f = fps[i] + data = torch.randint(0, 256, (size, 300, 400, 3), dtype=torch.uint8) + name = os.path.join(tmp_dir, "{}.mp4".format(i)) + names.append(name) + io.write_video(name, data, fps=f) + + yield names + + +class Tester(unittest.TestCase): + + def test_unfold(self): + a = torch.arange(7) + + r = unfold(a, 3, 3, 1) + expected = torch.tensor([ + [0, 1, 2], + [3, 4, 5], + ]) + assert_equal(r, expected, check_stride=False) + + r = unfold(a, 3, 2, 1) + expected = torch.tensor([ + [0, 1, 2], + [2, 3, 4], + [4, 5, 6] + ]) + assert_equal(r, expected, check_stride=False) + + r = unfold(a, 3, 2, 2) + expected = torch.tensor([ + [0, 2, 4], + [2, 4, 6], + ]) + assert_equal(r, expected, check_stride=False) + + @unittest.skipIf(not io.video._av_available(), "this test requires av") + def 
test_video_clips(self): + with get_list_of_videos(num_videos=3) as video_list: + video_clips = VideoClips(video_list, 5, 5, num_workers=2) + assert video_clips.num_clips() == 1 + 2 + 3 + for i, (v_idx, c_idx) in enumerate([(0, 0), (1, 0), (1, 1), (2, 0), (2, 1), (2, 2)]): + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx + + video_clips = VideoClips(video_list, 6, 6) + assert video_clips.num_clips() == 0 + 1 + 2 + for i, (v_idx, c_idx) in enumerate([(1, 0), (2, 0), (2, 1)]): + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx + + video_clips = VideoClips(video_list, 6, 1) + assert video_clips.num_clips() == 0 + (10 - 6 + 1) + (15 - 6 + 1) + for i, v_idx, c_idx in [(0, 1, 0), (4, 1, 4), (5, 2, 0), (6, 2, 1)]: + video_idx, clip_idx = video_clips.get_clip_location(i) + assert video_idx == v_idx + assert clip_idx == c_idx + + @unittest.skipIf(not io.video._av_available(), "this test requires av") + def test_video_clips_custom_fps(self): + with get_list_of_videos(num_videos=3, sizes=[12, 12, 12], fps=[3, 4, 6]) as video_list: + num_frames = 4 + for fps in [1, 3, 4, 10]: + video_clips = VideoClips(video_list, num_frames, num_frames, fps, num_workers=2) + for i in range(video_clips.num_clips()): + video, audio, info, video_idx = video_clips.get_clip(i) + assert video.shape[0] == num_frames + assert info["video_fps"] == fps + # TODO add tests checking that the content is right + + def test_compute_clips_for_video(self): + video_pts = torch.arange(30) + # case 1: single clip + num_frames = 13 + orig_fps = 30 + duration = float(len(video_pts)) / orig_fps + new_fps = 13 + clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, + orig_fps, new_fps) + resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps) + assert len(clips) == 1 + assert_equal(clips, idxs) + assert_equal(idxs[0], resampled_idxs) + + # case 2: all frames appear only once + num_frames = 4 + orig_fps = 30 + duration = float(len(video_pts)) / orig_fps + new_fps = 12 + clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, + orig_fps, new_fps) + resampled_idxs = VideoClips._resample_video_idx(int(duration * new_fps), orig_fps, new_fps) + assert len(clips) == 3 + assert_equal(clips, idxs) + assert_equal(idxs.flatten(), resampled_idxs) + + # case 3: frames aren't enough for a clip + num_frames = 32 + orig_fps = 30 + new_fps = 13 + with self.assertWarns(UserWarning): + clips, idxs = VideoClips.compute_clips_for_video(video_pts, num_frames, num_frames, + orig_fps, new_fps) + assert len(clips) == 0 + assert len(idxs) == 0 + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils_opt.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..8075c701ed90a8f51723034c9950dffc24739d97 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_datasets_video_utils_opt.py @@ -0,0 +1,11 @@ +import unittest +from torchvision import set_video_backend +import test_datasets_video_utils + +# Disabling the video backend switching temporarily +# set_video_backend('video_reader') + + +if __name__ == '__main__': + suite = unittest.TestLoader().loadTestsFromModule(test_datasets_video_utils) + unittest.TextTestRunner(verbosity=1).run(suite) diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/test_functional_tensor.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..12a8d41914bd354815853de7653be81d955fb833 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_functional_tensor.py @@ -0,0 +1,1162 @@ +import itertools +import os +import unittest +import colorsys +import math + +import numpy as np +import pytest + +import torch +import torchvision.transforms.functional_tensor as F_t +import torchvision.transforms.functional_pil as F_pil +import torchvision.transforms.functional as F +import torchvision.transforms as T +from torchvision.transforms import InterpolationMode + +from common_utils import TransformsTester, cpu_and_gpu, needs_cuda +from _assert_utils import assert_equal + +from typing import Dict, List, Sequence, Tuple + + +NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC + + +@pytest.fixture(scope='module') +def tester(): + # instanciation of the Tester class used for equality assertions and other utilities + # TODO: remove this eventually when we don't need the class anymore + return Tester() + + +class Tester(TransformsTester): + + def setUp(self): + self.device = "cpu" + + def _test_fn_on_batch(self, batch_tensors, fn, scripted_fn_atol=1e-8, **fn_kwargs): + transformed_batch = fn(batch_tensors, **fn_kwargs) + for i in range(len(batch_tensors)): + img_tensor = batch_tensors[i, ...] + transformed_img = fn(img_tensor, **fn_kwargs) + assert_equal(transformed_img, transformed_batch[i, ...]) + + if scripted_fn_atol >= 0: + scripted_fn = torch.jit.script(fn) + # scriptable function test + s_transformed_batch = scripted_fn(batch_tensors, **fn_kwargs) + torch.testing.assert_close(transformed_batch, s_transformed_batch, rtol=1e-5, atol=scripted_fn_atol) + + def test_assert_image_tensor(self): + shape = (100,) + tensor = torch.rand(*shape, dtype=torch.float, device=self.device) + + list_of_methods = [(F_t._get_image_size, (tensor, )), (F_t.vflip, (tensor, )), + (F_t.hflip, (tensor, )), (F_t.crop, (tensor, 1, 2, 4, 5)), + (F_t.adjust_brightness, (tensor, 0.)), (F_t.adjust_contrast, (tensor, 1.)), + (F_t.adjust_hue, (tensor, -0.5)), (F_t.adjust_saturation, (tensor, 2.)), + (F_t.center_crop, (tensor, [10, 11])), (F_t.five_crop, (tensor, [10, 11])), + (F_t.ten_crop, (tensor, [10, 11])), (F_t.pad, (tensor, [2, ], 2, "constant")), + (F_t.resize, (tensor, [10, 11])), (F_t.perspective, (tensor, [0.2, ])), + (F_t.gaussian_blur, (tensor, (2, 2), (0.7, 0.5))), + (F_t.invert, (tensor, )), (F_t.posterize, (tensor, 0)), + (F_t.solarize, (tensor, 0.3)), (F_t.adjust_sharpness, (tensor, 0.3)), + (F_t.autocontrast, (tensor, )), (F_t.equalize, (tensor, ))] + + for func, args in list_of_methods: + with self.assertRaises(Exception) as context: + func(*args) + + self.assertTrue('Tensor is not a torch image.' 
in str(context.exception)) + + def test_vflip(self): + script_vflip = torch.jit.script(F.vflip) + + img_tensor, pil_img = self._create_data(16, 18, device=self.device) + vflipped_img = F.vflip(img_tensor) + vflipped_pil_img = F.vflip(pil_img) + self.compareTensorToPIL(vflipped_img, vflipped_pil_img) + + # scriptable function test + vflipped_img_script = script_vflip(img_tensor) + assert_equal(vflipped_img, vflipped_img_script) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + self._test_fn_on_batch(batch_tensors, F.vflip) + + def test_hflip(self): + script_hflip = torch.jit.script(F.hflip) + + img_tensor, pil_img = self._create_data(16, 18, device=self.device) + hflipped_img = F.hflip(img_tensor) + hflipped_pil_img = F.hflip(pil_img) + self.compareTensorToPIL(hflipped_img, hflipped_pil_img) + + # scriptable function test + hflipped_img_script = script_hflip(img_tensor) + assert_equal(hflipped_img, hflipped_img_script) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + self._test_fn_on_batch(batch_tensors, F.hflip) + + def test_crop(self): + script_crop = torch.jit.script(F.crop) + + img_tensor, pil_img = self._create_data(16, 18, device=self.device) + + test_configs = [ + (1, 2, 4, 5), # crop inside top-left corner + (2, 12, 3, 4), # crop inside top-right corner + (8, 3, 5, 6), # crop inside bottom-left corner + (8, 11, 4, 3), # crop inside bottom-right corner + ] + + for top, left, height, width in test_configs: + pil_img_cropped = F.crop(pil_img, top, left, height, width) + + img_tensor_cropped = F.crop(img_tensor, top, left, height, width) + self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped) + + img_tensor_cropped = script_crop(img_tensor, top, left, height, width) + self.compareTensorToPIL(img_tensor_cropped, pil_img_cropped) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + self._test_fn_on_batch(batch_tensors, F.crop, top=top, left=left, height=height, width=width) + + def test_hsv2rgb(self): + scripted_fn = torch.jit.script(F_t._hsv2rgb) + shape = (3, 100, 150) + for _ in range(10): + hsv_img = torch.rand(*shape, dtype=torch.float, device=self.device) + rgb_img = F_t._hsv2rgb(hsv_img) + ft_img = rgb_img.permute(1, 2, 0).flatten(0, 1) + + h, s, v, = hsv_img.unbind(0) + h = h.flatten().cpu().numpy() + s = s.flatten().cpu().numpy() + v = v.flatten().cpu().numpy() + + rgb = [] + for h1, s1, v1 in zip(h, s, v): + rgb.append(colorsys.hsv_to_rgb(h1, s1, v1)) + colorsys_img = torch.tensor(rgb, dtype=torch.float32, device=self.device) + torch.testing.assert_close(ft_img, colorsys_img, rtol=0.0, atol=1e-5) + + s_rgb_img = scripted_fn(hsv_img) + torch.testing.assert_close(rgb_img, s_rgb_img) + + batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float() + self._test_fn_on_batch(batch_tensors, F_t._hsv2rgb) + + def test_rgb2hsv(self): + scripted_fn = torch.jit.script(F_t._rgb2hsv) + shape = (3, 150, 100) + for _ in range(10): + rgb_img = torch.rand(*shape, dtype=torch.float, device=self.device) + hsv_img = F_t._rgb2hsv(rgb_img) + ft_hsv_img = hsv_img.permute(1, 2, 0).flatten(0, 1) + + r, g, b, = rgb_img.unbind(dim=-3) + r = r.flatten().cpu().numpy() + g = g.flatten().cpu().numpy() + b = b.flatten().cpu().numpy() + + hsv = [] + for r1, g1, b1 in zip(r, g, b): + hsv.append(colorsys.rgb_to_hsv(r1, g1, b1)) + + colorsys_img = torch.tensor(hsv, dtype=torch.float32, device=self.device) + + ft_hsv_img_h, ft_hsv_img_sv = torch.split(ft_hsv_img, 
[1, 2], dim=1) + colorsys_img_h, colorsys_img_sv = torch.split(colorsys_img, [1, 2], dim=1) + + max_diff_h = ((colorsys_img_h * 2 * math.pi).sin() - (ft_hsv_img_h * 2 * math.pi).sin()).abs().max() + max_diff_sv = (colorsys_img_sv - ft_hsv_img_sv).abs().max() + max_diff = max(max_diff_h, max_diff_sv) + self.assertLess(max_diff, 1e-5) + + s_hsv_img = scripted_fn(rgb_img) + torch.testing.assert_close(hsv_img, s_hsv_img, rtol=1e-5, atol=1e-7) + + batch_tensors = self._create_data_batch(120, 100, num_samples=4, device=self.device).float() + self._test_fn_on_batch(batch_tensors, F_t._rgb2hsv) + + def test_rgb_to_grayscale(self): + script_rgb_to_grayscale = torch.jit.script(F.rgb_to_grayscale) + + img_tensor, pil_img = self._create_data(32, 34, device=self.device) + + for num_output_channels in (3, 1): + gray_pil_image = F.rgb_to_grayscale(pil_img, num_output_channels=num_output_channels) + gray_tensor = F.rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels) + + self.approxEqualTensorToPIL(gray_tensor.float(), gray_pil_image, tol=1.0 + 1e-10, agg_method="max") + + s_gray_tensor = script_rgb_to_grayscale(img_tensor, num_output_channels=num_output_channels) + assert_equal(s_gray_tensor, gray_tensor) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + self._test_fn_on_batch(batch_tensors, F.rgb_to_grayscale, num_output_channels=num_output_channels) + + def test_center_crop(self): + script_center_crop = torch.jit.script(F.center_crop) + + img_tensor, pil_img = self._create_data(32, 34, device=self.device) + + cropped_pil_image = F.center_crop(pil_img, [10, 11]) + + cropped_tensor = F.center_crop(img_tensor, [10, 11]) + self.compareTensorToPIL(cropped_tensor, cropped_pil_image) + + cropped_tensor = script_center_crop(img_tensor, [10, 11]) + self.compareTensorToPIL(cropped_tensor, cropped_pil_image) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + self._test_fn_on_batch(batch_tensors, F.center_crop, output_size=[10, 11]) + + def test_five_crop(self): + script_five_crop = torch.jit.script(F.five_crop) + + img_tensor, pil_img = self._create_data(32, 34, device=self.device) + + cropped_pil_images = F.five_crop(pil_img, [10, 11]) + + cropped_tensors = F.five_crop(img_tensor, [10, 11]) + for i in range(5): + self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i]) + + cropped_tensors = script_five_crop(img_tensor, [10, 11]) + for i in range(5): + self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i]) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + tuple_transformed_batches = F.five_crop(batch_tensors, [10, 11]) + for i in range(len(batch_tensors)): + img_tensor = batch_tensors[i, ...] + tuple_transformed_imgs = F.five_crop(img_tensor, [10, 11]) + self.assertEqual(len(tuple_transformed_imgs), len(tuple_transformed_batches)) + + for j in range(len(tuple_transformed_imgs)): + true_transformed_img = tuple_transformed_imgs[j] + transformed_img = tuple_transformed_batches[j][i, ...] 
+ assert_equal(true_transformed_img, transformed_img) + + # scriptable function test + s_tuple_transformed_batches = script_five_crop(batch_tensors, [10, 11]) + for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches): + assert_equal(transformed_batch, s_transformed_batch) + + def test_ten_crop(self): + script_ten_crop = torch.jit.script(F.ten_crop) + + img_tensor, pil_img = self._create_data(32, 34, device=self.device) + + cropped_pil_images = F.ten_crop(pil_img, [10, 11]) + + cropped_tensors = F.ten_crop(img_tensor, [10, 11]) + for i in range(10): + self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i]) + + cropped_tensors = script_ten_crop(img_tensor, [10, 11]) + for i in range(10): + self.compareTensorToPIL(cropped_tensors[i], cropped_pil_images[i]) + + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + tuple_transformed_batches = F.ten_crop(batch_tensors, [10, 11]) + for i in range(len(batch_tensors)): + img_tensor = batch_tensors[i, ...] + tuple_transformed_imgs = F.ten_crop(img_tensor, [10, 11]) + self.assertEqual(len(tuple_transformed_imgs), len(tuple_transformed_batches)) + + for j in range(len(tuple_transformed_imgs)): + true_transformed_img = tuple_transformed_imgs[j] + transformed_img = tuple_transformed_batches[j][i, ...] + assert_equal(true_transformed_img, transformed_img) + + # scriptable function test + s_tuple_transformed_batches = script_ten_crop(batch_tensors, [10, 11]) + for transformed_batch, s_transformed_batch in zip(tuple_transformed_batches, s_tuple_transformed_batches): + assert_equal(transformed_batch, s_transformed_batch) + + def test_pad(self): + script_fn = torch.jit.script(F.pad) + tensor, pil_img = self._create_data(7, 8, device=self.device) + batch_tensors = self._create_data_batch(16, 18, num_samples=4, device=self.device) + + for dt in [None, torch.float32, torch.float64, torch.float16]: + + if dt == torch.float16 and torch.device(self.device).type == "cpu": + # skip float16 on CPU case + continue + + if dt is not None: + # This is a trivial cast to float of uint8 data to test all cases + tensor = tensor.to(dt) + batch_tensors = batch_tensors.to(dt) + + for pad in [2, [3, ], [0, 3], (3, 3), [4, 2, 4, 3]]: + configs = [ + {"padding_mode": "constant", "fill": 0}, + {"padding_mode": "constant", "fill": 10}, + {"padding_mode": "constant", "fill": 20}, + {"padding_mode": "edge"}, + {"padding_mode": "reflect"}, + {"padding_mode": "symmetric"}, + ] + for kwargs in configs: + pad_tensor = F_t.pad(tensor, pad, **kwargs) + pad_pil_img = F_pil.pad(pil_img, pad, **kwargs) + + pad_tensor_8b = pad_tensor + # we need to cast to uint8 to compare with PIL image + if pad_tensor_8b.dtype != torch.uint8: + pad_tensor_8b = pad_tensor_8b.to(torch.uint8) + + self.compareTensorToPIL(pad_tensor_8b, pad_pil_img, msg="{}, {}".format(pad, kwargs)) + + if isinstance(pad, int): + script_pad = [pad, ] + else: + script_pad = pad + pad_tensor_script = script_fn(tensor, script_pad, **kwargs) + assert_equal(pad_tensor, pad_tensor_script, msg="{}, {}".format(pad, kwargs)) + + self._test_fn_on_batch(batch_tensors, F.pad, padding=script_pad, **kwargs) + + def test_resized_crop(self): + # test values of F.resized_crop in several cases: + # 1) resize to the same size, crop to the same size => should be identity + tensor, _ = self._create_data(26, 36, device=self.device) + + for mode in [NEAREST, BILINEAR, BICUBIC]: + out_tensor = F.resized_crop(tensor, top=0, left=0, height=26, width=36, size=[26, 
36], interpolation=mode) + assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + + # 2) resize by half and crop a TL corner + tensor, _ = self._create_data(26, 36, device=self.device) + out_tensor = F.resized_crop(tensor, top=0, left=0, height=20, width=30, size=[10, 15], interpolation=NEAREST) + expected_out_tensor = tensor[:, :20:2, :30:2] + assert_equal( + expected_out_tensor, + out_tensor, + check_stride=False, + msg="{} vs {}".format(expected_out_tensor[0, :10, :10], out_tensor[0, :10, :10]), + ) + + batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device) + self._test_fn_on_batch( + batch_tensors, F.resized_crop, top=1, left=2, height=20, width=30, size=[10, 15], interpolation=NEAREST + ) + + def _test_affine_identity_map(self, tensor, scripted_affine): + # 1) identity map + out_tensor = F.affine(tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) + + assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + out_tensor = scripted_affine( + tensor, angle=0, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST + ) + assert_equal(tensor, out_tensor, msg="{} vs {}".format(out_tensor[0, :5, :5], tensor[0, :5, :5])) + + def _test_affine_square_rotations(self, tensor, pil_img, scripted_affine): + # 2) Test rotation + test_configs = [ + (90, torch.rot90(tensor, k=1, dims=(-1, -2))), + (45, None), + (30, None), + (-30, None), + (-45, None), + (-90, torch.rot90(tensor, k=-1, dims=(-1, -2))), + (180, torch.rot90(tensor, k=2, dims=(-1, -2))), + ] + for a, true_tensor in test_configs: + out_pil_img = F.affine( + pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST + ) + out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))).to(self.device) + + for fn in [F.affine, scripted_affine]: + out_tensor = fn( + tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST + ) + if true_tensor is not None: + assert_equal( + true_tensor, + out_tensor, + msg="{}\n{} vs \n{}".format(a, out_tensor[0, :5, :5], true_tensor[0, :5, :5]), + check_stride=False, + ) + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 + ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] + # Tolerance : less than 6% of different pixels + self.assertLess( + ratio_diff_pixels, + 0.06, + msg="{}\n{} vs \n{}".format( + ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] + ) + ) + + def _test_affine_rect_rotations(self, tensor, pil_img, scripted_affine): + test_configs = [ + 90, 45, 15, -30, -60, -120 + ] + for a in test_configs: + + out_pil_img = F.affine( + pil_img, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST + ) + out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) + + for fn in [F.affine, scripted_affine]: + out_tensor = fn( + tensor, angle=a, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST + ).cpu() + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 + ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] + # Tolerance : less than 3% of different pixels + self.assertLess( + ratio_diff_pixels, + 0.03, + msg="{}: {}\n{} vs \n{}".format( + a, 
ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] + ) + ) + + def _test_affine_translations(self, tensor, pil_img, scripted_affine): + # 3) Test translation + test_configs = [ + [10, 12], (-12, -13) + ] + for t in test_configs: + + out_pil_img = F.affine(pil_img, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) + + for fn in [F.affine, scripted_affine]: + out_tensor = fn(tensor, angle=0, translate=t, scale=1.0, shear=[0.0, 0.0], interpolation=NEAREST) + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + self.compareTensorToPIL(out_tensor, out_pil_img) + + def _test_affine_all_ops(self, tensor, pil_img, scripted_affine): + # 4) Test rotation + translation + scale + share + test_configs = [ + (45.5, [5, 6], 1.0, [0.0, 0.0], None), + (33, (5, -4), 1.0, [0.0, 0.0], [0, 0, 0]), + (45, [-5, 4], 1.2, [0.0, 0.0], (1, 2, 3)), + (33, (-4, -8), 2.0, [0.0, 0.0], [255, 255, 255]), + (85, (10, -10), 0.7, [0.0, 0.0], [1, ]), + (0, [0, 0], 1.0, [35.0, ], (2.0, )), + (-25, [0, 0], 1.2, [0.0, 15.0], None), + (-45, [-10, 0], 0.7, [2.0, 5.0], None), + (-45, [-10, -10], 1.2, [4.0, 5.0], None), + (-90, [0, 0], 1.0, [0.0, 0.0], None), + ] + for r in [NEAREST, ]: + for a, t, s, sh, f in test_configs: + f_pil = int(f[0]) if f is not None and len(f) == 1 else f + out_pil_img = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh, interpolation=r, fill=f_pil) + out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) + + for fn in [F.affine, scripted_affine]: + out_tensor = fn(tensor, angle=a, translate=t, scale=s, shear=sh, interpolation=r, fill=f).cpu() + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 + ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] + # Tolerance : less than 5% (cpu), 6% (cuda) of different pixels + tol = 0.06 if self.device == "cuda" else 0.05 + self.assertLess( + ratio_diff_pixels, + tol, + msg="{}: {}\n{} vs \n{}".format( + (r, a, t, s, sh, f), ratio_diff_pixels, out_tensor[0, :7, :7], out_pil_tensor[0, :7, :7] + ) + ) + + def test_affine(self): + # Tests on square and rectangular images + scripted_affine = torch.jit.script(F.affine) + + data = [self._create_data(26, 26, device=self.device), self._create_data(32, 26, device=self.device)] + for tensor, pil_img in data: + + for dt in [None, torch.float32, torch.float64, torch.float16]: + + if dt == torch.float16 and torch.device(self.device).type == "cpu": + # skip float16 on CPU case + continue + + if dt is not None: + tensor = tensor.to(dtype=dt) + + self._test_affine_identity_map(tensor, scripted_affine) + if pil_img.size[0] == pil_img.size[1]: + self._test_affine_square_rotations(tensor, pil_img, scripted_affine) + else: + self._test_affine_rect_rotations(tensor, pil_img, scripted_affine) + self._test_affine_translations(tensor, pil_img, scripted_affine) + self._test_affine_all_ops(tensor, pil_img, scripted_affine) + + batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device) + if dt is not None: + batch_tensors = batch_tensors.to(dtype=dt) + + self._test_fn_on_batch( + batch_tensors, F.affine, angle=-43, translate=[-3, 4], scale=1.2, shear=[4.0, 5.0] + ) + + tensor, pil_img = data[0] + # assert deprecation warning and non-BC + with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): + res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, 
shear=[0.0, 0.0], resample=2) + res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) + assert_equal(res1, res2) + + # assert changed type warning + with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): + res1 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=2) + res2 = F.affine(tensor, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], interpolation=BILINEAR) + assert_equal(res1, res2) + + with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"): + res1 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fillcolor=10) + res2 = F.affine(pil_img, 45, translate=[0, 0], scale=1.0, shear=[0.0, 0.0], fill=10) + # we convert the PIL images to numpy as assert_equal doesn't work on PIL images. + assert_equal(np.asarray(res1), np.asarray(res2)) + + def _test_rotate_all_options(self, tensor, pil_img, scripted_rotate, centers): + img_size = pil_img.size + dt = tensor.dtype + for r in [NEAREST, ]: + for a in range(-180, 180, 17): + for e in [True, False]: + for c in centers: + for f in [None, [0, 0, 0], (1, 2, 3), [255, 255, 255], [1, ], (2.0, )]: + f_pil = int(f[0]) if f is not None and len(f) == 1 else f + out_pil_img = F.rotate(pil_img, angle=a, interpolation=r, expand=e, center=c, fill=f_pil) + out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) + for fn in [F.rotate, scripted_rotate]: + out_tensor = fn(tensor, angle=a, interpolation=r, expand=e, center=c, fill=f).cpu() + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + self.assertEqual( + out_tensor.shape, + out_pil_tensor.shape, + msg="{}: {} vs {}".format( + (img_size, r, dt, a, e, c), out_tensor.shape, out_pil_tensor.shape + )) + + num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 + ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] + # Tolerance : less than 3% of different pixels + self.assertLess( + ratio_diff_pixels, + 0.03, + msg="{}: {}\n{} vs \n{}".format( + (img_size, r, dt, a, e, c, f), + ratio_diff_pixels, + out_tensor[0, :7, :7], + out_pil_tensor[0, :7, :7] + ) + ) + + def test_rotate(self): + # Tests on square image + scripted_rotate = torch.jit.script(F.rotate) + + data = [self._create_data(26, 26, device=self.device), self._create_data(32, 26, device=self.device)] + for tensor, pil_img in data: + + img_size = pil_img.size + centers = [ + None, + (int(img_size[0] * 0.3), int(img_size[0] * 0.4)), + [int(img_size[0] * 0.5), int(img_size[0] * 0.6)] + ] + + for dt in [None, torch.float32, torch.float64, torch.float16]: + + if dt == torch.float16 and torch.device(self.device).type == "cpu": + # skip float16 on CPU case + continue + + if dt is not None: + tensor = tensor.to(dtype=dt) + + self._test_rotate_all_options(tensor, pil_img, scripted_rotate, centers) + + batch_tensors = self._create_data_batch(26, 36, num_samples=4, device=self.device) + if dt is not None: + batch_tensors = batch_tensors.to(dtype=dt) + + center = (20, 22) + self._test_fn_on_batch( + batch_tensors, F.rotate, angle=32, interpolation=NEAREST, expand=True, center=center + ) + tensor, pil_img = data[0] + # assert deprecation warning and non-BC + with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): + res1 = F.rotate(tensor, 45, resample=2) + res2 = F.rotate(tensor, 45, interpolation=BILINEAR) + assert_equal(res1, res2) + + # assert 
changed type warning + with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): + res1 = F.rotate(tensor, 45, interpolation=2) + res2 = F.rotate(tensor, 45, interpolation=BILINEAR) + assert_equal(res1, res2) + + def test_gaussian_blur(self): + small_image_tensor = torch.from_numpy( + np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3)) + ).permute(2, 0, 1).to(self.device) + + large_image_tensor = torch.from_numpy( + np.arange(26 * 28, dtype="uint8").reshape((1, 26, 28)) + ).to(self.device) + + scripted_transform = torch.jit.script(F.gaussian_blur) + + # true_cv2_results = { + # # np_img = np.arange(3 * 10 * 12, dtype="uint8").reshape((10, 12, 3)) + # # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.8) + # "3_3_0.8": ... + # # cv2.GaussianBlur(np_img, ksize=(3, 3), sigmaX=0.5) + # "3_3_0.5": ... + # # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.8) + # "3_5_0.8": ... + # # cv2.GaussianBlur(np_img, ksize=(3, 5), sigmaX=0.5) + # "3_5_0.5": ... + # # np_img2 = np.arange(26 * 28, dtype="uint8").reshape((26, 28)) + # # cv2.GaussianBlur(np_img2, ksize=(23, 23), sigmaX=1.7) + # "23_23_1.7": ... + # } + p = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'assets', 'gaussian_blur_opencv_results.pt') + true_cv2_results = torch.load(p) + + for tensor in [small_image_tensor, large_image_tensor]: + + for dt in [None, torch.float32, torch.float64, torch.float16]: + if dt == torch.float16 and torch.device(self.device).type == "cpu": + # skip float16 on CPU case + continue + + if dt is not None: + tensor = tensor.to(dtype=dt) + + for ksize in [(3, 3), [3, 5], (23, 23)]: + for sigma in [[0.5, 0.5], (0.5, 0.5), (0.8, 0.8), (1.7, 1.7)]: + + _ksize = (ksize, ksize) if isinstance(ksize, int) else ksize + _sigma = sigma[0] if sigma is not None else None + shape = tensor.shape + gt_key = "{}_{}_{}__{}_{}_{}".format( + shape[-2], shape[-1], shape[-3], + _ksize[0], _ksize[1], _sigma + ) + if gt_key not in true_cv2_results: + continue + + true_out = torch.tensor( + true_cv2_results[gt_key] + ).reshape(shape[-2], shape[-1], shape[-3]).permute(2, 0, 1).to(tensor) + + for fn in [F.gaussian_blur, scripted_transform]: + out = fn(tensor, kernel_size=ksize, sigma=sigma) + torch.testing.assert_close( + out, true_out, rtol=0.0, atol=1.0, check_stride=False, + msg="{}, {}".format(ksize, sigma) + ) + + +@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device") +class CUDATester(Tester): + + def setUp(self): + self.device = "cuda" + + def test_scale_channel(self): + """Make sure that _scale_channel gives the same results on CPU and GPU as + histc or bincount are used depending on the device. + """ + # TODO: when # https://github.com/pytorch/pytorch/issues/53194 is fixed, + # only use bincount and remove that test. + size = (1_000,) + img_chan = torch.randint(0, 256, size=size).to('cpu') + scaled_cpu = F_t._scale_channel(img_chan) + scaled_cuda = F_t._scale_channel(img_chan.to('cuda')) + assert_equal(scaled_cpu, scaled_cuda.to('cpu')) + + +def _get_data_dims_and_points_for_perspective(): + # Ideally we would parametrize independently over data dims and points, but + # we want to tests on some points that also depend on the data dims. + # Pytest doesn't support covariant parametrization, so we do it somewhat manually here. 
+ + data_dims = [(26, 34), (26, 26)] + points = [ + [[[0, 0], [33, 0], [33, 25], [0, 25]], [[3, 2], [32, 3], [30, 24], [2, 25]]], + [[[3, 2], [32, 3], [30, 24], [2, 25]], [[0, 0], [33, 0], [33, 25], [0, 25]]], + [[[3, 2], [32, 3], [30, 24], [2, 25]], [[5, 5], [30, 3], [33, 19], [4, 25]]], + ] + + dims_and_points = list(itertools.product(data_dims, points)) + + # up to here, we could just have used 2 @parametrized. + # Down below is the covarariant part as the points depend on the data dims. + + n = 10 + for dim in data_dims: + points += [ + (dim, T.RandomPerspective.get_params(dim[1], dim[0], i / n)) + for i in range(n) + ] + return dims_and_points + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dims_and_points', _get_data_dims_and_points_for_perspective()) +@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16]) +@pytest.mark.parametrize('fill', (None, [0, 0, 0], [1, 2, 3], [255, 255, 255], [1, ], (2.0, ))) +@pytest.mark.parametrize('fn', [F.perspective, torch.jit.script(F.perspective)]) +def test_perspective_pil_vs_tensor(device, dims_and_points, dt, fill, fn, tester): + + if dt == torch.float16 and device == "cpu": + # skip float16 on CPU case + return + + data_dims, (spoints, epoints) = dims_and_points + + tensor, pil_img = tester._create_data(*data_dims, device=device) + if dt is not None: + tensor = tensor.to(dtype=dt) + + interpolation = NEAREST + fill_pil = int(fill[0]) if fill is not None and len(fill) == 1 else fill + out_pil_img = F.perspective(pil_img, startpoints=spoints, endpoints=epoints, interpolation=interpolation, + fill=fill_pil) + out_pil_tensor = torch.from_numpy(np.array(out_pil_img).transpose((2, 0, 1))) + out_tensor = fn(tensor, startpoints=spoints, endpoints=epoints, interpolation=interpolation, fill=fill).cpu() + + if out_tensor.dtype != torch.uint8: + out_tensor = out_tensor.to(torch.uint8) + + num_diff_pixels = (out_tensor != out_pil_tensor).sum().item() / 3.0 + ratio_diff_pixels = num_diff_pixels / out_tensor.shape[-1] / out_tensor.shape[-2] + # Tolerance : less than 5% of different pixels + assert ratio_diff_pixels < 0.05 + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dims_and_points', _get_data_dims_and_points_for_perspective()) +@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16]) +def test_perspective_batch(device, dims_and_points, dt, tester): + + if dt == torch.float16 and device == "cpu": + # skip float16 on CPU case + return + + data_dims, (spoints, epoints) = dims_and_points + + batch_tensors = tester._create_data_batch(*data_dims, num_samples=4, device=device) + if dt is not None: + batch_tensors = batch_tensors.to(dtype=dt) + + # Ignore the equivalence between scripted and regular function on float16 cuda. The pixels at + # the border may be entirely different due to small rounding errors. 
+ scripted_fn_atol = -1 if (dt == torch.float16 and device == "cuda") else 1e-8 + tester._test_fn_on_batch( + batch_tensors, F.perspective, scripted_fn_atol=scripted_fn_atol, + startpoints=spoints, endpoints=epoints, interpolation=NEAREST + ) + + +def test_perspective_interpolation_warning(tester): + # assert changed type warning + spoints = [[0, 0], [33, 0], [33, 25], [0, 25]] + epoints = [[3, 2], [32, 3], [30, 24], [2, 25]] + tensor = torch.randint(0, 256, (3, 26, 26)) + with tester.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): + res1 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=2) + res2 = F.perspective(tensor, startpoints=spoints, endpoints=epoints, interpolation=BILINEAR) + tester.assertTrue(res1.equal(res2)) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16]) +@pytest.mark.parametrize('size', [32, 26, [32, ], [32, 32], (32, 32), [26, 35]]) +@pytest.mark.parametrize('max_size', [None, 34, 40, 1000]) +@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC, NEAREST]) +def test_resize(device, dt, size, max_size, interpolation, tester): + + if dt == torch.float16 and device == "cpu": + # skip float16 on CPU case + return + + if max_size is not None and isinstance(size, Sequence) and len(size) != 1: + return # unsupported + + torch.manual_seed(12) + script_fn = torch.jit.script(F.resize) + tensor, pil_img = tester._create_data(26, 36, device=device) + batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device) + + if dt is not None: + # This is a trivial cast to float of uint8 data to test all cases + tensor = tensor.to(dt) + batch_tensors = batch_tensors.to(dt) + + resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, max_size=max_size) + resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation, max_size=max_size) + + assert resized_tensor.size()[1:] == resized_pil_img.size[::-1] + + if interpolation not in [NEAREST, ]: + # We can not check values if mode = NEAREST, as results are different + # E.g. resized_tensor = [[a, a, b, c, d, d, e, ...]] + # E.g. 
resized_pil_img = [[a, b, c, c, d, e, f, ...]] + resized_tensor_f = resized_tensor + # we need to cast to uint8 to compare with PIL image + if resized_tensor_f.dtype == torch.uint8: + resized_tensor_f = resized_tensor_f.to(torch.float) + + # Pay attention to high tolerance for MAE + tester.approxEqualTensorToPIL(resized_tensor_f, resized_pil_img, tol=8.0) + + if isinstance(size, int): + script_size = [size, ] + else: + script_size = size + + resize_result = script_fn( + tensor, size=script_size, interpolation=interpolation, max_size=max_size + ) + assert_equal(resized_tensor, resize_result) + + tester._test_fn_on_batch( + batch_tensors, F.resize, size=script_size, interpolation=interpolation, max_size=max_size + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +def test_resize_asserts(device, tester): + + tensor, pil_img = tester._create_data(26, 36, device=device) + + # assert changed type warning + with pytest.warns(UserWarning, match=r"Argument interpolation should be of type InterpolationMode"): + res1 = F.resize(tensor, size=32, interpolation=2) + + res2 = F.resize(tensor, size=32, interpolation=BILINEAR) + assert_equal(res1, res2) + + for img in (tensor, pil_img): + exp_msg = "max_size should only be passed if size specifies the length of the smaller edge" + with pytest.raises(ValueError, match=exp_msg): + F.resize(img, size=(32, 34), max_size=35) + with pytest.raises(ValueError, match="max_size = 32 must be strictly greater"): + F.resize(img, size=32, max_size=32) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dt', [None, torch.float32, torch.float64, torch.float16]) +@pytest.mark.parametrize('size', [[96, 72], [96, 420], [420, 72]]) +@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC]) +def test_resize_antialias(device, dt, size, interpolation, tester): + + if dt == torch.float16 and device == "cpu": + # skip float16 on CPU case + return + + torch.manual_seed(12) + script_fn = torch.jit.script(F.resize) + tensor, pil_img = tester._create_data(320, 290, device=device) + + if dt is not None: + # This is a trivial cast to float of uint8 data to test all cases + tensor = tensor.to(dt) + + resized_tensor = F.resize(tensor, size=size, interpolation=interpolation, antialias=True) + resized_pil_img = F.resize(pil_img, size=size, interpolation=interpolation) + + tester.assertEqual( + resized_tensor.size()[1:], resized_pil_img.size[::-1], + msg=f"{size}, {interpolation}, {dt}" + ) + + resized_tensor_f = resized_tensor + # we need to cast to uint8 to compare with PIL image + if resized_tensor_f.dtype == torch.uint8: + resized_tensor_f = resized_tensor_f.to(torch.float) + + tester.approxEqualTensorToPIL( + resized_tensor_f, resized_pil_img, tol=0.5, msg=f"{size}, {interpolation}, {dt}" + ) + + accepted_tol = 1.0 + 1e-5 + if interpolation == BICUBIC: + # this overall mean value to make the tests pass + # High value is mostly required for test cases with + # downsampling and upsampling where we can not exactly + # match PIL implementation. 
+ accepted_tol = 15.0 + + tester.approxEqualTensorToPIL( + resized_tensor_f, resized_pil_img, tol=accepted_tol, agg_method="max", + msg=f"{size}, {interpolation}, {dt}" + ) + + if isinstance(size, int): + script_size = [size, ] + else: + script_size = size + + resize_result = script_fn(tensor, size=script_size, interpolation=interpolation, antialias=True) + tester.assertTrue(resized_tensor.equal(resize_result), msg=f"{size}, {interpolation}, {dt}") + + +@needs_cuda +@pytest.mark.parametrize('interpolation', [BILINEAR, BICUBIC]) +def test_assert_resize_antialias(interpolation, tester): + + # Checks implementation on very large scales + # and catch TORCH_CHECK inside interpolate_aa_kernels.cu + torch.manual_seed(12) + tensor, pil_img = tester._create_data(1000, 1000, device="cuda") + + with pytest.raises(RuntimeError, match=r"Max supported scale factor is"): + F.resize(tensor, size=(5, 5), interpolation=interpolation, antialias=True) + + +def check_functional_vs_PIL_vs_scripted(fn, fn_pil, fn_t, config, device, dtype, tol=2.0 + 1e-10, agg_method="max"): + + tester = Tester() + + script_fn = torch.jit.script(fn) + torch.manual_seed(15) + tensor, pil_img = tester._create_data(26, 34, device=device) + batch_tensors = tester._create_data_batch(16, 18, num_samples=4, device=device) + + if dtype is not None: + tensor = F.convert_image_dtype(tensor, dtype) + batch_tensors = F.convert_image_dtype(batch_tensors, dtype) + + out_fn_t = fn_t(tensor, **config) + out_pil = fn_pil(pil_img, **config) + out_scripted = script_fn(tensor, **config) + assert out_fn_t.dtype == out_scripted.dtype + assert out_fn_t.size()[1:] == out_pil.size[::-1] + + rbg_tensor = out_fn_t + + if out_fn_t.dtype != torch.uint8: + rbg_tensor = F.convert_image_dtype(out_fn_t, torch.uint8) + + # Check that max difference does not exceed 2 in [0, 255] range + # Exact matching is not possible due to incompatibility convert_image_dtype and PIL results + tester.approxEqualTensorToPIL(rbg_tensor.float(), out_pil, tol=tol, agg_method=agg_method) + + atol = 1e-6 + if out_fn_t.dtype == torch.uint8 and "cuda" in torch.device(device).type: + atol = 1.0 + assert out_fn_t.allclose(out_scripted, atol=atol) + + # FIXME: fn will be scripted again in _test_fn_on_batch. We could avoid that. 
+ tester._test_fn_on_batch(batch_tensors, fn, scripted_fn_atol=atol, **config) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"brightness_factor": f} for f in (0.1, 0.5, 1.0, 1.34, 2.5)]) +def test_adjust_brightness(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_brightness, + F_pil.adjust_brightness, + F_t.adjust_brightness, + config, + device, + dtype, + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +def test_invert(device, dtype): + check_functional_vs_PIL_vs_scripted( + F.invert, + F_pil.invert, + F_t.invert, + {}, + device, + dtype, + tol=1.0, + agg_method="max" + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('config', [{"bits": bits} for bits in range(0, 8)]) +def test_posterize(device, config): + check_functional_vs_PIL_vs_scripted( + F.posterize, + F_pil.posterize, + F_t.posterize, + config, + device, + dtype=None, + tol=1.0, + agg_method="max", + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('config', [{"threshold": threshold} for threshold in [0, 64, 128, 192, 255]]) +def test_solarize1(device, config): + check_functional_vs_PIL_vs_scripted( + F.solarize, + F_pil.solarize, + F_t.solarize, + config, + device, + dtype=None, + tol=1.0, + agg_method="max", + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"threshold": threshold} for threshold in [0.0, 0.25, 0.5, 0.75, 1.0]]) +def test_solarize2(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.solarize, + lambda img, threshold: F_pil.solarize(img, 255 * threshold), + F_t.solarize, + config, + device, + dtype, + tol=1.0, + agg_method="max", + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"sharpness_factor": f} for f in [0.2, 0.5, 1.0, 1.5, 2.0]]) +def test_adjust_sharpness(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_sharpness, + F_pil.adjust_sharpness, + F_t.adjust_sharpness, + config, + device, + dtype, + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +def test_autocontrast(device, dtype): + check_functional_vs_PIL_vs_scripted( + F.autocontrast, + F_pil.autocontrast, + F_t.autocontrast, + {}, + device, + dtype, + tol=1.0, + agg_method="max" + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +def test_equalize(device): + torch.set_deterministic(False) + check_functional_vs_PIL_vs_scripted( + F.equalize, + F_pil.equalize, + F_t.equalize, + {}, + device, + dtype=None, + tol=1.0, + agg_method="max", + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"contrast_factor": f} for f in [0.2, 0.5, 1.0, 1.5, 2.0]]) +def test_adjust_contrast(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_contrast, + F_pil.adjust_contrast, + F_t.adjust_contrast, + config, + device, + dtype + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', 
[{"saturation_factor": f} for f in [0.5, 0.75, 1.0, 1.5, 2.0]]) +def test_adjust_saturation(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_saturation, + F_pil.adjust_saturation, + F_t.adjust_saturation, + config, + device, + dtype + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"hue_factor": f} for f in [-0.45, -0.25, 0.0, 0.25, 0.45]]) +def test_adjust_hue(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_hue, + F_pil.adjust_hue, + F_t.adjust_hue, + config, + device, + dtype, + tol=16.1, + agg_method="max" + ) + + +@pytest.mark.parametrize('device', cpu_and_gpu()) +@pytest.mark.parametrize('dtype', (None, torch.float32, torch.float64)) +@pytest.mark.parametrize('config', [{"gamma": g1, "gain": g2} for g1, g2 in zip([0.8, 1.0, 1.2], [0.7, 1.0, 1.3])]) +def test_adjust_gamma(device, dtype, config): + check_functional_vs_PIL_vs_scripted( + F.adjust_gamma, + F_pil.adjust_gamma, + F_t.adjust_gamma, + config, + device, + dtype, + ) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_hub.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_hub.py new file mode 100644 index 0000000000000000000000000000000000000000..29ae90014d1cd9cf4c404d81fc05e487929ff21b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_hub.py @@ -0,0 +1,58 @@ +import torch.hub as hub +import tempfile +import shutil +import os +import sys +import unittest + + +def sum_of_model_parameters(model): + s = 0 + for p in model.parameters(): + s += p.sum() + return s + + +SUM_OF_PRETRAINED_RESNET18_PARAMS = -12703.9931640625 + + +@unittest.skipIf('torchvision' in sys.modules, + 'TestHub must start without torchvision imported') +class TestHub(unittest.TestCase): + # Only run this check ONCE before all tests start. + # - If torchvision is imported before all tests start, e.g. we might find _C.so + # which doesn't exist in downloaded zip but in the installed wheel. + # - After the first test is run, torchvision is already in sys.modules due to + # Python cache as we run all hub tests in the same python process. 
+ + def test_load_from_github(self): + hub_model = hub.load( + 'pytorch/vision', + 'resnet18', + pretrained=True, + progress=False) + self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(), + SUM_OF_PRETRAINED_RESNET18_PARAMS, + places=2) + + def test_set_dir(self): + temp_dir = tempfile.gettempdir() + hub.set_dir(temp_dir) + hub_model = hub.load( + 'pytorch/vision', + 'resnet18', + pretrained=True, + progress=False) + self.assertAlmostEqual(sum_of_model_parameters(hub_model).item(), + SUM_OF_PRETRAINED_RESNET18_PARAMS, + places=2) + self.assertTrue(os.path.exists(temp_dir + '/pytorch_vision_master')) + shutil.rmtree(temp_dir + '/pytorch_vision_master') + + def test_list_entrypoints(self): + entry_lists = hub.list('pytorch/vision', force_reload=True) + self.assertIn('resnet18', entry_lists) + + +if __name__ == "__main__": + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_image.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_image.py new file mode 100644 index 0000000000000000000000000000000000000000..eae4a1473c533ea8d7cc18f30f9309962cc9aac4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_image.py @@ -0,0 +1,412 @@ +import glob +import io +import os +import sys +import unittest +from pathlib import Path + +import pytest +import numpy as np +import torch +from PIL import Image +import torchvision.transforms.functional as F +from common_utils import get_tmp_dir, needs_cuda, cpu_only +from _assert_utils import assert_equal + +from torchvision.io.image import ( + decode_png, decode_jpeg, encode_jpeg, write_jpeg, decode_image, read_file, + encode_png, write_png, write_file, ImageReadMode, read_image) + +IMAGE_ROOT = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets") +FAKEDATA_DIR = os.path.join(IMAGE_ROOT, "fakedata") +IMAGE_DIR = os.path.join(FAKEDATA_DIR, "imagefolder") +DAMAGED_JPEG = os.path.join(IMAGE_ROOT, 'damaged_jpeg') +ENCODE_JPEG = os.path.join(IMAGE_ROOT, "encode_jpeg") +IS_WINDOWS = sys.platform in ('win32', 'cygwin') + + +def _get_safe_image_name(name): + # Used when we need to change the pytest "id" for an "image path" parameter. + # If we don't, the test id (i.e. 
its name) will contain the whole path to the image, which is machine-specific, + # and this creates issues when the test is running in a different machine than where it was collected + # (typically, in fb internal infra) + return name.split(os.path.sep)[-1] + + +def get_images(directory, img_ext): + assert os.path.isdir(directory) + image_paths = glob.glob(directory + f'/**/*{img_ext}', recursive=True) + for path in image_paths: + if path.split(os.sep)[-2] not in ['damaged_jpeg', 'jpeg_write']: + yield path + + +def pil_read_image(img_path): + with Image.open(img_path) as img: + return torch.from_numpy(np.array(img)) + + +def normalize_dimensions(img_pil): + if len(img_pil.shape) == 3: + img_pil = img_pil.permute(2, 0, 1) + else: + img_pil = img_pil.unsqueeze(0) + return img_pil + + +class ImageTester(unittest.TestCase): + def test_decode_jpeg(self): + conversion = [(None, ImageReadMode.UNCHANGED), ("L", ImageReadMode.GRAY), ("RGB", ImageReadMode.RGB)] + for img_path in get_images(IMAGE_ROOT, ".jpg"): + for pil_mode, mode in conversion: + with Image.open(img_path) as img: + is_cmyk = img.mode == "CMYK" + if pil_mode is not None: + if is_cmyk: + # libjpeg does not support the conversion + continue + img = img.convert(pil_mode) + img_pil = torch.from_numpy(np.array(img)) + if is_cmyk: + # flip the colors to match libjpeg + img_pil = 255 - img_pil + + img_pil = normalize_dimensions(img_pil) + data = read_file(img_path) + img_ljpeg = decode_image(data, mode=mode) + + # Permit a small variation on pixel values to account for implementation + # differences between Pillow and LibJPEG. + abs_mean_diff = (img_ljpeg.type(torch.float32) - img_pil).abs().mean().item() + self.assertTrue(abs_mean_diff < 2) + + with self.assertRaisesRegex(RuntimeError, "Expected a non empty 1-dimensional tensor"): + decode_jpeg(torch.empty((100, 1), dtype=torch.uint8)) + + with self.assertRaisesRegex(RuntimeError, "Expected a torch.uint8 tensor"): + decode_jpeg(torch.empty((100,), dtype=torch.float16)) + + with self.assertRaises(RuntimeError): + decode_jpeg(torch.empty((100), dtype=torch.uint8)) + + def test_damaged_images(self): + # Test image with bad Huffman encoding (should not raise) + bad_huff = read_file(os.path.join(DAMAGED_JPEG, 'bad_huffman.jpg')) + try: + _ = decode_jpeg(bad_huff) + except RuntimeError: + self.assertTrue(False) + + # Truncated images should raise an exception + truncated_images = glob.glob( + os.path.join(DAMAGED_JPEG, 'corrupt*.jpg')) + for image_path in truncated_images: + data = read_file(image_path) + with self.assertRaises(RuntimeError): + decode_jpeg(data) + + def test_decode_png(self): + conversion = [(None, ImageReadMode.UNCHANGED), ("L", ImageReadMode.GRAY), ("LA", ImageReadMode.GRAY_ALPHA), + ("RGB", ImageReadMode.RGB), ("RGBA", ImageReadMode.RGB_ALPHA)] + for img_path in get_images(FAKEDATA_DIR, ".png"): + for pil_mode, mode in conversion: + with Image.open(img_path) as img: + if pil_mode is not None: + img = img.convert(pil_mode) + img_pil = torch.from_numpy(np.array(img)) + + img_pil = normalize_dimensions(img_pil) + data = read_file(img_path) + img_lpng = decode_image(data, mode=mode) + + tol = 0 if conversion is None else 1 + self.assertTrue(img_lpng.allclose(img_pil, atol=tol)) + + with self.assertRaises(RuntimeError): + decode_png(torch.empty((), dtype=torch.uint8)) + with self.assertRaises(RuntimeError): + decode_png(torch.randint(3, 5, (300,), dtype=torch.uint8)) + + def test_encode_png(self): + for img_path in get_images(IMAGE_DIR, '.png'): + pil_image = 
Image.open(img_path) + img_pil = torch.from_numpy(np.array(pil_image)) + img_pil = img_pil.permute(2, 0, 1) + png_buf = encode_png(img_pil, compression_level=6) + + rec_img = Image.open(io.BytesIO(bytes(png_buf.tolist()))) + rec_img = torch.from_numpy(np.array(rec_img)) + rec_img = rec_img.permute(2, 0, 1) + + assert_equal(img_pil, rec_img) + + with self.assertRaisesRegex( + RuntimeError, "Input tensor dtype should be uint8"): + encode_png(torch.empty((3, 100, 100), dtype=torch.float32)) + + with self.assertRaisesRegex( + RuntimeError, "Compression level should be between 0 and 9"): + encode_png(torch.empty((3, 100, 100), dtype=torch.uint8), + compression_level=-1) + + with self.assertRaisesRegex( + RuntimeError, "Compression level should be between 0 and 9"): + encode_png(torch.empty((3, 100, 100), dtype=torch.uint8), + compression_level=10) + + with self.assertRaisesRegex( + RuntimeError, "The number of channels should be 1 or 3, got: 5"): + encode_png(torch.empty((5, 100, 100), dtype=torch.uint8)) + + def test_write_png(self): + with get_tmp_dir() as d: + for img_path in get_images(IMAGE_DIR, '.png'): + pil_image = Image.open(img_path) + img_pil = torch.from_numpy(np.array(pil_image)) + img_pil = img_pil.permute(2, 0, 1) + + filename, _ = os.path.splitext(os.path.basename(img_path)) + torch_png = os.path.join(d, '{0}_torch.png'.format(filename)) + write_png(img_pil, torch_png, compression_level=6) + saved_image = torch.from_numpy(np.array(Image.open(torch_png))) + saved_image = saved_image.permute(2, 0, 1) + + assert_equal(img_pil, saved_image) + + def test_read_file(self): + with get_tmp_dir() as d: + fname, content = 'test1.bin', b'TorchVision\211\n' + fpath = os.path.join(d, fname) + with open(fpath, 'wb') as f: + f.write(content) + + data = read_file(fpath) + expected = torch.tensor(list(content), dtype=torch.uint8) + assert_equal(data, expected) + os.unlink(fpath) + + with self.assertRaisesRegex( + RuntimeError, "No such file or directory: 'tst'"): + read_file('tst') + + def test_read_file_non_ascii(self): + with get_tmp_dir() as d: + fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' + fpath = os.path.join(d, fname) + with open(fpath, 'wb') as f: + f.write(content) + + data = read_file(fpath) + expected = torch.tensor(list(content), dtype=torch.uint8) + assert_equal(data, expected) + os.unlink(fpath) + + def test_write_file(self): + with get_tmp_dir() as d: + fname, content = 'test1.bin', b'TorchVision\211\n' + fpath = os.path.join(d, fname) + content_tensor = torch.tensor(list(content), dtype=torch.uint8) + write_file(fpath, content_tensor) + + with open(fpath, 'rb') as f: + saved_content = f.read() + self.assertEqual(content, saved_content) + os.unlink(fpath) + + def test_write_file_non_ascii(self): + with get_tmp_dir() as d: + fname, content = '日本語(Japanese).bin', b'TorchVision\211\n' + fpath = os.path.join(d, fname) + content_tensor = torch.tensor(list(content), dtype=torch.uint8) + write_file(fpath, content_tensor) + + with open(fpath, 'rb') as f: + saved_content = f.read() + self.assertEqual(content, saved_content) + os.unlink(fpath) + + +@needs_cuda +@pytest.mark.parametrize('img_path', [ + pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path)) + for jpeg_path in get_images(IMAGE_ROOT, ".jpg") +]) +@pytest.mark.parametrize('mode', [ImageReadMode.UNCHANGED, ImageReadMode.GRAY, ImageReadMode.RGB]) +@pytest.mark.parametrize('scripted', (False, True)) +def test_decode_jpeg_cuda(mode, img_path, scripted): + if 'cmyk' in img_path: + pytest.xfail("Decoding a CMYK jpeg 
isn't supported") + tester = ImageTester() + data = read_file(img_path) + img = decode_image(data, mode=mode) + f = torch.jit.script(decode_jpeg) if scripted else decode_jpeg + img_nvjpeg = f(data, mode=mode, device='cuda') + + # Some difference expected between jpeg implementations + tester.assertTrue((img.float() - img_nvjpeg.cpu().float()).abs().mean() < 2) + + +@needs_cuda +@pytest.mark.parametrize('cuda_device', ('cuda', 'cuda:0', torch.device('cuda'))) +def test_decode_jpeg_cuda_device_param(cuda_device): + """Make sure we can pass a string or a torch.device as device param""" + data = read_file(next(get_images(IMAGE_ROOT, ".jpg"))) + decode_jpeg(data, device=cuda_device) + + +@needs_cuda +def test_decode_jpeg_cuda_errors(): + data = read_file(next(get_images(IMAGE_ROOT, ".jpg"))) + with pytest.raises(RuntimeError, match="Expected a non empty 1-dimensional tensor"): + decode_jpeg(data.reshape(-1, 1), device='cuda') + with pytest.raises(RuntimeError, match="input tensor must be on CPU"): + decode_jpeg(data.to('cuda'), device='cuda') + with pytest.raises(RuntimeError, match="Expected a torch.uint8 tensor"): + decode_jpeg(data.to(torch.float), device='cuda') + with pytest.raises(RuntimeError, match="Expected a cuda device"): + torch.ops.image.decode_jpeg_cuda(data, ImageReadMode.UNCHANGED.value, 'cpu') + + +@cpu_only +def test_encode_jpeg_errors(): + + with pytest.raises(RuntimeError, match="Input tensor dtype should be uint8"): + encode_jpeg(torch.empty((3, 100, 100), dtype=torch.float32)) + + with pytest.raises(ValueError, match="Image quality should be a positive number " + "between 1 and 100"): + encode_jpeg(torch.empty((3, 100, 100), dtype=torch.uint8), quality=-1) + + with pytest.raises(ValueError, match="Image quality should be a positive number " + "between 1 and 100"): + encode_jpeg(torch.empty((3, 100, 100), dtype=torch.uint8), quality=101) + + with pytest.raises(RuntimeError, match="The number of channels should be 1 or 3, got: 5"): + encode_jpeg(torch.empty((5, 100, 100), dtype=torch.uint8)) + + with pytest.raises(RuntimeError, match="Input data should be a 3-dimensional tensor"): + encode_jpeg(torch.empty((1, 3, 100, 100), dtype=torch.uint8)) + + with pytest.raises(RuntimeError, match="Input data should be a 3-dimensional tensor"): + encode_jpeg(torch.empty((100, 100), dtype=torch.uint8)) + + +def _collect_if(cond): + # TODO: remove this once test_encode_jpeg_windows and test_write_jpeg_windows + # are removed + def _inner(test_func): + if cond: + return test_func + else: + return pytest.mark.dont_collect(test_func) + return _inner + + +@cpu_only +@_collect_if(cond=IS_WINDOWS) +def test_encode_jpeg_windows(): + # This test is *wrong*. + # It compares a torchvision-encoded jpeg with a PIL-encoded jpeg, but it + # starts encoding the torchvision version from an image that comes from + # decode_jpeg, which can yield different results from pil.decode (see + # test_decode... which uses a high tolerance). + # Instead, we should start encoding from the exact same decoded image, for a + # valid comparison. This is done in test_encode_jpeg, but unfortunately + # these more correct tests fail on windows (probably because of a difference + # in libjpeg) between torchvision and PIL. + # FIXME: make the correct tests pass on windows and remove this. 
+ for img_path in get_images(ENCODE_JPEG, ".jpg"): + dirname = os.path.dirname(img_path) + filename, _ = os.path.splitext(os.path.basename(img_path)) + write_folder = os.path.join(dirname, 'jpeg_write') + expected_file = os.path.join( + write_folder, '{0}_pil.jpg'.format(filename)) + img = decode_jpeg(read_file(img_path)) + + with open(expected_file, 'rb') as f: + pil_bytes = f.read() + pil_bytes = torch.as_tensor(list(pil_bytes), dtype=torch.uint8) + for src_img in [img, img.contiguous()]: + # PIL sets jpeg quality to 75 by default + jpeg_bytes = encode_jpeg(src_img, quality=75) + assert_equal(jpeg_bytes, pil_bytes) + + +@cpu_only +@_collect_if(cond=IS_WINDOWS) +def test_write_jpeg_windows(): + # FIXME: Remove this eventually, see test_encode_jpeg_windows + with get_tmp_dir() as d: + for img_path in get_images(ENCODE_JPEG, ".jpg"): + data = read_file(img_path) + img = decode_jpeg(data) + + basedir = os.path.dirname(img_path) + filename, _ = os.path.splitext(os.path.basename(img_path)) + torch_jpeg = os.path.join( + d, '{0}_torch.jpg'.format(filename)) + pil_jpeg = os.path.join( + basedir, 'jpeg_write', '{0}_pil.jpg'.format(filename)) + + write_jpeg(img, torch_jpeg, quality=75) + + with open(torch_jpeg, 'rb') as f: + torch_bytes = f.read() + + with open(pil_jpeg, 'rb') as f: + pil_bytes = f.read() + + assert_equal(torch_bytes, pil_bytes) + + +@cpu_only +@_collect_if(cond=not IS_WINDOWS) +@pytest.mark.parametrize('img_path', [ + pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path)) + for jpeg_path in get_images(ENCODE_JPEG, ".jpg") +]) +def test_encode_jpeg(img_path): + img = read_image(img_path) + + pil_img = F.to_pil_image(img) + buf = io.BytesIO() + pil_img.save(buf, format='JPEG', quality=75) + + # pytorch can't read from raw bytes so we go through numpy + pil_bytes = np.frombuffer(buf.getvalue(), dtype=np.uint8) + encoded_jpeg_pil = torch.as_tensor(pil_bytes) + + for src_img in [img, img.contiguous()]: + encoded_jpeg_torch = encode_jpeg(src_img, quality=75) + assert_equal(encoded_jpeg_torch, encoded_jpeg_pil) + + +@cpu_only +@_collect_if(cond=not IS_WINDOWS) +@pytest.mark.parametrize('img_path', [ + pytest.param(jpeg_path, id=_get_safe_image_name(jpeg_path)) + for jpeg_path in get_images(ENCODE_JPEG, ".jpg") +]) +def test_write_jpeg(img_path): + with get_tmp_dir() as d: + d = Path(d) + img = read_image(img_path) + pil_img = F.to_pil_image(img) + + torch_jpeg = str(d / 'torch.jpg') + pil_jpeg = str(d / 'pil.jpg') + + write_jpeg(img, torch_jpeg, quality=75) + pil_img.save(pil_jpeg, quality=75) + + with open(torch_jpeg, 'rb') as f: + torch_bytes = f.read() + + with open(pil_jpeg, 'rb') as f: + pil_bytes = f.read() + + assert_equal(torch_bytes, pil_bytes) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_internet.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_internet.py new file mode 100644 index 0000000000000000000000000000000000000000..05496752c7f88054162f5b8f1bfc939dedd26a8f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_internet.py @@ -0,0 +1,71 @@ +"""This file should contain all tests that need access to the internet (apart +from the ones in test_datasets_download.py) + +We want to bundle all internet-related tests in one file, so the file can be +cleanly ignored in FB internal test infra. 
+""" + +import os +import unittest +import unittest.mock +import warnings +from urllib.error import URLError + +import torchvision.datasets.utils as utils +from common_utils import get_tmp_dir + + +class DatasetUtilsTester(unittest.TestCase): + + def test_get_redirect_url(self): + url = "http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz" + expected = "https://drive.google.com/file/d/1hbzc_P1FuxMkcabkgn9ZKinBwW683j45/view" + + actual = utils._get_redirect_url(url) + assert actual == expected + + def test_get_redirect_url_max_hops_exceeded(self): + url = "http://www.vision.caltech.edu/visipedia-data/CUB-200-2011/CUB_200_2011.tgz" + with self.assertRaises(RecursionError): + utils._get_redirect_url(url, max_hops=0) + + def test_download_url(self): + with get_tmp_dir() as temp_dir: + url = "http://github.com/pytorch/vision/archive/master.zip" + try: + utils.download_url(url, temp_dir) + self.assertFalse(len(os.listdir(temp_dir)) == 0) + except URLError: + msg = "could not download test file '{}'".format(url) + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) + + def test_download_url_retry_http(self): + with get_tmp_dir() as temp_dir: + url = "https://github.com/pytorch/vision/archive/master.zip" + try: + utils.download_url(url, temp_dir) + self.assertFalse(len(os.listdir(temp_dir)) == 0) + except URLError: + msg = "could not download test file '{}'".format(url) + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) + + def test_download_url_dont_exist(self): + with get_tmp_dir() as temp_dir: + url = "http://github.com/pytorch/vision/archive/this_doesnt_exist.zip" + with self.assertRaises(URLError): + utils.download_url(url, temp_dir) + + @unittest.mock.patch("torchvision.datasets.utils.download_file_from_google_drive") + def test_download_url_dispatch_download_from_google_drive(self, mock): + url = "https://drive.google.com/file/d/1hbzc_P1FuxMkcabkgn9ZKinBwW683j45/view" + + id = "1hbzc_P1FuxMkcabkgn9ZKinBwW683j45" + filename = "filename" + md5 = "md5" + + with get_tmp_dir() as root: + utils.download_url(url, root, filename, md5) + + mock.assert_called_once_with(id, root, filename, md5) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_io.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_io.py new file mode 100644 index 0000000000000000000000000000000000000000..e86ea9e84fc7202532584ad58de9186914cc43f1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_io.py @@ -0,0 +1,297 @@ +import os +import contextlib +import sys +import tempfile +import torch +import torchvision.io as io +from torchvision import get_video_backend +import unittest +import warnings +from urllib.error import URLError + +from common_utils import get_tmp_dir +from _assert_utils import assert_equal + + +try: + import av + # Do a version test too + io.video._check_av_available() +except ImportError: + av = None + + +VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos") + + +def _create_video_frames(num_frames, height, width): + y, x = torch.meshgrid(torch.linspace(-2, 2, height), torch.linspace(-2, 2, width)) + data = [] + for i in range(num_frames): + xc = float(i) / num_frames + yc = 1 - float(i) / (2 * num_frames) + d = torch.exp(-((x - xc) ** 2 + (y - yc) ** 2) / 2) * 255 + data.append(d.unsqueeze(2).repeat(1, 1, 3).byte()) + + return torch.stack(data, 0) + + +@contextlib.contextmanager +def temp_video(num_frames, height, width, fps, lossless=False, video_codec=None, options=None): + if lossless: + if 
video_codec is not None: + raise ValueError("video_codec can't be specified together with lossless") + if options is not None: + raise ValueError("options can't be specified together with lossless") + video_codec = 'libx264rgb' + options = {'crf': '0'} + + if video_codec is None: + if get_video_backend() == "pyav": + video_codec = 'libx264' + else: + # when video_codec is not set, we assume it is libx264rgb which accepts + # RGB pixel formats as input instead of YUV + video_codec = 'libx264rgb' + if options is None: + options = {} + + data = _create_video_frames(num_frames, height, width) + with tempfile.NamedTemporaryFile(suffix='.mp4') as f: + f.close() + io.write_video(f.name, data, fps=fps, video_codec=video_codec, options=options) + yield f.name, data + os.unlink(f.name) + + +@unittest.skipIf(get_video_backend() != "pyav" and not io._HAS_VIDEO_OPT, + "video_reader backend not available") +@unittest.skipIf(av is None, "PyAV unavailable") +class TestIO(unittest.TestCase): + # compression adds artifacts, thus we add a tolerance of + # 6 in 0-255 range + TOLERANCE = 6 + + def test_write_read_video(self): + with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): + lv, _, info = io.read_video(f_name) + assert_equal(data, lv) + self.assertEqual(info["video_fps"], 5) + + @unittest.skipIf(not io._HAS_VIDEO_OPT, "video_reader backend is not chosen") + def test_probe_video_from_file(self): + with temp_video(10, 300, 300, 5) as (f_name, data): + video_info = io._probe_video_from_file(f_name) + self.assertAlmostEqual(video_info.video_duration, 2, delta=0.1) + self.assertAlmostEqual(video_info.video_fps, 5, delta=0.1) + + @unittest.skipIf(not io._HAS_VIDEO_OPT, "video_reader backend is not chosen") + def test_probe_video_from_memory(self): + with temp_video(10, 300, 300, 5) as (f_name, data): + with open(f_name, "rb") as fp: + filebuffer = fp.read() + video_info = io._probe_video_from_memory(filebuffer) + self.assertAlmostEqual(video_info.video_duration, 2, delta=0.1) + self.assertAlmostEqual(video_info.video_fps, 5, delta=0.1) + + def test_read_timestamps(self): + with temp_video(10, 300, 300, 5) as (f_name, data): + pts, _ = io.read_video_timestamps(f_name) + # note: not all formats/codecs provide accurate information for computing the + # timestamps. 
For the format that we use here, this information is available, + # so we use it as a baseline + container = av.open(f_name) + stream = container.streams[0] + pts_step = int(round(float(1 / (stream.average_rate * stream.time_base)))) + num_frames = int(round(float(stream.average_rate * stream.time_base * stream.duration))) + expected_pts = [i * pts_step for i in range(num_frames)] + + self.assertEqual(pts, expected_pts) + container.close() + + def test_read_partial_video(self): + with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): + pts, _ = io.read_video_timestamps(f_name) + for start in range(5): + for offset in range(1, 4): + lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1]) + s_data = data[start:(start + offset)] + self.assertEqual(len(lv), offset) + assert_equal(s_data, lv) + + if get_video_backend() == "pyav": + # for "video_reader" backend, we don't decode the closest early frame + # when the given start pts is not matching any frame pts + lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7]) + self.assertEqual(len(lv), 4) + assert_equal(data[4:8], lv) + + def test_read_partial_video_bframes(self): + # do not use lossless encoding, to test the presence of B-frames + options = {'bframes': '16', 'keyint': '10', 'min-keyint': '4'} + with temp_video(100, 300, 300, 5, options=options) as (f_name, data): + pts, _ = io.read_video_timestamps(f_name) + for start in range(0, 80, 20): + for offset in range(1, 4): + lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1]) + s_data = data[start:(start + offset)] + self.assertEqual(len(lv), offset) + assert_equal(s_data, lv, rtol=0.0, atol=self.TOLERANCE) + + lv, _, _ = io.read_video(f_name, pts[4] + 1, pts[7]) + # TODO fix this + if get_video_backend() == 'pyav': + self.assertEqual(len(lv), 4) + assert_equal(data[4:8], lv, rtol=0.0, atol=self.TOLERANCE) + else: + self.assertEqual(len(lv), 3) + assert_equal(data[5:8], lv, rtol=0.0, atol=self.TOLERANCE) + + def test_read_packed_b_frames_divx_file(self): + name = "hmdb51_Turnk_r_Pippi_Michel_cartwheel_f_cm_np2_le_med_6.avi" + f_name = os.path.join(VIDEO_DIR, name) + pts, fps = io.read_video_timestamps(f_name) + + self.assertEqual(pts, sorted(pts)) + self.assertEqual(fps, 30) + + def test_read_timestamps_from_packet(self): + with temp_video(10, 300, 300, 5, video_codec='mpeg4') as (f_name, data): + pts, _ = io.read_video_timestamps(f_name) + # note: not all formats/codecs provide accurate information for computing the + # timestamps. 
For the format that we use here, this information is available, + # so we use it as a baseline + container = av.open(f_name) + stream = container.streams[0] + # make sure we went through the optimized codepath + self.assertIn(b'Lavc', stream.codec_context.extradata) + pts_step = int(round(float(1 / (stream.average_rate * stream.time_base)))) + num_frames = int(round(float(stream.average_rate * stream.time_base * stream.duration))) + expected_pts = [i * pts_step for i in range(num_frames)] + + self.assertEqual(pts, expected_pts) + container.close() + + def test_read_video_pts_unit_sec(self): + with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): + lv, _, info = io.read_video(f_name, pts_unit='sec') + + assert_equal(data, lv) + self.assertEqual(info["video_fps"], 5) + self.assertEqual(info, {"video_fps": 5}) + + def test_read_timestamps_pts_unit_sec(self): + with temp_video(10, 300, 300, 5) as (f_name, data): + pts, _ = io.read_video_timestamps(f_name, pts_unit='sec') + + container = av.open(f_name) + stream = container.streams[0] + pts_step = int(round(float(1 / (stream.average_rate * stream.time_base)))) + num_frames = int(round(float(stream.average_rate * stream.time_base * stream.duration))) + expected_pts = [i * pts_step * stream.time_base for i in range(num_frames)] + + self.assertEqual(pts, expected_pts) + container.close() + + def test_read_partial_video_pts_unit_sec(self): + with temp_video(10, 300, 300, 5, lossless=True) as (f_name, data): + pts, _ = io.read_video_timestamps(f_name, pts_unit='sec') + + for start in range(5): + for offset in range(1, 4): + lv, _, _ = io.read_video(f_name, pts[start], pts[start + offset - 1], pts_unit='sec') + s_data = data[start:(start + offset)] + self.assertEqual(len(lv), offset) + assert_equal(s_data, lv) + + container = av.open(f_name) + stream = container.streams[0] + lv, _, _ = io.read_video(f_name, + int(pts[4] * (1.0 / stream.time_base) + 1) * stream.time_base, pts[7], + pts_unit='sec') + if get_video_backend() == "pyav": + # for "video_reader" backend, we don't decode the closest early frame + # when the given start pts is not matching any frame pts + self.assertEqual(len(lv), 4) + assert_equal(data[4:8], lv) + container.close() + + def test_read_video_corrupted_file(self): + with tempfile.NamedTemporaryFile(suffix='.mp4') as f: + f.write(b'This is not an mpg4 file') + video, audio, info = io.read_video(f.name) + self.assertIsInstance(video, torch.Tensor) + self.assertIsInstance(audio, torch.Tensor) + self.assertEqual(video.numel(), 0) + self.assertEqual(audio.numel(), 0) + self.assertEqual(info, {}) + + def test_read_video_timestamps_corrupted_file(self): + with tempfile.NamedTemporaryFile(suffix='.mp4') as f: + f.write(b'This is not an mpg4 file') + video_pts, video_fps = io.read_video_timestamps(f.name) + self.assertEqual(video_pts, []) + self.assertIs(video_fps, None) + + @unittest.skip("Temporarily disabled due to new pyav") + def test_read_video_partially_corrupted_file(self): + with temp_video(5, 4, 4, 5, lossless=True) as (f_name, data): + with open(f_name, 'r+b') as f: + size = os.path.getsize(f_name) + bytes_to_overwrite = size // 10 + # seek to the middle of the file + f.seek(5 * bytes_to_overwrite) + # corrupt 10% of the file from the middle + f.write(b'\xff' * bytes_to_overwrite) + # this exercises the container.decode assertion check + video, audio, info = io.read_video(f.name, pts_unit='sec') + # check that size is not equal to 5, but 3 + # TODO fix this + if get_video_backend() == 'pyav': + 
self.assertEqual(len(video), 3) + else: + self.assertEqual(len(video), 4) + # but the valid decoded content is still correct + assert_equal(video[:3], data[:3]) + # and the last few frames are wrong + with self.assertRaises(AssertionError): + assert_equal(video, data) + + @unittest.skipIf(sys.platform == 'win32', 'temporarily disabled on Windows') + def test_write_video_with_audio(self): + f_name = os.path.join(VIDEO_DIR, "R6llTwEh07w.mp4") + video_tensor, audio_tensor, info = io.read_video(f_name, pts_unit="sec") + + with get_tmp_dir() as tmpdir: + out_f_name = os.path.join(tmpdir, "testing.mp4") + io.video.write_video( + out_f_name, + video_tensor, + round(info["video_fps"]), + video_codec="libx264rgb", + options={'crf': '0'}, + audio_array=audio_tensor, + audio_fps=info["audio_fps"], + audio_codec="aac", + ) + + out_video_tensor, out_audio_tensor, out_info = io.read_video( + out_f_name, pts_unit="sec" + ) + + self.assertEqual(info["video_fps"], out_info["video_fps"]) + assert_equal(video_tensor, out_video_tensor) + + audio_stream = av.open(f_name).streams.audio[0] + out_audio_stream = av.open(out_f_name).streams.audio[0] + + self.assertEqual(info["audio_fps"], out_info["audio_fps"]) + self.assertEqual(audio_stream.rate, out_audio_stream.rate) + self.assertAlmostEqual(audio_stream.frames, out_audio_stream.frames, delta=1) + self.assertEqual(audio_stream.frame_size, out_audio_stream.frame_size) + + # TODO add tests for audio + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_io_opt.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_io_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..87698b346249c842d114982f6a3c5919317a4ed4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_io_opt.py @@ -0,0 +1,12 @@ +import unittest +from torchvision import set_video_backend +import test_io + + +# Disabling the video backend switching temporarily +# set_video_backend('video_reader') + + +if __name__ == '__main__': + suite = unittest.TestLoader().loadTestsFromModule(test_io) + unittest.TextTestRunner(verbosity=1).run(suite) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_models.cpp b/pretrained_model/pytorch_vision_v0.10.0/test/test_models.cpp new file mode 100644 index 0000000000000000000000000000000000000000..092fc567ac2070abde0773bc53accde1eab8a4b6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_models.cpp @@ -0,0 +1,209 @@ +#include <torch/script.h> +#include <torch/torch.h> +#include <iostream> + +#include "../torchvision/csrc/models/models.h" + +using namespace vision::models; + +template <typename Model> +torch::Tensor forward_model(const std::string& input_path, torch::Tensor x) { + Model network; + torch::load(network, input_path); + network->eval(); + return network->forward(x); +} + +torch::Tensor forward_alexnet(const std::string& input_path, torch::Tensor x) { + return forward_model<AlexNet>(input_path, x); +} + +torch::Tensor forward_vgg11(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG11>(input_path, x); +} +torch::Tensor forward_vgg13(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG13>(input_path, x); +} +torch::Tensor forward_vgg16(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG16>(input_path, x); +} +torch::Tensor forward_vgg19(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG19>(input_path, x); +} + +torch::Tensor 
forward_vgg11bn(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG11BN>(input_path, x); +} +torch::Tensor forward_vgg13bn(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG13BN>(input_path, x); +} +torch::Tensor forward_vgg16bn(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG16BN>(input_path, x); +} +torch::Tensor forward_vgg19bn(const std::string& input_path, torch::Tensor x) { + return forward_model<VGG19BN>(input_path, x); +} + +torch::Tensor forward_resnet18(const std::string& input_path, torch::Tensor x) { + return forward_model<ResNet18>(input_path, x); +} +torch::Tensor forward_resnet34(const std::string& input_path, torch::Tensor x) { + return forward_model<ResNet34>(input_path, x); +} +torch::Tensor forward_resnet50(const std::string& input_path, torch::Tensor x) { + return forward_model<ResNet50>(input_path, x); +} +torch::Tensor forward_resnet101( + const std::string& input_path, + torch::Tensor x) { + return forward_model<ResNet101>(input_path, x); +} +torch::Tensor forward_resnet152( + const std::string& input_path, + torch::Tensor x) { + return forward_model<ResNet152>(input_path, x); +} +torch::Tensor forward_resnext50_32x4d( + const std::string& input_path, + torch::Tensor x) { + return forward_model<ResNext50_32x4d>(input_path, x); +} +torch::Tensor forward_resnext101_32x8d( + const std::string& input_path, + torch::Tensor x) { + return forward_model<ResNext101_32x8d>(input_path, x); +} +torch::Tensor forward_wide_resnet50_2( + const std::string& input_path, + torch::Tensor x) { + return forward_model<WideResNet50_2>(input_path, x); +} +torch::Tensor forward_wide_resnet101_2( + const std::string& input_path, + torch::Tensor x) { + return forward_model<WideResNet101_2>(input_path, x); +} + +torch::Tensor forward_squeezenet1_0( + const std::string& input_path, + torch::Tensor x) { + return forward_model<SqueezeNet1_0>(input_path, x); +} +torch::Tensor forward_squeezenet1_1( + const std::string& input_path, + torch::Tensor x) { + return forward_model<SqueezeNet1_1>(input_path, x); +} + +torch::Tensor forward_densenet121( + const std::string& input_path, + torch::Tensor x) { + return forward_model<DenseNet121>(input_path, x); +} +torch::Tensor forward_densenet169( + const std::string& input_path, + torch::Tensor x) { + return forward_model<DenseNet169>(input_path, x); +} +torch::Tensor forward_densenet201( + const std::string& input_path, + torch::Tensor x) { + return forward_model<DenseNet201>(input_path, x); +} +torch::Tensor forward_densenet161( + const std::string& input_path, + torch::Tensor x) { + return forward_model<DenseNet161>(input_path, x); +} + +torch::Tensor forward_mobilenetv2( + const std::string& input_path, + torch::Tensor x) { + return forward_model<MobileNetV2>(input_path, x); +} + +torch::Tensor forward_googlenet( + const std::string& input_path, + torch::Tensor x) { + GoogLeNet network; + torch::load(network, input_path); + network->eval(); + return network->forward(x).output; +} +torch::Tensor forward_inceptionv3( + const std::string& input_path, + torch::Tensor x) { + InceptionV3 network; + torch::load(network, input_path); + network->eval(); + return network->forward(x).output; +} + +torch::Tensor forward_mnasnet0_5(const std::string& input_path, torch::Tensor x) { + return forward_model<MNASNet0_5>(input_path, x); +} +torch::Tensor forward_mnasnet0_75(const std::string& input_path, torch::Tensor x) { + return forward_model<MNASNet0_75>(input_path, x); +} +torch::Tensor 
forward_mnasnet1_0(const std::string& input_path, torch::Tensor x) { + return forward_model<MNASNet1_0>(input_path, x); +} +torch::Tensor forward_mnasnet1_3(const std::string& input_path, torch::Tensor x) { + return forward_model<MNASNet1_3>(input_path, x); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("forward_alexnet", &forward_alexnet, "forward_alexnet"); + + m.def("forward_vgg11", &forward_vgg11, "forward_vgg11"); + m.def("forward_vgg13", &forward_vgg13, "forward_vgg13"); + m.def("forward_vgg16", &forward_vgg16, "forward_vgg16"); + m.def("forward_vgg19", &forward_vgg19, "forward_vgg19"); + + m.def("forward_vgg11bn", &forward_vgg11bn, "forward_vgg11bn"); + m.def("forward_vgg13bn", &forward_vgg13bn, "forward_vgg13bn"); + m.def("forward_vgg16bn", &forward_vgg16bn, "forward_vgg16bn"); + m.def("forward_vgg19bn", &forward_vgg19bn, "forward_vgg19bn"); + + m.def("forward_resnet18", &forward_resnet18, "forward_resnet18"); + m.def("forward_resnet34", &forward_resnet34, "forward_resnet34"); + m.def("forward_resnet50", &forward_resnet50, "forward_resnet50"); + m.def("forward_resnet101", &forward_resnet101, "forward_resnet101"); + m.def("forward_resnet152", &forward_resnet152, "forward_resnet152"); + m.def( + "forward_resnext50_32x4d", + &forward_resnext50_32x4d, + "forward_resnext50_32x4d"); + m.def( + "forward_resnext101_32x8d", + &forward_resnext101_32x8d, + "forward_resnext101_32x8d"); + m.def( + "forward_wide_resnet50_2", + &forward_wide_resnet50_2, + "forward_wide_resnet50_2"); + m.def( + "forward_wide_resnet101_2", + &forward_wide_resnet101_2, + "forward_wide_resnet101_2"); + + m.def( + "forward_squeezenet1_0", &forward_squeezenet1_0, "forward_squeezenet1_0"); + m.def( + "forward_squeezenet1_1", &forward_squeezenet1_1, "forward_squeezenet1_1"); + + m.def("forward_densenet121", &forward_densenet121, "forward_densenet121"); + m.def("forward_densenet169", &forward_densenet169, "forward_densenet169"); + m.def("forward_densenet201", &forward_densenet201, "forward_densenet201"); + m.def("forward_densenet161", &forward_densenet161, "forward_densenet161"); + + m.def("forward_mobilenetv2", &forward_mobilenetv2, "forward_mobilenetv2"); + + m.def("forward_googlenet", &forward_googlenet, "forward_googlenet"); + m.def("forward_inceptionv3", &forward_inceptionv3, "forward_inceptionv3"); + + m.def("forward_mnasnet0_5", &forward_mnasnet0_5, "forward_mnasnet0_5"); + m.def("forward_mnasnet0_75", &forward_mnasnet0_75, "forward_mnasnet0_75"); + m.def("forward_mnasnet1_0", &forward_mnasnet1_0, "forward_mnasnet1_0"); + m.def("forward_mnasnet1_3", &forward_mnasnet1_3, "forward_mnasnet1_3"); +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_models.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_models.py new file mode 100644 index 0000000000000000000000000000000000000000..180bbcd032db170a0965835da1ec0dc0ac8ec9a6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_models.py @@ -0,0 +1,466 @@ +import sys +from common_utils import TestCase, map_nested_tensor_object, freeze_rng_state, set_rng_seed, IN_CIRCLE_CI +from collections import OrderedDict +from itertools import product +import functools +import operator +import torch +import torch.nn as nn +from torchvision import models +import unittest +import warnings + +import pytest + + +def get_available_classification_models(): + # TODO add a registration mechanism to torchvision.models + return [k for k, v in models.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + + +def 
get_available_segmentation_models(): + # TODO add a registration mechanism to torchvision.models + return [k for k, v in models.segmentation.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + + +def get_available_detection_models(): + # TODO add a registration mechanism to torchvision.models + return [k for k, v in models.detection.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + + +def get_available_video_models(): + # TODO add a registration mechanism to torchvision.models + return [k for k, v in models.video.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + + +# If 'unwrapper' is provided it will be called with the script model outputs +# before they are compared to the eager model outputs. This is useful if the +# model outputs are different between TorchScript / Eager mode +script_model_unwrapper = { + 'googlenet': lambda x: x.logits, + 'inception_v3': lambda x: x.logits, + "fasterrcnn_resnet50_fpn": lambda x: x[1], + "fasterrcnn_mobilenet_v3_large_fpn": lambda x: x[1], + "fasterrcnn_mobilenet_v3_large_320_fpn": lambda x: x[1], + "maskrcnn_resnet50_fpn": lambda x: x[1], + "keypointrcnn_resnet50_fpn": lambda x: x[1], + "retinanet_resnet50_fpn": lambda x: x[1], + "ssd300_vgg16": lambda x: x[1], + "ssdlite320_mobilenet_v3_large": lambda x: x[1], +} + + +# The following models exhibit flaky numerics under autocast in _test_*_model harnesses. +# This may be caused by the harness environment (e.g. num classes, input initialization +# via torch.rand), and does not prove autocast is unsuitable when training with real data +# (autocast has been used successfully with real data for some of these models). +# TODO: investigate why autocast numerics are flaky in the harnesses. +# +# For the following models, _test_*_model harnesses skip numerical checks on outputs when +# trying autocast. However, they still try an autocasted forward pass, so they still ensure +# autocast coverage suffices to prevent dtype errors in each model. +autocast_flaky_numerics = ( + "inception_v3", + "resnet101", + "resnet152", + "wide_resnet101_2", + "deeplabv3_resnet50", + "deeplabv3_resnet101", + "deeplabv3_mobilenet_v3_large", + "fcn_resnet50", + "fcn_resnet101", + "lraspp_mobilenet_v3_large", + "maskrcnn_resnet50_fpn", +) + + +class ModelTester(TestCase): + def _test_classification_model(self, name, input_shape, dev): + set_rng_seed(0) + # passing num_class equal to a number other than 1000 helps in making the test + # more enforcing in nature + model = models.__dict__[name](num_classes=50) + model.eval().to(device=dev) + # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests + x = torch.rand(input_shape).to(device=dev) + out = model(x) + self.assertExpected(out.cpu(), name, prec=0.1) + self.assertEqual(out.shape[-1], 50) + self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None)) + + if dev == torch.device("cuda"): + with torch.cuda.amp.autocast(): + out = model(x) + # See autocast_flaky_numerics comment at top of file. 
+ if name not in autocast_flaky_numerics: + self.assertExpected(out.cpu(), name, prec=0.1) + self.assertEqual(out.shape[-1], 50) + + def _test_segmentation_model(self, name, dev): + set_rng_seed(0) + # passing num_classes equal to a number other than 21 helps in making the test's + # expected file size smaller + model = models.segmentation.__dict__[name](num_classes=10, pretrained_backbone=False) + model.eval().to(device=dev) + input_shape = (1, 3, 32, 32) + # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests + x = torch.rand(input_shape).to(device=dev) + out = model(x)["out"] + + def check_out(out): + prec = 0.01 + try: + # We first try to assert the entire output if possible. This is not + # only the best way to assert results but also handles the cases + # where we need to create a new expected result. + self.assertExpected(out.cpu(), name, prec=prec) + except AssertionError: + # Unfortunately some segmentation models are flaky with autocast + # so instead of validating the probability scores, check that the class + # predictions match. + expected_file = self._get_expected_file(name) + expected = torch.load(expected_file) + torch.testing.assert_close(out.argmax(dim=1), expected.argmax(dim=1), rtol=prec, atol=prec) + return False # Partial validation performed + + return True # Full validation performed + + full_validation = check_out(out) + + self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None)) + + if dev == torch.device("cuda"): + with torch.cuda.amp.autocast(): + out = model(x)["out"] + # See autocast_flaky_numerics comment at top of file. + if name not in autocast_flaky_numerics: + full_validation &= check_out(out) + + if not full_validation: + msg = "The output of {} could only be partially validated. " \ + "This is likely due to unit-test flakiness, but you may " \ + "want to do additional manual checks if you made " \ + "significant changes to the codebase.".format(self._testMethodName) + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) + + def _test_detection_model(self, name, dev): + set_rng_seed(0) + kwargs = {} + if "retinanet" in name: + # Reduce the default threshold to ensure the returned boxes are not empty. 
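+            # (a randomly initialized model may score every box below the default
+            # threshold, leaving nothing to compare)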
+ kwargs["score_thresh"] = 0.01 + elif "fasterrcnn_mobilenet_v3_large" in name: + kwargs["box_score_thresh"] = 0.02076 + if "fasterrcnn_mobilenet_v3_large_320_fpn" in name: + kwargs["rpn_pre_nms_top_n_test"] = 1000 + kwargs["rpn_post_nms_top_n_test"] = 1000 + model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False, **kwargs) + model.eval().to(device=dev) + input_shape = (3, 300, 300) + # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests + x = torch.rand(input_shape).to(device=dev) + model_input = [x] + out = model(model_input) + self.assertIs(model_input[0], x) + + def check_out(out): + self.assertEqual(len(out), 1) + + def compact(tensor): + size = tensor.size() + elements_per_sample = functools.reduce(operator.mul, size[1:], 1) + if elements_per_sample > 30: + return compute_mean_std(tensor) + else: + return subsample_tensor(tensor) + + def subsample_tensor(tensor): + num_elems = tensor.size(0) + num_samples = 20 + if num_elems <= num_samples: + return tensor + + ith_index = num_elems // num_samples + return tensor[ith_index - 1::ith_index] + + def compute_mean_std(tensor): + # can't compute mean of integral tensor + tensor = tensor.to(torch.double) + mean = torch.mean(tensor) + std = torch.std(tensor) + return {"mean": mean, "std": std} + + output = map_nested_tensor_object(out, tensor_map_fn=compact) + prec = 0.01 + try: + # We first try to assert the entire output if possible. This is not + # only the best way to assert results but also handles the cases + # where we need to create a new expected result. + self.assertExpected(output, name, prec=prec) + except AssertionError: + # Unfortunately detection models are flaky due to the unstable sort + # in NMS. If matching across all outputs fails, use the same approach + # as in NMSTester.test_nms_cuda to see if this is caused by duplicate + # scores. + expected_file = self._get_expected_file(name) + expected = torch.load(expected_file) + torch.testing.assert_close(output[0]["scores"], expected[0]["scores"], rtol=prec, atol=prec, + check_device=False, check_dtype=False) + + # Note: Fmassa proposed turning off NMS by adapting the threshold + # and then using the Hungarian algorithm as in DETR to find the + # best match between output and expected boxes and eliminate some + # of the flakiness. Worth exploring. + return False # Partial validation performed + + return True # Full validation performed + + full_validation = check_out(out) + self.check_jit_scriptable(model, ([x],), unwrapper=script_model_unwrapper.get(name, None)) + + if dev == torch.device("cuda"): + with torch.cuda.amp.autocast(): + out = model(model_input) + # See autocast_flaky_numerics comment at top of file. + if name not in autocast_flaky_numerics: + full_validation &= check_out(out) + + if not full_validation: + msg = "The output of {} could only be partially validated. 
" \ + "This is likely due to unit-test flakiness, but you may " \ + "want to do additional manual checks if you made " \ + "significant changes to the codebase.".format(self._testMethodName) + warnings.warn(msg, RuntimeWarning) + raise unittest.SkipTest(msg) + + def _test_detection_model_validation(self, name): + set_rng_seed(0) + model = models.detection.__dict__[name](num_classes=50, pretrained_backbone=False) + input_shape = (3, 300, 300) + x = [torch.rand(input_shape)] + + # validate that targets are present in training + self.assertRaises(ValueError, model, x) + + # validate type + targets = [{'boxes': 0.}] + self.assertRaises(ValueError, model, x, targets=targets) + + # validate boxes shape + for boxes in (torch.rand((4,)), torch.rand((1, 5))): + targets = [{'boxes': boxes}] + self.assertRaises(ValueError, model, x, targets=targets) + + # validate that no degenerate boxes are present + boxes = torch.tensor([[1, 3, 1, 4], [2, 4, 3, 4]]) + targets = [{'boxes': boxes}] + self.assertRaises(ValueError, model, x, targets=targets) + + def _test_video_model(self, name, dev): + # the default input shape is + # bs * num_channels * clip_len * h *w + input_shape = (1, 3, 4, 112, 112) + # test both basicblock and Bottleneck + model = models.video.__dict__[name](num_classes=50) + model.eval().to(device=dev) + # RNG always on CPU, to ensure x in cuda tests is bitwise identical to x in cpu tests + x = torch.rand(input_shape).to(device=dev) + out = model(x) + self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None)) + self.assertEqual(out.shape[-1], 50) + + if dev == torch.device("cuda"): + with torch.cuda.amp.autocast(): + out = model(x) + self.assertEqual(out.shape[-1], 50) + + def _make_sliced_model(self, model, stop_layer): + layers = OrderedDict() + for name, layer in model.named_children(): + layers[name] = layer + if name == stop_layer: + break + new_model = torch.nn.Sequential(layers) + return new_model + + def test_memory_efficient_densenet(self): + input_shape = (1, 3, 300, 300) + x = torch.rand(input_shape) + + for name in ['densenet121', 'densenet169', 'densenet201', 'densenet161']: + model1 = models.__dict__[name](num_classes=50, memory_efficient=True) + params = model1.state_dict() + num_params = sum([x.numel() for x in model1.parameters()]) + model1.eval() + out1 = model1(x) + out1.sum().backward() + num_grad = sum([x.grad.numel() for x in model1.parameters() if x.grad is not None]) + + model2 = models.__dict__[name](num_classes=50, memory_efficient=False) + model2.load_state_dict(params) + model2.eval() + out2 = model2(x) + + self.assertTrue(num_params == num_grad) + torch.testing.assert_close(out1, out2, rtol=0.0, atol=1e-5) + + def test_resnet_dilation(self): + # TODO improve tests to also check that each layer has the right dimensionality + for i in product([False, True], [False, True], [False, True]): + model = models.__dict__["resnet50"](replace_stride_with_dilation=i) + model = self._make_sliced_model(model, stop_layer="layer4") + model.eval() + x = torch.rand(1, 3, 224, 224) + out = model(x) + f = 2 ** sum(i) + self.assertEqual(out.shape, (1, 2048, 7 * f, 7 * f)) + + def test_mobilenet_v2_residual_setting(self): + model = models.__dict__["mobilenet_v2"](inverted_residual_setting=[[1, 16, 1, 1], [6, 24, 2, 2]]) + model.eval() + x = torch.rand(1, 3, 224, 224) + out = model(x) + self.assertEqual(out.shape[-1], 1000) + + def test_mobilenet_norm_layer(self): + for name in ["mobilenet_v2", "mobilenet_v3_large", "mobilenet_v3_small"]: + model = 
models.__dict__[name]() + self.assertTrue(any(isinstance(x, nn.BatchNorm2d) for x in model.modules())) + + def get_gn(num_channels): + return nn.GroupNorm(32, num_channels) + + model = models.__dict__[name](norm_layer=get_gn) + self.assertFalse(any(isinstance(x, nn.BatchNorm2d) for x in model.modules())) + self.assertTrue(any(isinstance(x, nn.GroupNorm) for x in model.modules())) + + def test_inception_v3_eval(self): + # replacement for models.inception_v3(pretrained=True) that does not download weights + kwargs = {} + kwargs['transform_input'] = True + kwargs['aux_logits'] = True + kwargs['init_weights'] = False + name = "inception_v3" + model = models.Inception3(**kwargs) + model.aux_logits = False + model.AuxLogits = None + model = model.eval() + x = torch.rand(1, 3, 299, 299) + self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None)) + + def test_fasterrcnn_double(self): + model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False) + model.double() + model.eval() + input_shape = (3, 300, 300) + x = torch.rand(input_shape, dtype=torch.float64) + model_input = [x] + out = model(model_input) + self.assertIs(model_input[0], x) + self.assertEqual(len(out), 1) + self.assertTrue("boxes" in out[0]) + self.assertTrue("scores" in out[0]) + self.assertTrue("labels" in out[0]) + + def test_googlenet_eval(self): + # replacement for models.googlenet(pretrained=True) that does not download weights + kwargs = {} + kwargs['transform_input'] = True + kwargs['aux_logits'] = True + kwargs['init_weights'] = False + name = "googlenet" + model = models.GoogLeNet(**kwargs) + model.aux_logits = False + model.aux1 = None + model.aux2 = None + model = model.eval() + x = torch.rand(1, 3, 224, 224) + self.check_jit_scriptable(model, (x,), unwrapper=script_model_unwrapper.get(name, None)) + + @unittest.skipIf(not torch.cuda.is_available(), 'needs GPU') + def test_fasterrcnn_switch_devices(self): + def checkOut(out): + self.assertEqual(len(out), 1) + self.assertTrue("boxes" in out[0]) + self.assertTrue("scores" in out[0]) + self.assertTrue("labels" in out[0]) + + model = models.detection.fasterrcnn_resnet50_fpn(num_classes=50, pretrained_backbone=False) + model.cuda() + model.eval() + input_shape = (3, 300, 300) + x = torch.rand(input_shape, device='cuda') + model_input = [x] + out = model(model_input) + self.assertIs(model_input[0], x) + + checkOut(out) + + with torch.cuda.amp.autocast(): + out = model(model_input) + + checkOut(out) + + # now switch to cpu and make sure it works + model.cpu() + x = x.cpu() + out_cpu = model([x]) + + checkOut(out_cpu) + + def test_generalizedrcnn_transform_repr(self): + + min_size, max_size = 224, 299 + image_mean = [0.485, 0.456, 0.406] + image_std = [0.229, 0.224, 0.225] + + t = models.detection.transform.GeneralizedRCNNTransform(min_size=min_size, + max_size=max_size, + image_mean=image_mean, + image_std=image_std) + + # Check integrity of object __repr__ attribute + expected_string = 'GeneralizedRCNNTransform(' + _indent = '\n ' + expected_string += '{0}Normalize(mean={1}, std={2})'.format(_indent, image_mean, image_std) + expected_string += '{0}Resize(min_size=({1},), max_size={2}, '.format(_indent, min_size, max_size) + expected_string += "mode='bilinear')\n)" + self.assertEqual(t.__repr__(), expected_string) + + +_devs = [torch.device("cpu"), torch.device("cuda")] if torch.cuda.is_available() else [torch.device("cpu")] + + +@pytest.mark.parametrize('model_name', get_available_classification_models()) 
+@pytest.mark.parametrize('dev', _devs) +def test_classification_model(model_name, dev): + input_shape = (1, 3, 299, 299) if model_name == 'inception_v3' else (1, 3, 224, 224) + ModelTester()._test_classification_model(model_name, input_shape, dev) + + +@pytest.mark.parametrize('model_name', get_available_segmentation_models()) +@pytest.mark.parametrize('dev', _devs) +def test_segmentation_model(model_name, dev): + ModelTester()._test_segmentation_model(model_name, dev) + + +@pytest.mark.parametrize('model_name', get_available_detection_models()) +@pytest.mark.parametrize('dev', _devs) +def test_detection_model(model_name, dev): + ModelTester()._test_detection_model(model_name, dev) + + +@pytest.mark.parametrize('model_name', get_available_detection_models()) +def test_detection_model_validation(model_name): + ModelTester()._test_detection_model_validation(model_name) + + +@pytest.mark.parametrize('model_name', get_available_video_models()) +@pytest.mark.parametrize('dev', _devs) +def test_video_model(model_name, dev): + ModelTester()._test_video_model(model_name, dev) + + +if __name__ == '__main__': + pytest.main([__file__]) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_anchor_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_anchor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..13c399a0c32605d8de5e278b4db4d3dcf7d9986c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_anchor_utils.py @@ -0,0 +1,90 @@ +import torch +from common_utils import TestCase +from _assert_utils import assert_equal +from torchvision.models.detection.anchor_utils import AnchorGenerator, DefaultBoxGenerator +from torchvision.models.detection.image_list import ImageList + + +class Tester(TestCase): + def test_incorrect_anchors(self): + incorrect_sizes = ((2, 4, 8), (32, 8), ) + incorrect_aspects = (0.5, 1.0) + anc = AnchorGenerator(incorrect_sizes, incorrect_aspects) + image1 = torch.randn(3, 800, 800) + image_list = ImageList(image1, [(800, 800)]) + feature_maps = [torch.randn(1, 50)] + self.assertRaises(ValueError, anc, image_list, feature_maps) + + def _init_test_anchor_generator(self): + anchor_sizes = ((10,),) + aspect_ratios = ((1,),) + anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) + + return anchor_generator + + def _init_test_defaultbox_generator(self): + aspect_ratios = [[2]] + dbox_generator = DefaultBoxGenerator(aspect_ratios) + + return dbox_generator + + def get_features(self, images): + s0, s1 = images.shape[-2:] + features = [torch.rand(2, 8, s0 // 5, s1 // 5)] + return features + + def test_anchor_generator(self): + images = torch.randn(2, 3, 15, 15) + features = self.get_features(images) + image_shapes = [i.shape[-2:] for i in images] + images = ImageList(images, image_shapes) + + model = self._init_test_anchor_generator() + model.eval() + anchors = model(images, features) + + # Estimate the number of target anchors + grid_sizes = [f.shape[-2:] for f in features] + num_anchors_estimated = 0 + for sizes, num_anchors_per_loc in zip(grid_sizes, model.num_anchors_per_location()): + num_anchors_estimated += sizes[0] * sizes[1] * num_anchors_per_loc + + anchors_output = torch.tensor([[-5., -5., 5., 5.], + [0., -5., 10., 5.], + [5., -5., 15., 5.], + [-5., 0., 5., 10.], + [0., 0., 10., 10.], + [5., 0., 15., 10.], + [-5., 5., 5., 15.], + [0., 5., 10., 15.], + [5., 5., 15., 15.]]) + + self.assertEqual(num_anchors_estimated, 9) + self.assertEqual(len(anchors), 2) + 
self.assertEqual(tuple(anchors[0].shape), (9, 4)) + self.assertEqual(tuple(anchors[1].shape), (9, 4)) + assert_equal(anchors[0], anchors_output) + assert_equal(anchors[1], anchors_output) + + def test_defaultbox_generator(self): + images = torch.zeros(2, 3, 15, 15) + features = [torch.zeros(2, 8, 1, 1)] + image_shapes = [i.shape[-2:] for i in images] + images = ImageList(images, image_shapes) + + model = self._init_test_defaultbox_generator() + model.eval() + dboxes = model(images, features) + + dboxes_output = torch.tensor([ + [6.3750, 6.3750, 8.6250, 8.6250], + [4.7443, 4.7443, 10.2557, 10.2557], + [5.9090, 6.7045, 9.0910, 8.2955], + [6.7045, 5.9090, 8.2955, 9.0910] + ]) + + self.assertEqual(len(dboxes), 2) + self.assertEqual(tuple(dboxes[0].shape), (4, 4)) + self.assertEqual(tuple(dboxes[1].shape), (4, 4)) + torch.testing.assert_close(dboxes[0], dboxes_output, rtol=1e-5, atol=1e-8) + torch.testing.assert_close(dboxes[1], dboxes_output, rtol=1e-5, atol=1e-8) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_negative_samples.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_negative_samples.py new file mode 100644 index 0000000000000000000000000000000000000000..83ccc58ade5c9498ab3a3fd210d035444e2d15b4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_negative_samples.py @@ -0,0 +1,153 @@ +import torch + +import torchvision.models +from torchvision.ops import MultiScaleRoIAlign +from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork +from torchvision.models.detection.roi_heads import RoIHeads +from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead + +import unittest + + +class Tester(unittest.TestCase): + + def _make_empty_sample(self, add_masks=False, add_keypoints=False): + images = [torch.rand((3, 100, 100), dtype=torch.float32)] + boxes = torch.zeros((0, 4), dtype=torch.float32) + negative_target = {"boxes": boxes, + "labels": torch.zeros(0, dtype=torch.int64), + "image_id": 4, + "area": (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0]), + "iscrowd": torch.zeros((0,), dtype=torch.int64)} + + if add_masks: + negative_target["masks"] = torch.zeros(0, 100, 100, dtype=torch.uint8) + + if add_keypoints: + negative_target["keypoints"] = torch.zeros(17, 0, 3, dtype=torch.float32) + + targets = [negative_target] + return images, targets + + def test_targets_to_anchors(self): + _, targets = self._make_empty_sample() + anchors = [torch.randint(-50, 50, (3, 4), dtype=torch.float32)] + + anchor_sizes = ((32,), (64,), (128,), (256,), (512,)) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + rpn_anchor_generator = AnchorGenerator( + anchor_sizes, aspect_ratios + ) + rpn_head = RPNHead(4, rpn_anchor_generator.num_anchors_per_location()[0]) + + head = RegionProposalNetwork( + rpn_anchor_generator, rpn_head, + 0.5, 0.3, + 256, 0.5, + 2000, 2000, 0.7, 0.05) + + labels, matched_gt_boxes = head.assign_targets_to_anchors(anchors, targets) + + self.assertEqual(labels[0].sum(), 0) + self.assertEqual(labels[0].shape, torch.Size([anchors[0].shape[0]])) + self.assertEqual(labels[0].dtype, torch.float32) + + self.assertEqual(matched_gt_boxes[0].sum(), 0) + self.assertEqual(matched_gt_boxes[0].shape, anchors[0].shape) + self.assertEqual(matched_gt_boxes[0].dtype, torch.float32) + + def test_assign_targets_to_proposals(self): + + proposals = [torch.randint(-50, 50, (20, 4), dtype=torch.float32)] + gt_boxes = [torch.zeros((0, 4), 
dtype=torch.float32)] + gt_labels = [torch.tensor([[0]], dtype=torch.int64)] + + box_roi_pool = MultiScaleRoIAlign( + featmap_names=['0', '1', '2', '3'], + output_size=7, + sampling_ratio=2) + + resolution = box_roi_pool.output_size[0] + representation_size = 1024 + box_head = TwoMLPHead( + 4 * resolution ** 2, + representation_size) + + representation_size = 1024 + box_predictor = FastRCNNPredictor( + representation_size, + 2) + + roi_heads = RoIHeads( + # Box + box_roi_pool, box_head, box_predictor, + 0.5, 0.5, + 512, 0.25, + None, + 0.05, 0.5, 100) + + matched_idxs, labels = roi_heads.assign_targets_to_proposals(proposals, gt_boxes, gt_labels) + + self.assertEqual(matched_idxs[0].sum(), 0) + self.assertEqual(matched_idxs[0].shape, torch.Size([proposals[0].shape[0]])) + self.assertEqual(matched_idxs[0].dtype, torch.int64) + + self.assertEqual(labels[0].sum(), 0) + self.assertEqual(labels[0].shape, torch.Size([proposals[0].shape[0]])) + self.assertEqual(labels[0].dtype, torch.int64) + + def test_forward_negative_sample_frcnn(self): + for name in ["fasterrcnn_resnet50_fpn", "fasterrcnn_mobilenet_v3_large_fpn", + "fasterrcnn_mobilenet_v3_large_320_fpn"]: + model = torchvision.models.detection.__dict__[name]( + num_classes=2, min_size=100, max_size=100) + + images, targets = self._make_empty_sample() + loss_dict = model(images, targets) + + self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.)) + self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.)) + + def test_forward_negative_sample_mrcnn(self): + model = torchvision.models.detection.maskrcnn_resnet50_fpn( + num_classes=2, min_size=100, max_size=100) + + images, targets = self._make_empty_sample(add_masks=True) + loss_dict = model(images, targets) + + self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.)) + self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.)) + self.assertEqual(loss_dict["loss_mask"], torch.tensor(0.)) + + def test_forward_negative_sample_krcnn(self): + model = torchvision.models.detection.keypointrcnn_resnet50_fpn( + num_classes=2, min_size=100, max_size=100) + + images, targets = self._make_empty_sample(add_keypoints=True) + loss_dict = model(images, targets) + + self.assertEqual(loss_dict["loss_box_reg"], torch.tensor(0.)) + self.assertEqual(loss_dict["loss_rpn_box_reg"], torch.tensor(0.)) + self.assertEqual(loss_dict["loss_keypoint"], torch.tensor(0.)) + + def test_forward_negative_sample_retinanet(self): + model = torchvision.models.detection.retinanet_resnet50_fpn( + num_classes=2, min_size=100, max_size=100, pretrained_backbone=False) + + images, targets = self._make_empty_sample() + loss_dict = model(images, targets) + + self.assertEqual(loss_dict["bbox_regression"], torch.tensor(0.)) + + def test_forward_negative_sample_ssd(self): + model = torchvision.models.detection.ssd300_vgg16( + num_classes=2, pretrained_backbone=False) + + images, targets = self._make_empty_sample() + loss_dict = model(images, targets) + + self.assertEqual(loss_dict["bbox_regression"], torch.tensor(0.)) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..a20e0abc965638d5a41b40df0befc2afe4374ddb --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_models_detection_utils.py @@ -0,0 +1,71 @@ +import copy +import torch +from torchvision.models.detection 
import _utils +from torchvision.models.detection.transform import GeneralizedRCNNTransform +import unittest +from torchvision.models.detection import backbone_utils +from _assert_utils import assert_equal + + +class Tester(unittest.TestCase): + def test_balanced_positive_negative_sampler(self): + sampler = _utils.BalancedPositiveNegativeSampler(4, 0.25) + # keep all 6 negatives first, then add 3 positives, last two are ignore + matched_idxs = [torch.tensor([0, 0, 0, 0, 0, 0, 1, 1, 1, -1, -1])] + pos, neg = sampler(matched_idxs) + # we know the number of elements that should be sampled for the positive (1) + # and the negative (3), and their location. Let's make sure that they are + # there + self.assertEqual(pos[0].sum(), 1) + self.assertEqual(pos[0][6:9].sum(), 1) + self.assertEqual(neg[0].sum(), 3) + self.assertEqual(neg[0][0:6].sum(), 3) + + def test_resnet_fpn_backbone_frozen_layers(self): + # we know how many initial layers and parameters of the network should + # be frozen for each trainable_backbone_layers parameter value + # i.e all 53 params are frozen if trainable_backbone_layers=0 + # ad first 24 params are frozen if trainable_backbone_layers=2 + expected_frozen_params = {0: 53, 1: 43, 2: 24, 3: 11, 4: 1, 5: 0} + for train_layers, exp_froz_params in expected_frozen_params.items(): + model = backbone_utils.resnet_fpn_backbone( + 'resnet50', pretrained=False, trainable_layers=train_layers) + # boolean list that is true if the param at that index is frozen + is_frozen = [not parameter.requires_grad for _, parameter in model.named_parameters()] + # check that expected initial number of layers are frozen + self.assertTrue(all(is_frozen[:exp_froz_params])) + + def test_validate_resnet_inputs_detection(self): + # default number of backbone layers to train + ret = backbone_utils._validate_trainable_layers( + pretrained=True, trainable_backbone_layers=None, max_value=5, default_value=3) + self.assertEqual(ret, 3) + # can't go beyond 5 + with self.assertRaises(AssertionError): + ret = backbone_utils._validate_trainable_layers( + pretrained=True, trainable_backbone_layers=6, max_value=5, default_value=3) + # if not pretrained, should use all trainable layers and warn + with self.assertWarns(UserWarning): + ret = backbone_utils._validate_trainable_layers( + pretrained=False, trainable_backbone_layers=0, max_value=5, default_value=3) + self.assertEqual(ret, 5) + + def test_transform_copy_targets(self): + transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3)) + image = [torch.rand(3, 200, 300), torch.rand(3, 200, 200)] + targets = [{'boxes': torch.rand(3, 4)}, {'boxes': torch.rand(2, 4)}] + targets_copy = copy.deepcopy(targets) + out = transform(image, targets) # noqa: F841 + assert_equal(targets[0]['boxes'], targets_copy[0]['boxes']) + assert_equal(targets[1]['boxes'], targets_copy[1]['boxes']) + + def test_not_float_normalize(self): + transform = GeneralizedRCNNTransform(300, 500, torch.zeros(3), torch.ones(3)) + image = [torch.randint(0, 255, (3, 200, 300), dtype=torch.uint8)] + targets = [{'boxes': torch.rand(3, 4)}] + with self.assertRaises(TypeError): + out = transform(image, targets) # noqa: F841 + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_onnx.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_onnx.py new file mode 100644 index 0000000000000000000000000000000000000000..d0140c79dfc83779bc0387a16cc6fb4ab22b7570 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_onnx.py @@ 
-0,0 +1,530 @@ +# onnxruntime requires python 3.5 or above +try: + # This import should be before that of torch + # see https://github.com/onnx/onnx/issues/2394#issuecomment-581638840 + import onnxruntime +except ImportError: + onnxruntime = None + +from common_utils import set_rng_seed +from _assert_utils import assert_equal +import io +import torch +from torchvision import ops +from torchvision import models +from torchvision.models.detection.image_list import ImageList +from torchvision.models.detection.transform import GeneralizedRCNNTransform +from torchvision.models.detection.rpn import AnchorGenerator, RPNHead, RegionProposalNetwork +from torchvision.models.detection.backbone_utils import resnet_fpn_backbone +from torchvision.models.detection.roi_heads import RoIHeads +from torchvision.models.detection.faster_rcnn import FastRCNNPredictor, TwoMLPHead +from torchvision.models.detection.mask_rcnn import MaskRCNNHeads, MaskRCNNPredictor + +from collections import OrderedDict + +import unittest +from torchvision.ops._register_onnx_ops import _onnx_opset_version + + +@unittest.skipIf(onnxruntime is None, 'ONNX Runtime unavailable') +class ONNXExporterTester(unittest.TestCase): + @classmethod + def setUpClass(cls): + torch.manual_seed(123) + + def run_model(self, model, inputs_list, tolerate_small_mismatch=False, do_constant_folding=True, dynamic_axes=None, + output_names=None, input_names=None): + model.eval() + + onnx_io = io.BytesIO() + if isinstance(inputs_list[0][-1], dict): + torch_onnx_input = inputs_list[0] + ({},) + else: + torch_onnx_input = inputs_list[0] + # export to onnx with the first input + torch.onnx.export(model, torch_onnx_input, onnx_io, + do_constant_folding=do_constant_folding, opset_version=_onnx_opset_version, + dynamic_axes=dynamic_axes, input_names=input_names, output_names=output_names) + # validate the exported model with onnx runtime + for test_inputs in inputs_list: + with torch.no_grad(): + if isinstance(test_inputs, torch.Tensor) or \ + isinstance(test_inputs, list): + test_inputs = (test_inputs,) + test_ouputs = model(*test_inputs) + if isinstance(test_ouputs, torch.Tensor): + test_ouputs = (test_ouputs,) + self.ort_validate(onnx_io, test_inputs, test_ouputs, tolerate_small_mismatch) + + def ort_validate(self, onnx_io, inputs, outputs, tolerate_small_mismatch=False): + + inputs, _ = torch.jit._flatten(inputs) + outputs, _ = torch.jit._flatten(outputs) + + def to_numpy(tensor): + if tensor.requires_grad: + return tensor.detach().cpu().numpy() + else: + return tensor.cpu().numpy() + + inputs = list(map(to_numpy, inputs)) + outputs = list(map(to_numpy, outputs)) + + ort_session = onnxruntime.InferenceSession(onnx_io.getvalue()) + # compute onnxruntime output prediction + ort_inputs = dict((ort_session.get_inputs()[i].name, inpt) for i, inpt in enumerate(inputs)) + ort_outs = ort_session.run(None, ort_inputs) + + for i in range(0, len(outputs)): + try: + torch.testing.assert_allclose(outputs[i], ort_outs[i], rtol=1e-03, atol=1e-05) + except AssertionError as error: + if tolerate_small_mismatch: + self.assertIn("(0.00%)", str(error), str(error)) + else: + raise + + def test_nms(self): + num_boxes = 100 + boxes = torch.rand(num_boxes, 4) + boxes[:, 2:] += boxes[:, :2] + scores = torch.randn(num_boxes) + + class Module(torch.nn.Module): + def forward(self, boxes, scores): + return ops.nms(boxes, scores, 0.5) + + self.run_model(Module(), [(boxes, scores)]) + + def test_batched_nms(self): + num_boxes = 100 + boxes = torch.rand(num_boxes, 4) + boxes[:, 2:] += 
boxes[:, :2] + scores = torch.randn(num_boxes) + idxs = torch.randint(0, 5, size=(num_boxes,)) + + class Module(torch.nn.Module): + def forward(self, boxes, scores, idxs): + return ops.batched_nms(boxes, scores, idxs, 0.5) + + self.run_model(Module(), [(boxes, scores, idxs)]) + + def test_clip_boxes_to_image(self): + boxes = torch.randn(5, 4) * 500 + boxes[:, 2:] += boxes[:, :2] + size = torch.randn(200, 300) + + size_2 = torch.randn(300, 400) + + class Module(torch.nn.Module): + def forward(self, boxes, size): + return ops.boxes.clip_boxes_to_image(boxes, size.shape) + + self.run_model(Module(), [(boxes, size), (boxes, size_2)], + input_names=["boxes", "size"], + dynamic_axes={"size": [0, 1]}) + + def test_roi_align(self): + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 1, 2) + self.run_model(model, [(x, single_roi)]) + + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 1, -1) + self.run_model(model, [(x, single_roi)]) + + def test_roi_align_aligned(self): + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 1.5, 1.5, 3, 3]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 1, 2, aligned=True) + self.run_model(model, [(x, single_roi)]) + + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 0.5, 3, aligned=True) + self.run_model(model, [(x, single_roi)]) + + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 1.8, 2, aligned=True) + self.run_model(model, [(x, single_roi)]) + + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) + model = ops.RoIAlign((2, 2), 2.5, 0, aligned=True) + self.run_model(model, [(x, single_roi)]) + + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 0.2, 0.3, 4.5, 3.5]], dtype=torch.float32) + model = ops.RoIAlign((2, 2), 2.5, -1, aligned=True) + self.run_model(model, [(x, single_roi)]) + + @unittest.skip # Issue in exporting ROIAlign with aligned = True for malformed boxes + def test_roi_align_malformed_boxes(self): + x = torch.randn(1, 1, 10, 10, dtype=torch.float32) + single_roi = torch.tensor([[0, 2, 0.3, 1.5, 1.5]], dtype=torch.float32) + model = ops.RoIAlign((5, 5), 1, 1, aligned=True) + self.run_model(model, [(x, single_roi)]) + + def test_roi_pool(self): + x = torch.rand(1, 1, 10, 10, dtype=torch.float32) + rois = torch.tensor([[0, 0, 0, 4, 4]], dtype=torch.float32) + pool_h = 5 + pool_w = 5 + model = ops.RoIPool((pool_h, pool_w), 2) + self.run_model(model, [(x, rois)]) + + def test_resize_images(self): + class TransformModule(torch.nn.Module): + def __init__(self_module): + super(TransformModule, self_module).__init__() + self_module.transform = self._init_test_generalized_rcnn_transform() + + def forward(self_module, images): + return self_module.transform.resize(images, None)[0] + + input = torch.rand(3, 10, 20) + input_test = torch.rand(3, 100, 150) + self.run_model(TransformModule(), [(input,), (input_test,)], + input_names=["input1"], dynamic_axes={"input1": [0, 1, 2]}) + + def test_transform_images(self): + + class TransformModule(torch.nn.Module): + def __init__(self_module): + super(TransformModule, 
self_module).__init__() + self_module.transform = self._init_test_generalized_rcnn_transform() + + def forward(self_module, images): + return self_module.transform(images)[0].tensors + + input = torch.rand(3, 100, 200), torch.rand(3, 200, 200) + input_test = torch.rand(3, 100, 200), torch.rand(3, 200, 200) + self.run_model(TransformModule(), [(input,), (input_test,)]) + + def _init_test_generalized_rcnn_transform(self): + min_size = 100 + max_size = 200 + image_mean = [0.485, 0.456, 0.406] + image_std = [0.229, 0.224, 0.225] + transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std) + return transform + + def _init_test_rpn(self): + anchor_sizes = ((32,), (64,), (128,), (256,), (512,)) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + rpn_anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios) + out_channels = 256 + rpn_head = RPNHead(out_channels, rpn_anchor_generator.num_anchors_per_location()[0]) + rpn_fg_iou_thresh = 0.7 + rpn_bg_iou_thresh = 0.3 + rpn_batch_size_per_image = 256 + rpn_positive_fraction = 0.5 + rpn_pre_nms_top_n = dict(training=2000, testing=1000) + rpn_post_nms_top_n = dict(training=2000, testing=1000) + rpn_nms_thresh = 0.7 + rpn_score_thresh = 0.0 + + rpn = RegionProposalNetwork( + rpn_anchor_generator, rpn_head, + rpn_fg_iou_thresh, rpn_bg_iou_thresh, + rpn_batch_size_per_image, rpn_positive_fraction, + rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh, + score_thresh=rpn_score_thresh) + return rpn + + def _init_test_roi_heads_faster_rcnn(self): + out_channels = 256 + num_classes = 91 + + box_fg_iou_thresh = 0.5 + box_bg_iou_thresh = 0.5 + box_batch_size_per_image = 512 + box_positive_fraction = 0.25 + bbox_reg_weights = None + box_score_thresh = 0.05 + box_nms_thresh = 0.5 + box_detections_per_img = 100 + + box_roi_pool = ops.MultiScaleRoIAlign( + featmap_names=['0', '1', '2', '3'], + output_size=7, + sampling_ratio=2) + + resolution = box_roi_pool.output_size[0] + representation_size = 1024 + box_head = TwoMLPHead( + out_channels * resolution ** 2, + representation_size) + + representation_size = 1024 + box_predictor = FastRCNNPredictor( + representation_size, + num_classes) + + roi_heads = RoIHeads( + box_roi_pool, box_head, box_predictor, + box_fg_iou_thresh, box_bg_iou_thresh, + box_batch_size_per_image, box_positive_fraction, + bbox_reg_weights, + box_score_thresh, box_nms_thresh, box_detections_per_img) + return roi_heads + + def get_features(self, images): + s0, s1 = images.shape[-2:] + features = [ + ('0', torch.rand(2, 256, s0 // 4, s1 // 4)), + ('1', torch.rand(2, 256, s0 // 8, s1 // 8)), + ('2', torch.rand(2, 256, s0 // 16, s1 // 16)), + ('3', torch.rand(2, 256, s0 // 32, s1 // 32)), + ('4', torch.rand(2, 256, s0 // 64, s1 // 64)), + ] + features = OrderedDict(features) + return features + + def test_rpn(self): + set_rng_seed(0) + + class RPNModule(torch.nn.Module): + def __init__(self_module): + super(RPNModule, self_module).__init__() + self_module.rpn = self._init_test_rpn() + + def forward(self_module, images, features): + images = ImageList(images, [i.shape[-2:] for i in images]) + return self_module.rpn(images, features) + + images = torch.rand(2, 3, 150, 150) + features = self.get_features(images) + images2 = torch.rand(2, 3, 80, 80) + test_features = self.get_features(images2) + + model = RPNModule() + model.eval() + model(images, features) + + self.run_model(model, [(images, features), (images2, test_features)], tolerate_small_mismatch=True, + input_names=["input1", "input2", "input3", "input4", 
"input5", "input6"], + dynamic_axes={"input1": [0, 1, 2, 3], "input2": [0, 1, 2, 3], + "input3": [0, 1, 2, 3], "input4": [0, 1, 2, 3], + "input5": [0, 1, 2, 3], "input6": [0, 1, 2, 3]}) + + def test_multi_scale_roi_align(self): + + class TransformModule(torch.nn.Module): + def __init__(self): + super(TransformModule, self).__init__() + self.model = ops.MultiScaleRoIAlign(['feat1', 'feat2'], 3, 2) + self.image_sizes = [(512, 512)] + + def forward(self, input, boxes): + return self.model(input, boxes, self.image_sizes) + + i = OrderedDict() + i['feat1'] = torch.rand(1, 5, 64, 64) + i['feat2'] = torch.rand(1, 5, 16, 16) + boxes = torch.rand(6, 4) * 256 + boxes[:, 2:] += boxes[:, :2] + + i1 = OrderedDict() + i1['feat1'] = torch.rand(1, 5, 64, 64) + i1['feat2'] = torch.rand(1, 5, 16, 16) + boxes1 = torch.rand(6, 4) * 256 + boxes1[:, 2:] += boxes1[:, :2] + + self.run_model(TransformModule(), [(i, [boxes],), (i1, [boxes1],)]) + + def test_roi_heads(self): + class RoiHeadsModule(torch.nn.Module): + def __init__(self_module): + super(RoiHeadsModule, self_module).__init__() + self_module.transform = self._init_test_generalized_rcnn_transform() + self_module.rpn = self._init_test_rpn() + self_module.roi_heads = self._init_test_roi_heads_faster_rcnn() + + def forward(self_module, images, features): + original_image_sizes = [img.shape[-2:] for img in images] + images = ImageList(images, [i.shape[-2:] for i in images]) + proposals, _ = self_module.rpn(images, features) + detections, _ = self_module.roi_heads(features, proposals, images.image_sizes) + detections = self_module.transform.postprocess(detections, + images.image_sizes, + original_image_sizes) + return detections + + images = torch.rand(2, 3, 100, 100) + features = self.get_features(images) + images2 = torch.rand(2, 3, 150, 150) + test_features = self.get_features(images2) + + model = RoiHeadsModule() + model.eval() + model(images, features) + + self.run_model(model, [(images, features), (images2, test_features)], tolerate_small_mismatch=True, + input_names=["input1", "input2", "input3", "input4", "input5", "input6"], + dynamic_axes={"input1": [0, 1, 2, 3], "input2": [0, 1, 2, 3], "input3": [0, 1, 2, 3], + "input4": [0, 1, 2, 3], "input5": [0, 1, 2, 3], "input6": [0, 1, 2, 3]}) + + def get_image_from_url(self, url, size=None): + import requests + from PIL import Image + from io import BytesIO + from torchvision import transforms + + data = requests.get(url) + image = Image.open(BytesIO(data.content)).convert("RGB") + + if size is None: + size = (300, 200) + image = image.resize(size, Image.BILINEAR) + + to_tensor = transforms.ToTensor() + return to_tensor(image) + + def get_test_images(self): + image_url = "http://farm3.staticflickr.com/2469/3915380994_2e611b1779_z.jpg" + image = self.get_image_from_url(url=image_url, size=(100, 320)) + + image_url2 = "https://pytorch.org/tutorials/_static/img/tv_tutorial/tv_image05.png" + image2 = self.get_image_from_url(url=image_url2, size=(250, 380)) + + images = [image] + test_images = [image2] + return images, test_images + + def test_faster_rcnn(self): + images, test_images = self.get_test_images() + dummy_image = [torch.ones(3, 100, 100) * 0.3] + model = models.detection.faster_rcnn.fasterrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300) + model.eval() + model(images) + # Test exported model on images of different size, or dummy input + self.run_model(model, [(images,), (test_images,), (dummy_image,)], input_names=["images_tensors"], + output_names=["outputs"], + 
dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]}, + tolerate_small_mismatch=True) + # Test exported model for an image with no detections on other images + self.run_model(model, [(dummy_image,), (images,)], input_names=["images_tensors"], + output_names=["outputs"], + dynamic_axes={"images_tensors": [0, 1, 2], "outputs": [0, 1, 2]}, + tolerate_small_mismatch=True) + + # Verify that paste_mask_in_image beahves the same in tracing. + # This test also compares both paste_masks_in_image and _onnx_paste_masks_in_image + # (since jit_trace witll call _onnx_paste_masks_in_image). + def test_paste_mask_in_image(self): + # disable profiling + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + + masks = torch.rand(10, 1, 26, 26) + boxes = torch.rand(10, 4) + boxes[:, 2:] += torch.rand(10, 2) + boxes *= 50 + o_im_s = (100, 100) + from torchvision.models.detection.roi_heads import paste_masks_in_image + out = paste_masks_in_image(masks, boxes, o_im_s) + jit_trace = torch.jit.trace(paste_masks_in_image, + (masks, boxes, + [torch.tensor(o_im_s[0]), + torch.tensor(o_im_s[1])])) + out_trace = jit_trace(masks, boxes, [torch.tensor(o_im_s[0]), torch.tensor(o_im_s[1])]) + + assert torch.all(out.eq(out_trace)) + + masks2 = torch.rand(20, 1, 26, 26) + boxes2 = torch.rand(20, 4) + boxes2[:, 2:] += torch.rand(20, 2) + boxes2 *= 100 + o_im_s2 = (200, 200) + from torchvision.models.detection.roi_heads import paste_masks_in_image + out2 = paste_masks_in_image(masks2, boxes2, o_im_s2) + out_trace2 = jit_trace(masks2, boxes2, [torch.tensor(o_im_s2[0]), torch.tensor(o_im_s2[1])]) + + assert torch.all(out2.eq(out_trace2)) + + def test_mask_rcnn(self): + images, test_images = self.get_test_images() + dummy_image = [torch.ones(3, 100, 100) * 0.3] + model = models.detection.mask_rcnn.maskrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300) + model.eval() + model(images) + # Test exported model on images of different size, or dummy input + self.run_model(model, [(images,), (test_images,), (dummy_image,)], + input_names=["images_tensors"], + output_names=["boxes", "labels", "scores", "masks"], + dynamic_axes={"images_tensors": [0, 1, 2], "boxes": [0, 1], "labels": [0], + "scores": [0], "masks": [0, 1, 2]}, + tolerate_small_mismatch=True) + # TODO: enable this test once dynamic model export is fixed + # Test exported model for an image with no detections on other images + self.run_model(model, [(dummy_image,), (images,)], + input_names=["images_tensors"], + output_names=["boxes", "labels", "scores", "masks"], + dynamic_axes={"images_tensors": [0, 1, 2], "boxes": [0, 1], "labels": [0], + "scores": [0], "masks": [0, 1, 2]}, + tolerate_small_mismatch=True) + + # Verify that heatmaps_to_keypoints behaves the same in tracing. + # This test also compares both heatmaps_to_keypoints and _onnx_heatmaps_to_keypoints + # (since jit_trace witll call _heatmaps_to_keypoints). 
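+    # (As with test_paste_mask_in_image above, the function is traced once on the first
+    # inputs and the traced version is then re-run on inputs of a different shape, so any
+    # shape-dependent behaviour baked into the trace would show up as a mismatch. The
+    # profiling executor is presumably disabled for the same reason: to keep the traced
+    # graph from being re-specialized between the two runs.)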
+ # @unittest.skip("Disable test until Resize bug fixed in ORT") + def test_heatmaps_to_keypoints(self): + # disable profiling + torch._C._jit_set_profiling_executor(False) + torch._C._jit_set_profiling_mode(False) + + maps = torch.rand(10, 1, 26, 26) + rois = torch.rand(10, 4) + from torchvision.models.detection.roi_heads import heatmaps_to_keypoints + out = heatmaps_to_keypoints(maps, rois) + jit_trace = torch.jit.trace(heatmaps_to_keypoints, (maps, rois)) + out_trace = jit_trace(maps, rois) + + assert_equal(out[0], out_trace[0]) + assert_equal(out[1], out_trace[1]) + + maps2 = torch.rand(20, 2, 21, 21) + rois2 = torch.rand(20, 4) + from torchvision.models.detection.roi_heads import heatmaps_to_keypoints + out2 = heatmaps_to_keypoints(maps2, rois2) + out_trace2 = jit_trace(maps2, rois2) + + assert_equal(out2[0], out_trace2[0]) + assert_equal(out2[1], out_trace2[1]) + + def test_keypoint_rcnn(self): + images, test_images = self.get_test_images() + dummy_images = [torch.ones(3, 100, 100) * 0.3] + model = models.detection.keypoint_rcnn.keypointrcnn_resnet50_fpn(pretrained=True, min_size=200, max_size=300) + model.eval() + model(images) + self.run_model(model, [(images,), (test_images,), (dummy_images,)], + input_names=["images_tensors"], + output_names=["outputs1", "outputs2", "outputs3", "outputs4"], + dynamic_axes={"images_tensors": [0, 1, 2]}, + tolerate_small_mismatch=True) + + self.run_model(model, [(dummy_images,), (test_images,)], + input_names=["images_tensors"], + output_names=["outputs1", "outputs2", "outputs3", "outputs4"], + dynamic_axes={"images_tensors": [0, 1, 2]}, + tolerate_small_mismatch=True) + + def test_shufflenet_v2_dynamic_axes(self): + model = models.shufflenet_v2_x0_5(pretrained=True) + dummy_input = torch.randn(1, 3, 224, 224, requires_grad=True) + test_inputs = torch.cat([dummy_input, dummy_input, dummy_input], 0) + + self.run_model(model, [(dummy_input,), (test_inputs,)], + input_names=["input_images"], + output_names=["output"], + dynamic_axes={"input_images": {0: 'batch_size'}, "output": {0: 'batch_size'}}, + tolerate_small_mismatch=True) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_ops.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..964199edc6673856ae9e3ea2baaf7d41327b967e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_ops.py @@ -0,0 +1,1017 @@ +from common_utils import needs_cuda, cpu_only +from _assert_utils import assert_equal +import math +import unittest +import pytest + +import numpy as np + +import torch +from functools import lru_cache +from torch import Tensor +from torch.autograd import gradcheck +from torch.nn.modules.utils import _pair +from torchvision import ops +from typing import Tuple + + +class OpTester(object): + @classmethod + def setUpClass(cls): + cls.dtype = torch.float64 + + def test_forward_cpu_contiguous(self): + self._test_forward(device=torch.device('cpu'), contiguous=True) + + def test_forward_cpu_non_contiguous(self): + self._test_forward(device=torch.device('cpu'), contiguous=False) + + def test_backward_cpu_contiguous(self): + self._test_backward(device=torch.device('cpu'), contiguous=True) + + def test_backward_cpu_non_contiguous(self): + self._test_backward(device=torch.device('cpu'), contiguous=False) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_forward_cuda_contiguous(self): + 
self._test_forward(device=torch.device('cuda'), contiguous=True) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_forward_cuda_non_contiguous(self): + self._test_forward(device=torch.device('cuda'), contiguous=False) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_backward_cuda_contiguous(self): + self._test_backward(device=torch.device('cuda'), contiguous=True) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_backward_cuda_non_contiguous(self): + self._test_backward(device=torch.device('cuda'), contiguous=False) + + def _test_forward(self, device, contiguous): + pass + + def _test_backward(self, device, contiguous): + pass + + +class RoIOpTester(OpTester): + def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwargs): + x_dtype = self.dtype if x_dtype is None else x_dtype + rois_dtype = self.dtype if rois_dtype is None else rois_dtype + pool_size = 5 + # n_channels % (pool_size ** 2) == 0 required for PS opeartions. + n_channels = 2 * (pool_size ** 2) + x = torch.rand(2, n_channels, 10, 10, dtype=x_dtype, device=device) + if not contiguous: + x = x.permute(0, 1, 3, 2) + rois = torch.tensor([[0, 0, 0, 9, 9], # format is (xyxy) + [0, 0, 5, 4, 9], + [0, 5, 5, 9, 9], + [1, 0, 0, 9, 9]], + dtype=rois_dtype, device=device) + + pool_h, pool_w = pool_size, pool_size + y = self.fn(x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs) + # the following should be true whether we're running an autocast test or not. + self.assertTrue(y.dtype == x.dtype) + gt_y = self.expected_fn(x, rois, pool_h, pool_w, spatial_scale=1, + sampling_ratio=-1, device=device, dtype=self.dtype, **kwargs) + + tol = 1e-3 if (x_dtype is torch.half or rois_dtype is torch.half) else 1e-5 + torch.testing.assert_close(gt_y.to(y), y, rtol=tol, atol=tol) + + def _test_backward(self, device, contiguous): + pool_size = 2 + x = torch.rand(1, 2 * (pool_size ** 2), 5, 5, dtype=self.dtype, device=device, requires_grad=True) + if not contiguous: + x = x.permute(0, 1, 3, 2) + rois = torch.tensor([[0, 0, 0, 4, 4], # format is (xyxy) + [0, 0, 2, 3, 4], + [0, 2, 2, 4, 4]], + dtype=self.dtype, device=device) + + def func(z): + return self.fn(z, rois, pool_size, pool_size, spatial_scale=1, sampling_ratio=1) + + script_func = self.get_script_fn(rois, pool_size) + + self.assertTrue(gradcheck(func, (x,))) + self.assertTrue(gradcheck(script_func, (x,))) + + def test_boxes_shape(self): + self._test_boxes_shape() + + def _helper_boxes_shape(self, func): + # test boxes as Tensor[N, 5] + with self.assertRaises(AssertionError): + a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8) + boxes = torch.tensor([[0, 0, 3, 3]], dtype=a.dtype) + func(a, boxes, output_size=(2, 2)) + + # test boxes as List[Tensor[N, 4]] + with self.assertRaises(AssertionError): + a = torch.linspace(1, 8 * 8, 8 * 8).reshape(1, 1, 8, 8) + boxes = torch.tensor([[0, 0, 3]], dtype=a.dtype) + ops.roi_pool(a, [boxes], output_size=(2, 2)) + + def fn(*args, **kwargs): + pass + + def get_script_fn(*args, **kwargs): + pass + + def expected_fn(*args, **kwargs): + pass + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_autocast(self): + for x_dtype in (torch.float, torch.half): + for rois_dtype in (torch.float, torch.half): + with torch.cuda.amp.autocast(): + self._test_forward(torch.device("cuda"), contiguous=False, x_dtype=x_dtype, rois_dtype=rois_dtype) + + +class RoIPoolTester(RoIOpTester, unittest.TestCase): + 
def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): + return ops.RoIPool((pool_h, pool_w), spatial_scale)(x, rois) + + def get_script_fn(self, rois, pool_size): + scriped = torch.jit.script(ops.roi_pool) + return lambda x: scriped(x, rois, pool_size) + + def expected_fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, + device=None, dtype=torch.float64): + if device is None: + device = torch.device("cpu") + + n_channels = x.size(1) + y = torch.zeros(rois.size(0), n_channels, pool_h, pool_w, dtype=dtype, device=device) + + def get_slice(k, block): + return slice(int(np.floor(k * block)), int(np.ceil((k + 1) * block))) + + for roi_idx, roi in enumerate(rois): + batch_idx = int(roi[0]) + j_begin, i_begin, j_end, i_end = (int(round(x.item() * spatial_scale)) for x in roi[1:]) + roi_x = x[batch_idx, :, i_begin:i_end + 1, j_begin:j_end + 1] + + roi_h, roi_w = roi_x.shape[-2:] + bin_h = roi_h / pool_h + bin_w = roi_w / pool_w + + for i in range(0, pool_h): + for j in range(0, pool_w): + bin_x = roi_x[:, get_slice(i, bin_h), get_slice(j, bin_w)] + if bin_x.numel() > 0: + y[roi_idx, :, i, j] = bin_x.reshape(n_channels, -1).max(dim=1)[0] + return y + + def _test_boxes_shape(self): + self._helper_boxes_shape(ops.roi_pool) + + +class PSRoIPoolTester(RoIOpTester, unittest.TestCase): + def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): + return ops.PSRoIPool((pool_h, pool_w), 1)(x, rois) + + def get_script_fn(self, rois, pool_size): + scriped = torch.jit.script(ops.ps_roi_pool) + return lambda x: scriped(x, rois, pool_size) + + def expected_fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, + device=None, dtype=torch.float64): + if device is None: + device = torch.device("cpu") + n_input_channels = x.size(1) + self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw") + n_output_channels = int(n_input_channels / (pool_h * pool_w)) + y = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device) + + def get_slice(k, block): + return slice(int(np.floor(k * block)), int(np.ceil((k + 1) * block))) + + for roi_idx, roi in enumerate(rois): + batch_idx = int(roi[0]) + j_begin, i_begin, j_end, i_end = (int(round(x.item() * spatial_scale)) for x in roi[1:]) + roi_x = x[batch_idx, :, i_begin:i_end + 1, j_begin:j_end + 1] + + roi_height = max(i_end - i_begin, 1) + roi_width = max(j_end - j_begin, 1) + bin_h, bin_w = roi_height / float(pool_h), roi_width / float(pool_w) + + for i in range(0, pool_h): + for j in range(0, pool_w): + bin_x = roi_x[:, get_slice(i, bin_h), get_slice(j, bin_w)] + if bin_x.numel() > 0: + area = bin_x.size(-2) * bin_x.size(-1) + for c_out in range(0, n_output_channels): + c_in = c_out * (pool_h * pool_w) + pool_w * i + j + t = torch.sum(bin_x[c_in, :, :]) + y[roi_idx, c_out, i, j] = t / area + return y + + def _test_boxes_shape(self): + self._helper_boxes_shape(ops.ps_roi_pool) + + +def bilinear_interpolate(data, y, x, snap_border=False): + height, width = data.shape + + if snap_border: + if -1 < y <= 0: + y = 0 + elif height - 1 <= y < height: + y = height - 1 + + if -1 < x <= 0: + x = 0 + elif width - 1 <= x < width: + x = width - 1 + + y_low = int(math.floor(y)) + x_low = int(math.floor(x)) + y_high = y_low + 1 + x_high = x_low + 1 + + wy_h = y - y_low + wx_h = x - x_low + wy_l = 1 - wy_h + wx_l = 1 - wx_h + + val = 0 + for wx, xp in zip((wx_l, wx_h), (x_low, x_high)): + for wy, yp in zip((wy_l, wy_h), (y_low, 
y_high)): + if 0 <= yp < height and 0 <= xp < width: + val += wx * wy * data[yp, xp] + return val + + +class RoIAlignTester(RoIOpTester, unittest.TestCase): + def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False, **kwargs): + return ops.RoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, + sampling_ratio=sampling_ratio, aligned=aligned)(x, rois) + + def get_script_fn(self, rois, pool_size): + scriped = torch.jit.script(ops.roi_align) + return lambda x: scriped(x, rois, pool_size) + + def expected_fn(self, in_data, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, aligned=False, + device=None, dtype=torch.float64): + if device is None: + device = torch.device("cpu") + n_channels = in_data.size(1) + out_data = torch.zeros(rois.size(0), n_channels, pool_h, pool_w, dtype=dtype, device=device) + + offset = 0.5 if aligned else 0. + + for r, roi in enumerate(rois): + batch_idx = int(roi[0]) + j_begin, i_begin, j_end, i_end = (x.item() * spatial_scale - offset for x in roi[1:]) + + roi_h = i_end - i_begin + roi_w = j_end - j_begin + bin_h = roi_h / pool_h + bin_w = roi_w / pool_w + + for i in range(0, pool_h): + start_h = i_begin + i * bin_h + grid_h = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_h)) + for j in range(0, pool_w): + start_w = j_begin + j * bin_w + grid_w = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_w)) + + for channel in range(0, n_channels): + + val = 0 + for iy in range(0, grid_h): + y = start_h + (iy + 0.5) * bin_h / grid_h + for ix in range(0, grid_w): + x = start_w + (ix + 0.5) * bin_w / grid_w + val += bilinear_interpolate(in_data[batch_idx, channel, :, :], y, x, snap_border=True) + val /= grid_h * grid_w + + out_data[r, channel, i, j] = val + return out_data + + def _test_boxes_shape(self): + self._helper_boxes_shape(ops.roi_align) + + def _test_forward(self, device, contiguous, x_dtype=None, rois_dtype=None, **kwargs): + for aligned in (True, False): + super()._test_forward(device, contiguous, x_dtype, rois_dtype, aligned=aligned) + + def test_qroialign(self): + """Make sure quantized version of RoIAlign is close to float version""" + pool_size = 5 + img_size = 10 + n_channels = 2 + num_imgs = 1 + dtype = torch.float + + def make_rois(num_rois=1000): + rois = torch.randint(0, img_size // 2, size=(num_rois, 5)).to(dtype) + rois[:, 0] = torch.randint(0, num_imgs, size=(num_rois,)) # set batch index + rois[:, 3:] += rois[:, 1:3] # make sure boxes aren't degenerate + return rois + + for aligned in (True, False): + for scale, zero_point in ((1, 0), (2, 10), (0.1, 50)): + for qdtype in (torch.qint8, torch.quint8, torch.qint32): + + x = torch.randint(50, 100, size=(num_imgs, n_channels, img_size, img_size)).to(dtype) + qx = torch.quantize_per_tensor(x, scale=scale, zero_point=zero_point, dtype=qdtype) + + rois = make_rois() + qrois = torch.quantize_per_tensor(rois, scale=scale, zero_point=zero_point, dtype=qdtype) + + x, rois = qx.dequantize(), qrois.dequantize() # we want to pass the same inputs + + y = ops.roi_align( + x, + rois, + output_size=pool_size, + spatial_scale=1, + sampling_ratio=-1, + aligned=aligned, + ) + qy = ops.roi_align( + qx, + qrois, + output_size=pool_size, + spatial_scale=1, + sampling_ratio=-1, + aligned=aligned, + ) + + # The output qy is itself a quantized tensor and there might have been a loss of info when it was + # quantized. 
For a fair comparison we need to quantize y as well + quantized_float_y = torch.quantize_per_tensor(y, scale=scale, zero_point=zero_point, dtype=qdtype) + + try: + # Ideally, we would assert this, which passes with (scale, zero) == (1, 0) + self.assertTrue((qy == quantized_float_y).all()) + except AssertionError: + # But because the computation aren't exactly the same between the 2 RoIAlign procedures, some + # rounding error may lead to a difference of 2 in the output. + # For example with (scale, zero) = (2, 10), 45.00000... will be quantized to 44 + # but 45.00000001 will be rounded to 46. We make sure below that: + # - such discrepancies between qy and quantized_float_y are very rare (less then 5%) + # - any difference between qy and quantized_float_y is == scale + diff_idx = torch.where(qy != quantized_float_y) + num_diff = diff_idx[0].numel() + self.assertTrue(num_diff / qy.numel() < .05) + + abs_diff = torch.abs(qy[diff_idx].dequantize() - quantized_float_y[diff_idx].dequantize()) + t_scale = torch.full_like(abs_diff, fill_value=scale) + torch.testing.assert_close(abs_diff, t_scale, rtol=1e-5, atol=1e-5) + + x = torch.randint(50, 100, size=(2, 3, 10, 10)).to(dtype) + qx = torch.quantize_per_tensor(x, scale=1, zero_point=0, dtype=torch.qint8) + rois = make_rois(10) + qrois = torch.quantize_per_tensor(rois, scale=1, zero_point=0, dtype=torch.qint8) + with self.assertRaisesRegex(RuntimeError, "Only one image per batch is allowed"): + ops.roi_align(qx, qrois, output_size=pool_size) + + +class PSRoIAlignTester(RoIOpTester, unittest.TestCase): + def fn(self, x, rois, pool_h, pool_w, spatial_scale=1, sampling_ratio=-1, **kwargs): + return ops.PSRoIAlign((pool_h, pool_w), spatial_scale=spatial_scale, + sampling_ratio=sampling_ratio)(x, rois) + + def get_script_fn(self, rois, pool_size): + scriped = torch.jit.script(ops.ps_roi_align) + return lambda x: scriped(x, rois, pool_size) + + def expected_fn(self, in_data, rois, pool_h, pool_w, device, spatial_scale=1, + sampling_ratio=-1, dtype=torch.float64): + if device is None: + device = torch.device("cpu") + n_input_channels = in_data.size(1) + self.assertEqual(n_input_channels % (pool_h * pool_w), 0, "input channels must be divisible by ph * pw") + n_output_channels = int(n_input_channels / (pool_h * pool_w)) + out_data = torch.zeros(rois.size(0), n_output_channels, pool_h, pool_w, dtype=dtype, device=device) + + for r, roi in enumerate(rois): + batch_idx = int(roi[0]) + j_begin, i_begin, j_end, i_end = (x.item() * spatial_scale - 0.5 for x in roi[1:]) + + roi_h = i_end - i_begin + roi_w = j_end - j_begin + bin_h = roi_h / pool_h + bin_w = roi_w / pool_w + + for i in range(0, pool_h): + start_h = i_begin + i * bin_h + grid_h = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_h)) + for j in range(0, pool_w): + start_w = j_begin + j * bin_w + grid_w = sampling_ratio if sampling_ratio > 0 else int(np.ceil(bin_w)) + for c_out in range(0, n_output_channels): + c_in = c_out * (pool_h * pool_w) + pool_w * i + j + + val = 0 + for iy in range(0, grid_h): + y = start_h + (iy + 0.5) * bin_h / grid_h + for ix in range(0, grid_w): + x = start_w + (ix + 0.5) * bin_w / grid_w + val += bilinear_interpolate(in_data[batch_idx, c_in, :, :], y, x, snap_border=True) + val /= grid_h * grid_w + + out_data[r, c_out, i, j] = val + return out_data + + def _test_boxes_shape(self): + self._helper_boxes_shape(ops.ps_roi_align) + + +class MultiScaleRoIAlignTester(unittest.TestCase): + def test_msroialign_repr(self): + fmap_names = ['0'] + output_size = (7, 7) + 
sampling_ratio = 2 + # Pass mock feature map names + t = ops.poolers.MultiScaleRoIAlign(fmap_names, output_size, sampling_ratio) + + # Check integrity of object __repr__ attribute + expected_string = (f"MultiScaleRoIAlign(featmap_names={fmap_names}, output_size={output_size}, " + f"sampling_ratio={sampling_ratio})") + self.assertEqual(t.__repr__(), expected_string) + + +class TestNMS: + def _reference_nms(self, boxes, scores, iou_threshold): + """ + Args: + box_scores (N, 5): boxes in corner-form and probabilities. + iou_threshold: intersection over union threshold. + Returns: + picked: a list of indexes of the kept boxes + """ + picked = [] + _, indexes = scores.sort(descending=True) + while len(indexes) > 0: + current = indexes[0] + picked.append(current.item()) + if len(indexes) == 1: + break + current_box = boxes[current, :] + indexes = indexes[1:] + rest_boxes = boxes[indexes, :] + iou = ops.box_iou(rest_boxes, current_box.unsqueeze(0)).squeeze(1) + indexes = indexes[iou <= iou_threshold] + + return torch.as_tensor(picked) + + def _create_tensors_with_iou(self, N, iou_thresh): + # force last box to have a pre-defined iou with the first box + # let b0 be [x0, y0, x1, y1], and b1 be [x0, y0, x1 + d, y1], + # then, in order to satisfy ops.iou(b0, b1) == iou_thresh, + # we need to have d = (x1 - x0) * (1 - iou_thresh) / iou_thresh + # Adjust the threshold upward a bit with the intent of creating + # at least one box that exceeds (barely) the threshold and so + # should be suppressed. + boxes = torch.rand(N, 4) * 100 + boxes[:, 2:] += boxes[:, :2] + boxes[-1, :] = boxes[0, :] + x0, y0, x1, y1 = boxes[-1].tolist() + iou_thresh += 1e-5 + boxes[-1, 2] += (x1 - x0) * (1 - iou_thresh) / iou_thresh + scores = torch.rand(N) + return boxes, scores + + @cpu_only + @pytest.mark.parametrize("iou", (.2, .5, .8)) + def test_nms_ref(self, iou): + err_msg = 'NMS incompatible between CPU and reference implementation for IoU={}' + boxes, scores = self._create_tensors_with_iou(1000, iou) + keep_ref = self._reference_nms(boxes, scores, iou) + keep = ops.nms(boxes, scores, iou) + assert torch.allclose(keep, keep_ref), err_msg.format(iou) + + @cpu_only + def test_nms_input_errors(self): + with pytest.raises(RuntimeError): + ops.nms(torch.rand(4), torch.rand(3), 0.5) + with pytest.raises(RuntimeError): + ops.nms(torch.rand(3, 5), torch.rand(3), 0.5) + with pytest.raises(RuntimeError): + ops.nms(torch.rand(3, 4), torch.rand(3, 2), 0.5) + with pytest.raises(RuntimeError): + ops.nms(torch.rand(3, 4), torch.rand(4), 0.5) + + @cpu_only + @pytest.mark.parametrize("iou", (.2, .5, .8)) + @pytest.mark.parametrize("scale, zero_point", ((1, 0), (2, 50), (3, 10))) + def test_qnms(self, iou, scale, zero_point): + # Note: we compare qnms vs nms instead of qnms vs reference implementation. 
+ # This is because with the int convertion, the trick used in _create_tensors_with_iou + # doesn't really work (in fact, nms vs reference implem will also fail with ints) + err_msg = 'NMS and QNMS give different results for IoU={}' + boxes, scores = self._create_tensors_with_iou(1000, iou) + scores *= 100 # otherwise most scores would be 0 or 1 after int convertion + + qboxes = torch.quantize_per_tensor(boxes, scale=scale, zero_point=zero_point, dtype=torch.quint8) + qscores = torch.quantize_per_tensor(scores, scale=scale, zero_point=zero_point, dtype=torch.quint8) + + boxes = qboxes.dequantize() + scores = qscores.dequantize() + + keep = ops.nms(boxes, scores, iou) + qkeep = ops.nms(qboxes, qscores, iou) + + assert torch.allclose(qkeep, keep), err_msg.format(iou) + + @needs_cuda + @pytest.mark.parametrize("iou", (.2, .5, .8)) + def test_nms_cuda(self, iou, dtype=torch.float64): + tol = 1e-3 if dtype is torch.half else 1e-5 + err_msg = 'NMS incompatible between CPU and CUDA for IoU={}' + + boxes, scores = self._create_tensors_with_iou(1000, iou) + r_cpu = ops.nms(boxes, scores, iou) + r_cuda = ops.nms(boxes.cuda(), scores.cuda(), iou) + + is_eq = torch.allclose(r_cpu, r_cuda.cpu()) + if not is_eq: + # if the indices are not the same, ensure that it's because the scores + # are duplicate + is_eq = torch.allclose(scores[r_cpu], scores[r_cuda.cpu()], rtol=tol, atol=tol) + assert is_eq, err_msg.format(iou) + + @needs_cuda + @pytest.mark.parametrize("iou", (.2, .5, .8)) + @pytest.mark.parametrize("dtype", (torch.float, torch.half)) + def test_autocast(self, iou, dtype): + with torch.cuda.amp.autocast(): + self.test_nms_cuda(iou=iou, dtype=dtype) + + @needs_cuda + def test_nms_cuda_float16(self): + boxes = torch.tensor([[285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019]]).cuda() + scores = torch.tensor([0.6370, 0.7569, 0.3966]).cuda() + + iou_thres = 0.2 + keep32 = ops.nms(boxes, scores, iou_thres) + keep16 = ops.nms(boxes.to(torch.float16), scores.to(torch.float16), iou_thres) + assert_equal(keep32, keep16) + + @cpu_only + def test_batched_nms_implementations(self): + """Make sure that both implementations of batched_nms yield identical results""" + + num_boxes = 1000 + iou_threshold = .9 + + boxes = torch.cat((torch.rand(num_boxes, 2), torch.rand(num_boxes, 2) + 10), dim=1) + assert max(boxes[:, 0]) < min(boxes[:, 2]) # x1 < x2 + assert max(boxes[:, 1]) < min(boxes[:, 3]) # y1 < y2 + + scores = torch.rand(num_boxes) + idxs = torch.randint(0, 4, size=(num_boxes,)) + keep_vanilla = ops.boxes._batched_nms_vanilla(boxes, scores, idxs, iou_threshold) + keep_trick = ops.boxes._batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold) + + torch.testing.assert_close( + keep_vanilla, keep_trick, msg="The vanilla and the trick implementation yield different nms outputs." 
+ ) + + # Also make sure an empty tensor is returned if boxes is empty + empty = torch.empty((0,), dtype=torch.int64) + torch.testing.assert_close(empty, ops.batched_nms(empty, None, None, None)) + + +class DeformConvTester(OpTester, unittest.TestCase): + def expected_fn(self, x, weight, offset, mask, bias, stride=1, padding=0, dilation=1): + stride_h, stride_w = _pair(stride) + pad_h, pad_w = _pair(padding) + dil_h, dil_w = _pair(dilation) + weight_h, weight_w = weight.shape[-2:] + + n_batches, n_in_channels, in_h, in_w = x.shape + n_out_channels = weight.shape[0] + + out_h = (in_h + 2 * pad_h - (dil_h * (weight_h - 1) + 1)) // stride_h + 1 + out_w = (in_w + 2 * pad_w - (dil_w * (weight_w - 1) + 1)) // stride_w + 1 + + n_offset_grps = offset.shape[1] // (2 * weight_h * weight_w) + in_c_per_offset_grp = n_in_channels // n_offset_grps + + n_weight_grps = n_in_channels // weight.shape[1] + in_c_per_weight_grp = weight.shape[1] + out_c_per_weight_grp = n_out_channels // n_weight_grps + + out = torch.zeros(n_batches, n_out_channels, out_h, out_w, device=x.device, dtype=x.dtype) + for b in range(n_batches): + for c_out in range(n_out_channels): + for i in range(out_h): + for j in range(out_w): + for di in range(weight_h): + for dj in range(weight_w): + for c in range(in_c_per_weight_grp): + weight_grp = c_out // out_c_per_weight_grp + c_in = weight_grp * in_c_per_weight_grp + c + + offset_grp = c_in // in_c_per_offset_grp + mask_idx = offset_grp * (weight_h * weight_w) + di * weight_w + dj + offset_idx = 2 * mask_idx + + pi = stride_h * i - pad_h + dil_h * di + offset[b, offset_idx, i, j] + pj = stride_w * j - pad_w + dil_w * dj + offset[b, offset_idx + 1, i, j] + + mask_value = 1.0 + if mask is not None: + mask_value = mask[b, mask_idx, i, j] + + out[b, c_out, i, j] += (mask_value * weight[c_out, c, di, dj] * + bilinear_interpolate(x[b, c_in, :, :], pi, pj)) + out += bias.view(1, n_out_channels, 1, 1) + return out + + @lru_cache(maxsize=None) + def get_fn_args(self, device, contiguous, batch_sz, dtype): + n_in_channels = 6 + n_out_channels = 2 + n_weight_grps = 2 + n_offset_grps = 3 + + stride = (2, 1) + pad = (1, 0) + dilation = (2, 1) + + stride_h, stride_w = stride + pad_h, pad_w = pad + dil_h, dil_w = dilation + weight_h, weight_w = (3, 2) + in_h, in_w = (5, 4) + + out_h = (in_h + 2 * pad_h - (dil_h * (weight_h - 1) + 1)) // stride_h + 1 + out_w = (in_w + 2 * pad_w - (dil_w * (weight_w - 1) + 1)) // stride_w + 1 + + x = torch.rand(batch_sz, n_in_channels, in_h, in_w, device=device, dtype=dtype, requires_grad=True) + + offset = torch.randn(batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w, + device=device, dtype=dtype, requires_grad=True) + + mask = torch.randn(batch_sz, n_offset_grps * weight_h * weight_w, out_h, out_w, + device=device, dtype=dtype, requires_grad=True) + + weight = torch.randn(n_out_channels, n_in_channels // n_weight_grps, weight_h, weight_w, + device=device, dtype=dtype, requires_grad=True) + + bias = torch.randn(n_out_channels, device=device, dtype=dtype, requires_grad=True) + + if not contiguous: + x = x.permute(0, 1, 3, 2).contiguous().permute(0, 1, 3, 2) + offset = offset.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1) + mask = mask.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1) + weight = weight.permute(3, 2, 0, 1).contiguous().permute(2, 3, 1, 0) + + return x, weight, offset, mask, bias, stride, pad, dilation + + def _test_forward(self, device, contiguous, dtype=None): + dtype = self.dtype if dtype is None else dtype + for batch_sz in [0, 
33]: + self._test_forward_with_batchsize(device, contiguous, batch_sz, dtype) + + def _test_forward_with_batchsize(self, device, contiguous, batch_sz, dtype): + x, _, offset, mask, _, stride, padding, dilation = self.get_fn_args(device, contiguous, batch_sz, dtype) + in_channels = 6 + out_channels = 2 + kernel_size = (3, 2) + groups = 2 + tol = 2e-3 if dtype is torch.half else 1e-5 + + layer = ops.DeformConv2d(in_channels, out_channels, kernel_size, stride=stride, padding=padding, + dilation=dilation, groups=groups).to(device=x.device, dtype=dtype) + res = layer(x, offset, mask) + + weight = layer.weight.data + bias = layer.bias.data + expected = self.expected_fn(x, weight, offset, mask, bias, stride=stride, padding=padding, dilation=dilation) + + torch.testing.assert_close( + res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected) + ) + + # no modulation test + res = layer(x, offset) + expected = self.expected_fn(x, weight, offset, None, bias, stride=stride, padding=padding, dilation=dilation) + + torch.testing.assert_close( + res.to(expected), expected, rtol=tol, atol=tol, msg='\nres:\n{}\nexpected:\n{}'.format(res, expected) + ) + + # test for wrong sizes + with self.assertRaises(RuntimeError): + wrong_offset = torch.rand_like(offset[:, :2]) + res = layer(x, wrong_offset) + + with self.assertRaises(RuntimeError): + wrong_mask = torch.rand_like(mask[:, :2]) + res = layer(x, offset, wrong_mask) + + def _test_backward(self, device, contiguous): + for batch_sz in [0, 33]: + self._test_backward_with_batchsize(device, contiguous, batch_sz) + + def _test_backward_with_batchsize(self, device, contiguous, batch_sz): + x, weight, offset, mask, bias, stride, padding, dilation = self.get_fn_args(device, contiguous, + batch_sz, self.dtype) + + def func(x_, offset_, mask_, weight_, bias_): + return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride, + padding=padding, dilation=dilation, mask=mask_) + + gradcheck(func, (x, offset, mask, weight, bias), nondet_tol=1e-5) + + def func_no_mask(x_, offset_, weight_, bias_): + return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride, + padding=padding, dilation=dilation, mask=None) + + gradcheck(func_no_mask, (x, offset, weight, bias), nondet_tol=1e-5) + + @torch.jit.script + def script_func(x_, offset_, mask_, weight_, bias_, stride_, pad_, dilation_): + # type:(Tensor, Tensor, Tensor, Tensor, Tensor, Tuple[int, int], Tuple[int, int], Tuple[int, int])->Tensor + return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride_, + padding=pad_, dilation=dilation_, mask=mask_) + + gradcheck(lambda z, off, msk, wei, bi: script_func(z, off, msk, wei, bi, stride, padding, dilation), + (x, offset, mask, weight, bias), nondet_tol=1e-5) + + @torch.jit.script + def script_func_no_mask(x_, offset_, weight_, bias_, stride_, pad_, dilation_): + # type:(Tensor, Tensor, Tensor, Tensor, Tuple[int, int], Tuple[int, int], Tuple[int, int])->Tensor + return ops.deform_conv2d(x_, offset_, weight_, bias_, stride=stride_, + padding=pad_, dilation=dilation_, mask=None) + + gradcheck(lambda z, off, wei, bi: script_func_no_mask(z, off, wei, bi, stride, padding, dilation), + (x, offset, weight, bias), nondet_tol=1e-5) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_compare_cpu_cuda_grads(self): + # Test from https://github.com/pytorch/vision/issues/2598 + # Run on CUDA only + for contiguous in [False, True]: + # compare grads computed on CUDA with grads computed on CPU + 
true_cpu_grads = None + + init_weight = torch.randn(9, 9, 3, 3, requires_grad=True) + img = torch.randn(8, 9, 1000, 110) + offset = torch.rand(8, 2 * 3 * 3, 1000, 110) + mask = torch.rand(8, 3 * 3, 1000, 110) + + if not contiguous: + img = img.permute(0, 1, 3, 2).contiguous().permute(0, 1, 3, 2) + offset = offset.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1) + mask = mask.permute(1, 3, 0, 2).contiguous().permute(2, 0, 3, 1) + weight = init_weight.permute(3, 2, 0, 1).contiguous().permute(2, 3, 1, 0) + else: + weight = init_weight + + for d in ["cpu", "cuda"]: + + out = ops.deform_conv2d(img.to(d), offset.to(d), weight.to(d), padding=1, mask=mask.to(d)) + out.mean().backward() + if true_cpu_grads is None: + true_cpu_grads = init_weight.grad + self.assertTrue(true_cpu_grads is not None) + else: + self.assertTrue(init_weight.grad is not None) + res_grads = init_weight.grad.to("cpu") + torch.testing.assert_close(true_cpu_grads, res_grads) + + @unittest.skipIf(not torch.cuda.is_available(), "CUDA unavailable") + def test_autocast(self): + for dtype in (torch.float, torch.half): + with torch.cuda.amp.autocast(): + self._test_forward(torch.device("cuda"), False, dtype=dtype) + + +class FrozenBNTester(unittest.TestCase): + def test_frozenbatchnorm2d_repr(self): + num_features = 32 + eps = 1e-5 + t = ops.misc.FrozenBatchNorm2d(num_features, eps=eps) + + # Check integrity of object __repr__ attribute + expected_string = f"FrozenBatchNorm2d({num_features}, eps={eps})" + self.assertEqual(t.__repr__(), expected_string) + + def test_frozenbatchnorm2d_eps(self): + sample_size = (4, 32, 28, 28) + x = torch.rand(sample_size) + state_dict = dict(weight=torch.rand(sample_size[1]), + bias=torch.rand(sample_size[1]), + running_mean=torch.rand(sample_size[1]), + running_var=torch.rand(sample_size[1]), + num_batches_tracked=torch.tensor(100)) + + # Check that default eps is equal to the one of BN + fbn = ops.misc.FrozenBatchNorm2d(sample_size[1]) + fbn.load_state_dict(state_dict, strict=False) + bn = torch.nn.BatchNorm2d(sample_size[1]).eval() + bn.load_state_dict(state_dict) + # Difference is expected to fall in an acceptable range + torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6) + + # Check computation for eps > 0 + fbn = ops.misc.FrozenBatchNorm2d(sample_size[1], eps=1e-5) + fbn.load_state_dict(state_dict, strict=False) + bn = torch.nn.BatchNorm2d(sample_size[1], eps=1e-5).eval() + bn.load_state_dict(state_dict) + torch.testing.assert_close(fbn(x), bn(x), rtol=1e-5, atol=1e-6) + + def test_frozenbatchnorm2d_n_arg(self): + """Ensure a warning is thrown when passing `n` kwarg + (remove this when support of `n` is dropped)""" + self.assertWarns(DeprecationWarning, ops.misc.FrozenBatchNorm2d, 32, eps=1e-5, n=32) + + +class BoxConversionTester(unittest.TestCase): + @staticmethod + def _get_box_sequences(): + # Define here the argument type of `boxes` supported by region pooling operations + box_tensor = torch.tensor([[0, 0, 0, 100, 100], [1, 0, 0, 100, 100]], dtype=torch.float) + box_list = [torch.tensor([[0, 0, 100, 100]], dtype=torch.float), + torch.tensor([[0, 0, 100, 100]], dtype=torch.float)] + box_tuple = tuple(box_list) + return box_tensor, box_list, box_tuple + + def test_check_roi_boxes_shape(self): + # Ensure common sequences of tensors are supported + for box_sequence in self._get_box_sequences(): + self.assertIsNone(ops._utils.check_roi_boxes_shape(box_sequence)) + + def test_convert_boxes_to_roi_format(self): + # Ensure common sequences of tensors yield the same result + 
ref_tensor = None + for box_sequence in self._get_box_sequences(): + if ref_tensor is None: + ref_tensor = box_sequence + else: + self.assertTrue(torch.equal(ref_tensor, ops._utils.convert_boxes_to_roi_format(box_sequence))) + + +class BoxTester(unittest.TestCase): + def test_bbox_same(self): + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + + exp_xyxy = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + + assert exp_xyxy.size() == torch.Size([4, 4]) + assert_equal(ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xyxy"), exp_xyxy) + assert_equal(ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="xywh"), exp_xyxy) + assert_equal(ops.box_convert(box_tensor, in_fmt="cxcywh", out_fmt="cxcywh"), exp_xyxy) + + def test_bbox_xyxy_xywh(self): + # Simple test convert boxes to xywh and back. Make sure they are same. + # box_tensor is in x1 y1 x2 y2 format. + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + exp_xywh = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float) + + assert exp_xywh.size() == torch.Size([4, 4]) + box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh") + assert_equal(box_xywh, exp_xywh) + + # Reverse conversion + box_xyxy = ops.box_convert(box_xywh, in_fmt="xywh", out_fmt="xyxy") + assert_equal(box_xyxy, box_tensor) + + def test_bbox_xyxy_cxcywh(self): + # Simple test convert boxes to xywh and back. Make sure they are same. + # box_tensor is in x1 y1 x2 y2 format. + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + exp_cxcywh = torch.tensor([[50, 50, 100, 100], [0, 0, 0, 0], + [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float) + + assert exp_cxcywh.size() == torch.Size([4, 4]) + box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh") + assert_equal(box_cxcywh, exp_cxcywh) + + # Reverse conversion + box_xyxy = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xyxy") + assert_equal(box_xyxy, box_tensor) + + def test_bbox_xywh_cxcywh(self): + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float) + + # This is wrong + exp_cxcywh = torch.tensor([[50, 50, 100, 100], [0, 0, 0, 0], + [20, 25, 20, 20], [58, 65, 70, 60]], dtype=torch.float) + + assert exp_cxcywh.size() == torch.Size([4, 4]) + box_cxcywh = ops.box_convert(box_tensor, in_fmt="xywh", out_fmt="cxcywh") + assert_equal(box_cxcywh, exp_cxcywh) + + # Reverse conversion + box_xywh = ops.box_convert(box_cxcywh, in_fmt="cxcywh", out_fmt="xywh") + assert_equal(box_xywh, box_tensor) + + def test_bbox_invalid(self): + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 20, 20], [23, 35, 70, 60]], dtype=torch.float) + + invalid_infmts = ["xwyh", "cxwyh"] + invalid_outfmts = ["xwcx", "xhwcy"] + for inv_infmt in invalid_infmts: + for inv_outfmt in invalid_outfmts: + self.assertRaises(ValueError, ops.box_convert, box_tensor, inv_infmt, inv_outfmt) + + def test_bbox_convert_jit(self): + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + + scripted_fn = torch.jit.script(ops.box_convert) + TOLERANCE = 1e-3 + + box_xywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="xywh") + scripted_xywh = scripted_fn(box_tensor, 'xyxy', 'xywh') + 
torch.testing.assert_close(scripted_xywh, box_xywh, rtol=0.0, atol=TOLERANCE) + + box_cxcywh = ops.box_convert(box_tensor, in_fmt="xyxy", out_fmt="cxcywh") + scripted_cxcywh = scripted_fn(box_tensor, 'xyxy', 'cxcywh') + torch.testing.assert_close(scripted_cxcywh, box_cxcywh, rtol=0.0, atol=TOLERANCE) + + +class BoxAreaTester(unittest.TestCase): + def test_box_area(self): + def area_check(box, expected, tolerance=1e-4): + out = ops.box_area(box) + torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + + # Check for int boxes + for dtype in [torch.int8, torch.int16, torch.int32, torch.int64]: + box_tensor = torch.tensor([[0, 0, 100, 100], [0, 0, 0, 0]], dtype=dtype) + expected = torch.tensor([10000, 0]) + area_check(box_tensor, expected) + + # Check for float32 and float64 boxes + for dtype in [torch.float32, torch.float64]: + box_tensor = torch.tensor([[285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019]], dtype=dtype) + expected = torch.tensor([604723.0806, 600965.4666, 592761.0085], dtype=torch.float64) + area_check(box_tensor, expected, tolerance=0.05) + + # Check for float16 box + box_tensor = torch.tensor([[285.25, 185.625, 1194.0, 851.5], + [285.25, 188.75, 1192.0, 851.0], + [279.25, 198.0, 1189.0, 849.0]], dtype=torch.float16) + expected = torch.tensor([605113.875, 600495.1875, 592247.25]) + area_check(box_tensor, expected) + + +class BoxIouTester(unittest.TestCase): + def test_iou(self): + def iou_check(box, expected, tolerance=1e-4): + out = ops.box_iou(box, box) + torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + + # Check for int boxes + for dtype in [torch.int16, torch.int32, torch.int64]: + box = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=dtype) + expected = torch.tensor([[1.0, 0.25, 0.0], [0.25, 1.0, 0.0], [0.0, 0.0, 1.0]]) + iou_check(box, expected) + + # Check for float boxes + for dtype in [torch.float16, torch.float32, torch.float64]: + box_tensor = torch.tensor([[285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019]], dtype=dtype) + expected = torch.tensor([[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]]) + iou_check(box_tensor, expected, tolerance=0.002 if dtype == torch.float16 else 1e-4) + + +class GenBoxIouTester(unittest.TestCase): + def test_gen_iou(self): + def gen_iou_check(box, expected, tolerance=1e-4): + out = ops.generalized_box_iou(box, box) + torch.testing.assert_close(out, expected, rtol=0.0, check_dtype=False, atol=tolerance) + + # Check for int boxes + for dtype in [torch.int16, torch.int32, torch.int64]: + box = torch.tensor([[0, 0, 100, 100], [0, 0, 50, 50], [200, 200, 300, 300]], dtype=dtype) + expected = torch.tensor([[1.0, 0.25, -0.7778], [0.25, 1.0, -0.8611], [-0.7778, -0.8611, 1.0]]) + gen_iou_check(box, expected) + + # Check for float boxes + for dtype in [torch.float16, torch.float32, torch.float64]: + box_tensor = torch.tensor([[285.3538, 185.5758, 1193.5110, 851.4551], + [285.1472, 188.7374, 1192.4984, 851.0669], + [279.2440, 197.9812, 1189.4746, 849.2019]], dtype=dtype) + expected = torch.tensor([[1.0, 0.9933, 0.9673], [0.9933, 1.0, 0.9737], [0.9673, 0.9737, 1.0]]) + gen_iou_check(box_tensor, expected, tolerance=0.002 if dtype == torch.float16 else 1e-3) + + +if __name__ == '__main__': + unittest.main() diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/test/test_quantized_models.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_quantized_models.py new file mode 100644 index 0000000000000000000000000000000000000000..d8fd5325755d4c666a85d4e204e2c3ed04a24709 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_quantized_models.py @@ -0,0 +1,93 @@ +import torchvision +from common_utils import TestCase, map_nested_tensor_object +from collections import OrderedDict +from itertools import product +import torch +import numpy as np +from torchvision import models +import unittest +import traceback +import random + + +def set_rng_seed(seed): + torch.manual_seed(seed) + random.seed(seed) + np.random.seed(seed) + + +def get_available_quantizable_models(): + # TODO add a registration mechanism to torchvision.models + return [k for k, v in models.quantization.__dict__.items() if callable(v) and k[0].lower() == k[0] and k[0] != "_"] + + +# list of models that are not scriptable +scriptable_quantizable_models_blacklist = [] + + +@unittest.skipUnless('fbgemm' in torch.backends.quantized.supported_engines and + 'qnnpack' in torch.backends.quantized.supported_engines, + "This Pytorch Build has not been built with fbgemm and qnnpack") +class ModelTester(TestCase): + def check_quantized_model(self, model, input_shape): + x = torch.rand(input_shape) + model(x) + return + + def check_script(self, model, name): + if name in scriptable_quantizable_models_blacklist: + return + scriptable = True + msg = "" + try: + torch.jit.script(model) + except Exception as e: + tb = traceback.format_exc() + scriptable = False + msg = str(e) + str(tb) + self.assertTrue(scriptable, msg) + + def _test_classification_model(self, name, input_shape): + # First check if quantize=True provides models that can run with input data + + model = torchvision.models.quantization.__dict__[name](pretrained=False, quantize=True) + self.check_quantized_model(model, input_shape) + + for eval_mode in [True, False]: + model = torchvision.models.quantization.__dict__[name](pretrained=False, quantize=False) + if eval_mode: + model.eval() + model.qconfig = torch.quantization.default_qconfig + else: + model.train() + model.qconfig = torch.quantization.default_qat_qconfig + + model.fuse_model() + if eval_mode: + torch.quantization.prepare(model, inplace=True) + else: + torch.quantization.prepare_qat(model, inplace=True) + model.eval() + + torch.quantization.convert(model, inplace=True) + + self.check_script(model, name) + + +for model_name in get_available_quantizable_models(): + # for-loop bodies don't define scopes, so we have to save the variables + # we want to close over in some way + def do_test(self, model_name=model_name): + input_shape = (1, 3, 224, 224) + if model_name in ['inception_v3']: + input_shape = (1, 3, 299, 299) + self._test_classification_model(model_name, input_shape) + + # inception_v3 was causing timeouts on circleci + # See https://github.com/pytorch/vision/issues/1857 + if model_name not in ['inception_v3']: + setattr(ModelTester, "test_" + model_name, do_test) + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..de309cb66c52ad971fa89406c7bbc7fc0f35b3c4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms.py @@ -0,0 +1,2023 @@ +import itertools +import os +import torch +import 
torchvision.transforms as transforms +import torchvision.transforms.functional as F +import torchvision.transforms.functional_tensor as F_t +from torch._utils_internal import get_file_path_2 +from numpy.testing import assert_array_almost_equal +import unittest +import math +import random +import numpy as np +from PIL import Image +try: + import accimage +except ImportError: + accimage = None + +try: + from scipy import stats +except ImportError: + stats = None + +from common_utils import cycle_over, int_dtypes, float_dtypes +from _assert_utils import assert_equal + + +GRACE_HOPPER = get_file_path_2( + os.path.dirname(os.path.abspath(__file__)), 'assets', 'encode_jpeg', 'grace_hopper_517x606.jpg') + + +class Tester(unittest.TestCase): + + def test_center_crop(self): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + + img = torch.ones(3, height, width) + oh1 = (height - oheight) // 2 + ow1 = (width - owidth) // 2 + imgnarrow = img[:, oh1:oh1 + oheight, ow1:ow1 + owidth] + imgnarrow.fill_(0) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + self.assertEqual(result.sum(), 0, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum1 = result.sum() + self.assertGreater(sum1, 1, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + oheight += 1 + owidth += 1 + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + sum2 = result.sum() + self.assertGreater(sum2, 0, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + self.assertGreater(sum2, sum1, + "height: {} width: {} oheight: {} owdith: {}".format(height, width, oheight, owidth)) + + def test_center_crop_2(self): + """ Tests when center crop size is larger than image size, along any dimension""" + even_image_size = (random.randint(10, 32) * 2, random.randint(10, 32) * 2) + odd_image_size = (even_image_size[0] + 1, even_image_size[1] + 1) + + # Since height is independent of width, we can ignore images with odd height and even width and vice-versa. 
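+        # (CenterCrop is expected to zero-pad the input whenever the requested crop is larger
+        # than the image in a given dimension, so the PIL and tensor code paths below should
+        # still agree; the content check further down only compares the overlapping center
+        # region of the crop and the original image.)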
+ input_image_sizes = [even_image_size, odd_image_size] + + # Get different crop sizes + delta = random.choice((1, 3, 5)) + crop_size_delta = [-2 * delta, -delta, 0, delta, 2 * delta] + crop_size_params = itertools.product(input_image_sizes, crop_size_delta, crop_size_delta) + + for (input_image_size, delta_height, delta_width) in crop_size_params: + img = torch.ones(3, *input_image_size) + crop_size = (input_image_size[0] + delta_height, input_image_size[1] + delta_width) + + # Test both transforms, one with PIL input and one with tensor + output_pil = transforms.Compose([ + transforms.ToPILImage(), + transforms.CenterCrop(crop_size), + transforms.ToTensor()], + )(img) + self.assertEqual(output_pil.size()[1:3], crop_size, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + + output_tensor = transforms.CenterCrop(crop_size)(img) + self.assertEqual(output_tensor.size()[1:3], crop_size, + "image_size: {} crop_size: {}".format(input_image_size, crop_size)) + + # Ensure output for PIL and Tensor are equal + assert_equal( + output_tensor, output_pil, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + ) + + # Check if content in center of both image and cropped output is same. + center_size = (min(crop_size[0], input_image_size[0]), min(crop_size[1], input_image_size[1])) + crop_center_tl, input_center_tl = [0, 0], [0, 0] + for index in range(2): + if crop_size[index] > input_image_size[index]: + crop_center_tl[index] = (crop_size[index] - input_image_size[index]) // 2 + else: + input_center_tl[index] = (input_image_size[index] - crop_size[index]) // 2 + + output_center = output_pil[ + :, + crop_center_tl[0]:crop_center_tl[0] + center_size[0], + crop_center_tl[1]:crop_center_tl[1] + center_size[1] + ] + + img_center = img[ + :, + input_center_tl[0]:input_center_tl[0] + center_size[0], + input_center_tl[1]:input_center_tl[1] + center_size[1] + ] + + assert_equal( + output_center, img_center, check_stride=False, + msg="image_size: {} crop_size: {}".format(input_image_size, crop_size) + ) + + def test_five_crop(self): + to_pil_image = transforms.ToPILImage() + h = random.randint(5, 25) + w = random.randint(5, 25) + for single_dim in [True, False]: + crop_h = random.randint(1, h) + crop_w = random.randint(1, w) + if single_dim: + crop_h = min(crop_h, crop_w) + crop_w = crop_h + transform = transforms.FiveCrop(crop_h) + else: + transform = transforms.FiveCrop((crop_h, crop_w)) + + img = torch.FloatTensor(3, h, w).uniform_() + results = transform(to_pil_image(img)) + + self.assertEqual(len(results), 5) + for crop in results: + self.assertEqual(crop.size, (crop_w, crop_h)) + + to_pil_image = transforms.ToPILImage() + tl = to_pil_image(img[:, 0:crop_h, 0:crop_w]) + tr = to_pil_image(img[:, 0:crop_h, w - crop_w:]) + bl = to_pil_image(img[:, h - crop_h:, 0:crop_w]) + br = to_pil_image(img[:, h - crop_h:, w - crop_w:]) + center = transforms.CenterCrop((crop_h, crop_w))(to_pil_image(img)) + expected_output = (tl, tr, bl, br, center) + self.assertEqual(results, expected_output) + + def test_ten_crop(self): + to_pil_image = transforms.ToPILImage() + h = random.randint(5, 25) + w = random.randint(5, 25) + for should_vflip in [True, False]: + for single_dim in [True, False]: + crop_h = random.randint(1, h) + crop_w = random.randint(1, w) + if single_dim: + crop_h = min(crop_h, crop_w) + crop_w = crop_h + transform = transforms.TenCrop(crop_h, + vertical_flip=should_vflip) + five_crop = transforms.FiveCrop(crop_h) + else: + transform = 
transforms.TenCrop((crop_h, crop_w), + vertical_flip=should_vflip) + five_crop = transforms.FiveCrop((crop_h, crop_w)) + + img = to_pil_image(torch.FloatTensor(3, h, w).uniform_()) + results = transform(img) + expected_output = five_crop(img) + + # Checking if FiveCrop and TenCrop can be printed as string + transform.__repr__() + five_crop.__repr__() + + if should_vflip: + vflipped_img = img.transpose(Image.FLIP_TOP_BOTTOM) + expected_output += five_crop(vflipped_img) + else: + hflipped_img = img.transpose(Image.FLIP_LEFT_RIGHT) + expected_output += five_crop(hflipped_img) + + self.assertEqual(len(results), 10) + self.assertEqual(results, expected_output) + + def test_randomresized_params(self): + height = random.randint(24, 32) * 2 + width = random.randint(24, 32) * 2 + img = torch.ones(3, height, width) + to_pil_image = transforms.ToPILImage() + img = to_pil_image(img) + size = 100 + epsilon = 0.05 + min_scale = 0.25 + for _ in range(10): + scale_min = max(round(random.random(), 2), min_scale) + scale_range = (scale_min, scale_min + round(random.random(), 2)) + aspect_min = max(round(random.random(), 2), epsilon) + aspect_ratio_range = (aspect_min, aspect_min + round(random.random(), 2)) + randresizecrop = transforms.RandomResizedCrop(size, scale_range, aspect_ratio_range) + i, j, h, w = randresizecrop.get_params(img, scale_range, aspect_ratio_range) + aspect_ratio_obtained = w / h + self.assertTrue((min(aspect_ratio_range) - epsilon <= aspect_ratio_obtained and + aspect_ratio_obtained <= max(aspect_ratio_range) + epsilon) or + aspect_ratio_obtained == 1.0) + self.assertIsInstance(i, int) + self.assertIsInstance(j, int) + self.assertIsInstance(h, int) + self.assertIsInstance(w, int) + + def test_randomperspective(self): + for _ in range(10): + height = random.randint(24, 32) * 2 + width = random.randint(24, 32) * 2 + img = torch.ones(3, height, width) + to_pil_image = transforms.ToPILImage() + img = to_pil_image(img) + perp = transforms.RandomPerspective() + startpoints, endpoints = perp.get_params(width, height, 0.5) + tr_img = F.perspective(img, startpoints, endpoints) + tr_img2 = F.to_tensor(F.perspective(tr_img, endpoints, startpoints)) + tr_img = F.to_tensor(tr_img) + self.assertEqual(img.size[0], width) + self.assertEqual(img.size[1], height) + self.assertGreater(torch.nn.functional.mse_loss(tr_img, F.to_tensor(img)) + 0.3, + torch.nn.functional.mse_loss(tr_img2, F.to_tensor(img))) + + def test_randomperspective_fill(self): + + # assert fill being either a Sequence or a Number + with self.assertRaises(TypeError): + transforms.RandomPerspective(fill={}) + + t = transforms.RandomPerspective(fill=None) + self.assertTrue(t.fill == 0) + + height = 100 + width = 100 + img = torch.ones(3, height, width) + to_pil_image = transforms.ToPILImage() + img = to_pil_image(img) + + modes = ("L", "RGB", "F") + nums_bands = [len(mode) for mode in modes] + fill = 127 + + for mode, num_bands in zip(modes, nums_bands): + img_conv = img.convert(mode) + perspective = transforms.RandomPerspective(p=1, fill=fill) + tr_img = perspective(img_conv) + pixel = tr_img.getpixel((0, 0)) + + if not isinstance(pixel, tuple): + pixel = (pixel,) + self.assertTupleEqual(pixel, tuple([fill] * num_bands)) + + for mode, num_bands in zip(modes, nums_bands): + img_conv = img.convert(mode) + startpoints, endpoints = transforms.RandomPerspective.get_params(width, height, 0.5) + tr_img = F.perspective(img_conv, startpoints, endpoints, fill=fill) + pixel = tr_img.getpixel((0, 0)) + + if not isinstance(pixel, tuple): + pixel = 
(pixel,) + self.assertTupleEqual(pixel, tuple([fill] * num_bands)) + + for wrong_num_bands in set(nums_bands) - {num_bands}: + with self.assertRaises(ValueError): + F.perspective(img_conv, startpoints, endpoints, fill=tuple([fill] * wrong_num_bands)) + + def test_resize(self): + + input_sizes = [ + # height, width + # square image + (28, 28), + (27, 27), + # rectangular image: h < w + (28, 34), + (29, 35), + # rectangular image: h > w + (34, 28), + (35, 29), + ] + test_output_sizes_1 = [ + # single integer + 22, 27, 28, 36, + # single integer in tuple/list + [22, ], (27, ), + ] + test_output_sizes_2 = [ + # two integers + [22, 22], [22, 28], [22, 36], + [27, 22], [36, 22], [28, 28], + [28, 37], [37, 27], [37, 37] + ] + + for height, width in input_sizes: + img = Image.new("RGB", size=(width, height), color=127) + + for osize in test_output_sizes_1: + for max_size in (None, 37, 1000): + + t = transforms.Resize(osize, max_size=max_size) + result = t(img) + + msg = "{}, {} - {} - {}".format(height, width, osize, max_size) + osize = osize[0] if isinstance(osize, (list, tuple)) else osize + # If size is an int, smaller edge of the image will be matched to this number. + # i.e, if height > width, then image will be rescaled to (size * height / width, size). + if height < width: + exp_w, exp_h = (int(osize * width / height), osize) # (w, h) + if max_size is not None and max_size < exp_w: + exp_w, exp_h = max_size, int(max_size * exp_h / exp_w) + self.assertEqual(result.size, (exp_w, exp_h), msg=msg) + elif width < height: + exp_w, exp_h = (osize, int(osize * height / width)) # (w, h) + if max_size is not None and max_size < exp_h: + exp_w, exp_h = int(max_size * exp_w / exp_h), max_size + self.assertEqual(result.size, (exp_w, exp_h), msg=msg) + else: + exp_w, exp_h = (osize, osize) # (w, h) + if max_size is not None and max_size < osize: + exp_w, exp_h = max_size, max_size + self.assertEqual(result.size, (exp_w, exp_h), msg=msg) + + for height, width in input_sizes: + img = Image.new("RGB", size=(width, height), color=127) + + for osize in test_output_sizes_2: + oheight, owidth = osize + + t = transforms.Resize(osize) + result = t(img) + + self.assertEqual((owidth, oheight), result.size) + + with self.assertWarnsRegex(UserWarning, r"Anti-alias option is always applied for PIL Image input"): + t = transforms.Resize(osize, antialias=False) + t(img) + + def test_random_crop(self): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + img = torch.ones(3, height, width) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.RandomCrop((oheight, owidth)), + transforms.ToTensor(), + ])(img) + self.assertEqual(result.size(1), oheight) + self.assertEqual(result.size(2), owidth) + + padding = random.randint(1, 20) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.RandomCrop((oheight, owidth), padding=padding), + transforms.ToTensor(), + ])(img) + self.assertEqual(result.size(1), oheight) + self.assertEqual(result.size(2), owidth) + + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.RandomCrop((height, width)), + transforms.ToTensor() + ])(img) + self.assertEqual(result.size(1), height) + self.assertEqual(result.size(2), width) + torch.testing.assert_close(result, img) + + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.RandomCrop((height + 1, width + 1), pad_if_needed=True), + transforms.ToTensor(), + 
])(img) + self.assertEqual(result.size(1), height + 1) + self.assertEqual(result.size(2), width + 1) + + t = transforms.RandomCrop(48) + img = torch.ones(3, 32, 32) + with self.assertRaisesRegex(ValueError, r"Required crop size .+ is larger then input image size .+"): + t(img) + + def test_pad(self): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + img = torch.ones(3, height, width) + padding = random.randint(1, 20) + fill = random.randint(1, 50) + result = transforms.Compose([ + transforms.ToPILImage(), + transforms.Pad(padding, fill=fill), + transforms.ToTensor(), + ])(img) + self.assertEqual(result.size(1), height + 2 * padding) + self.assertEqual(result.size(2), width + 2 * padding) + # check that all elements in the padded region correspond + # to the pad value + fill_v = fill / 255 + eps = 1e-5 + h_padded = result[:, :padding, :] + w_padded = result[:, :, :padding] + torch.testing.assert_close( + h_padded, torch.full_like(h_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps + ) + torch.testing.assert_close( + w_padded, torch.full_like(w_padded, fill_value=fill_v), check_stride=False, rtol=0.0, atol=eps + ) + self.assertRaises(ValueError, transforms.Pad(padding, fill=(1, 2)), + transforms.ToPILImage()(img)) + + def test_pad_with_tuple_of_pad_values(self): + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + img = transforms.ToPILImage()(torch.ones(3, height, width)) + + padding = tuple([random.randint(1, 20) for _ in range(2)]) + output = transforms.Pad(padding)(img) + self.assertEqual(output.size, (width + padding[0] * 2, height + padding[1] * 2)) + + padding = tuple([random.randint(1, 20) for _ in range(4)]) + output = transforms.Pad(padding)(img) + self.assertEqual(output.size[0], width + padding[0] + padding[2]) + self.assertEqual(output.size[1], height + padding[1] + padding[3]) + + # Checking if Padding can be printed as string + transforms.Pad(padding).__repr__() + + def test_pad_with_non_constant_padding_modes(self): + """Unit tests for edge, reflect, symmetric padding""" + img = torch.zeros(3, 27, 27).byte() + img[:, :, 0] = 1 # Constant value added to leftmost edge + img = transforms.ToPILImage()(img) + img = F.pad(img, 1, (200, 200, 200)) + + # pad 3 to all sidess + edge_padded_img = F.pad(img, 3, padding_mode='edge') + # First 6 elements of leftmost edge in the middle of the image, values are in order: + # edge_pad, edge_pad, edge_pad, constant_pad, constant value added to leftmost edge, 0 + edge_middle_slice = np.asarray(edge_padded_img).transpose(2, 0, 1)[0][17][:6] + assert_equal(edge_middle_slice, np.asarray([200, 200, 200, 200, 1, 0], dtype=np.uint8), check_stride=False) + self.assertEqual(transforms.ToTensor()(edge_padded_img).size(), (3, 35, 35)) + + # Pad 3 to left/right, 2 to top/bottom + reflect_padded_img = F.pad(img, (3, 2), padding_mode='reflect') + # First 6 elements of leftmost edge in the middle of the image, values are in order: + # reflect_pad, reflect_pad, reflect_pad, constant_pad, constant value added to leftmost edge, 0 + reflect_middle_slice = np.asarray(reflect_padded_img).transpose(2, 0, 1)[0][17][:6] + assert_equal(reflect_middle_slice, np.asarray([0, 0, 1, 200, 1, 0], dtype=np.uint8), check_stride=False) + self.assertEqual(transforms.ToTensor()(reflect_padded_img).size(), (3, 33, 35)) + + # Pad 3 to left, 2 to top, 2 to right, 1 to bottom + symmetric_padded_img = F.pad(img, (3, 2, 2, 1), padding_mode='symmetric') + # First 6 elements of leftmost edge in the middle of the image, 
values are in order: + # sym_pad, sym_pad, sym_pad, constant_pad, constant value added to leftmost edge, 0 + symmetric_middle_slice = np.asarray(symmetric_padded_img).transpose(2, 0, 1)[0][17][:6] + assert_equal(symmetric_middle_slice, np.asarray([0, 1, 200, 200, 1, 0], dtype=np.uint8), check_stride=False) + self.assertEqual(transforms.ToTensor()(symmetric_padded_img).size(), (3, 32, 34)) + + # Check negative padding explicitly for symmetric case, since it is not + # implemented for tensor case to compare to + # Crop 1 to left, pad 2 to top, pad 3 to right, crop 3 to bottom + symmetric_padded_img_neg = F.pad(img, (-1, 2, 3, -3), padding_mode='symmetric') + symmetric_neg_middle_left = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][:3] + symmetric_neg_middle_right = np.asarray(symmetric_padded_img_neg).transpose(2, 0, 1)[0][17][-4:] + assert_equal(symmetric_neg_middle_left, np.asarray([1, 0, 0], dtype=np.uint8), check_stride=False) + assert_equal(symmetric_neg_middle_right, np.asarray([200, 200, 0, 0], dtype=np.uint8), check_stride=False) + self.assertEqual(transforms.ToTensor()(symmetric_padded_img_neg).size(), (3, 28, 31)) + + def test_pad_raises_with_invalid_pad_sequence_len(self): + with self.assertRaises(ValueError): + transforms.Pad(()) + + with self.assertRaises(ValueError): + transforms.Pad((1, 2, 3)) + + with self.assertRaises(ValueError): + transforms.Pad((1, 2, 3, 4, 5)) + + def test_pad_with_mode_F_images(self): + pad = 2 + transform = transforms.Pad(pad) + + img = Image.new("F", (10, 10)) + padded_img = transform(img) + self.assertSequenceEqual(padded_img.size, [edge_size + 2 * pad for edge_size in img.size]) + + def test_lambda(self): + trans = transforms.Lambda(lambda x: x.add(10)) + x = torch.randn(10) + y = trans(x) + assert_equal(y, torch.add(x, 10)) + + trans = transforms.Lambda(lambda x: x.add_(10)) + x = torch.randn(10) + y = trans(x) + assert_equal(y, x) + + # Checking if Lambda can be printed as string + trans.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_apply(self): + random_state = random.getstate() + random.seed(42) + random_apply_transform = transforms.RandomApply( + [ + transforms.RandomRotation((-45, 45)), + transforms.RandomHorizontalFlip(), + transforms.RandomVerticalFlip(), + ], p=0.75 + ) + img = transforms.ToPILImage()(torch.rand(3, 10, 10)) + num_samples = 250 + num_applies = 0 + for _ in range(num_samples): + out = random_apply_transform(img) + if out != img: + num_applies += 1 + + p_value = stats.binom_test(num_applies, num_samples, p=0.75) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Checking if RandomApply can be printed as string + random_apply_transform.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_choice(self): + random_state = random.getstate() + random.seed(42) + random_choice_transform = transforms.RandomChoice( + [ + transforms.Resize(15), + transforms.Resize(20), + transforms.CenterCrop(10) + ] + ) + img = transforms.ToPILImage()(torch.rand(3, 25, 25)) + num_samples = 250 + num_resize_15 = 0 + num_resize_20 = 0 + num_crop_10 = 0 + for _ in range(num_samples): + out = random_choice_transform(img) + if out.size == (15, 15): + num_resize_15 += 1 + elif out.size == (20, 20): + num_resize_20 += 1 + elif out.size == (10, 10): + num_crop_10 += 1 + + p_value = stats.binom_test(num_resize_15, num_samples, p=0.33333) + self.assertGreater(p_value, 0.0001) + p_value = stats.binom_test(num_resize_20, num_samples, 
p=0.33333) + self.assertGreater(p_value, 0.0001) + p_value = stats.binom_test(num_crop_10, num_samples, p=0.33333) + self.assertGreater(p_value, 0.0001) + + random.setstate(random_state) + # Checking if RandomChoice can be printed as string + random_choice_transform.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_order(self): + random_state = random.getstate() + random.seed(42) + random_order_transform = transforms.RandomOrder( + [ + transforms.Resize(20), + transforms.CenterCrop(10) + ] + ) + img = transforms.ToPILImage()(torch.rand(3, 25, 25)) + num_samples = 250 + num_normal_order = 0 + resize_crop_out = transforms.CenterCrop(10)(transforms.Resize(20)(img)) + for _ in range(num_samples): + out = random_order_transform(img) + if out == resize_crop_out: + num_normal_order += 1 + + p_value = stats.binom_test(num_normal_order, num_samples, p=0.5) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Checking if RandomOrder can be printed as string + random_order_transform.__repr__() + + def test_to_tensor(self): + test_channels = [1, 3, 4] + height, width = 4, 4 + trans = transforms.ToTensor() + + with self.assertRaises(TypeError): + trans(np.random.rand(1, height, width).tolist()) + + with self.assertRaises(ValueError): + trans(np.random.rand(height)) + trans(np.random.rand(1, 1, height, width)) + + for channels in test_channels: + input_data = torch.ByteTensor(channels, height, width).random_(0, 255).float().div_(255) + img = transforms.ToPILImage()(input_data) + output = trans(img) + torch.testing.assert_close(output, input_data, check_stride=False) + + ndarray = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) + output = trans(ndarray) + expected_output = ndarray.transpose((2, 0, 1)) / 255.0 + torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False) + + ndarray = np.random.rand(height, width, channels).astype(np.float32) + output = trans(ndarray) + expected_output = ndarray.transpose((2, 0, 1)) + torch.testing.assert_close(output.numpy(), expected_output, check_stride=False, check_dtype=False) + + # separate test for mode '1' PIL images + input_data = torch.ByteTensor(1, height, width).bernoulli_() + img = transforms.ToPILImage()(input_data.mul(255)).convert('1') + output = trans(img) + torch.testing.assert_close(input_data, output, check_dtype=False, check_stride=False) + + def test_to_tensor_with_other_default_dtypes(self): + current_def_dtype = torch.get_default_dtype() + + t = transforms.ToTensor() + np_arr = np.random.randint(0, 255, (32, 32, 3), dtype=np.uint8) + img = Image.fromarray(np_arr) + + for dtype in [torch.float16, torch.float, torch.double]: + torch.set_default_dtype(dtype) + res = t(img) + self.assertTrue(res.dtype == dtype, msg=f"{res.dtype} vs {dtype}") + + torch.set_default_dtype(current_def_dtype) + + def test_max_value(self): + for dtype in int_dtypes(): + self.assertEqual(F_t._max_value(dtype), torch.iinfo(dtype).max) + + # remove float testing as it can lead to errors such as + # runtime error: 5.7896e+76 is outside the range of representable values of type 'float' + # for dtype in float_dtypes(): + # self.assertGreater(F_t._max_value(dtype), torch.finfo(dtype).max) + + def test_convert_image_dtype_float_to_float(self): + for input_dtype, output_dtypes in cycle_over(float_dtypes()): + input_image = torch.tensor((0.0, 1.0), dtype=input_dtype) + for output_dtype in output_dtypes: + with 
self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): + transform = transforms.ConvertImageDtype(output_dtype) + transform_script = torch.jit.script(F.convert_image_dtype) + + output_image = transform(input_image) + output_image_script = transform_script(input_image, output_dtype) + + torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + + actual_min, actual_max = output_image.tolist() + desired_min, desired_max = 0.0, 1.0 + + self.assertAlmostEqual(actual_min, desired_min) + self.assertAlmostEqual(actual_max, desired_max) + + def test_convert_image_dtype_float_to_int(self): + for input_dtype in float_dtypes(): + input_image = torch.tensor((0.0, 1.0), dtype=input_dtype) + for output_dtype in int_dtypes(): + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): + transform = transforms.ConvertImageDtype(output_dtype) + transform_script = torch.jit.script(F.convert_image_dtype) + + if (input_dtype == torch.float32 and output_dtype in (torch.int32, torch.int64)) or ( + input_dtype == torch.float64 and output_dtype == torch.int64 + ): + with self.assertRaises(RuntimeError): + transform(input_image) + else: + output_image = transform(input_image) + output_image_script = transform_script(input_image, output_dtype) + + torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + + actual_min, actual_max = output_image.tolist() + desired_min, desired_max = 0, torch.iinfo(output_dtype).max + + self.assertEqual(actual_min, desired_min) + self.assertEqual(actual_max, desired_max) + + def test_convert_image_dtype_int_to_float(self): + for input_dtype in int_dtypes(): + input_image = torch.tensor((0, torch.iinfo(input_dtype).max), dtype=input_dtype) + for output_dtype in float_dtypes(): + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): + transform = transforms.ConvertImageDtype(output_dtype) + transform_script = torch.jit.script(F.convert_image_dtype) + + output_image = transform(input_image) + output_image_script = transform_script(input_image, output_dtype) + + torch.testing.assert_close(output_image_script, output_image, rtol=0.0, atol=1e-6) + + actual_min, actual_max = output_image.tolist() + desired_min, desired_max = 0.0, 1.0 + + self.assertAlmostEqual(actual_min, desired_min) + self.assertGreaterEqual(actual_min, desired_min) + self.assertAlmostEqual(actual_max, desired_max) + self.assertLessEqual(actual_max, desired_max) + + def test_convert_image_dtype_int_to_int(self): + for input_dtype, output_dtypes in cycle_over(int_dtypes()): + input_max = torch.iinfo(input_dtype).max + input_image = torch.tensor((0, input_max), dtype=input_dtype) + for output_dtype in output_dtypes: + output_max = torch.iinfo(output_dtype).max + + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): + transform = transforms.ConvertImageDtype(output_dtype) + transform_script = torch.jit.script(F.convert_image_dtype) + + output_image = transform(input_image) + output_image_script = transform_script(input_image, output_dtype) + + torch.testing.assert_close( + output_image_script, + output_image, + rtol=0.0, + atol=1e-6, + msg="{} vs {}".format(output_image_script, output_image), + ) + + actual_min, actual_max = output_image.tolist() + desired_min, desired_max = 0, output_max + + # see https://github.com/pytorch/vision/pull/2078#issuecomment-641036236 for details + if input_max >= output_max: + error_term = 0 + else: + error_term = 1 - (torch.iinfo(output_dtype).max + 1) // 
(torch.iinfo(input_dtype).max + 1) + + self.assertEqual(actual_min, desired_min) + self.assertEqual(actual_max, desired_max + error_term) + + def test_convert_image_dtype_int_to_int_consistency(self): + for input_dtype, output_dtypes in cycle_over(int_dtypes()): + input_max = torch.iinfo(input_dtype).max + input_image = torch.tensor((0, input_max), dtype=input_dtype) + for output_dtype in output_dtypes: + output_max = torch.iinfo(output_dtype).max + if output_max <= input_max: + continue + + with self.subTest(input_dtype=input_dtype, output_dtype=output_dtype): + transform = transforms.ConvertImageDtype(output_dtype) + inverse_transfrom = transforms.ConvertImageDtype(input_dtype) + output_image = inverse_transfrom(transform(input_image)) + + actual_min, actual_max = output_image.tolist() + desired_min, desired_max = 0, input_max + + self.assertEqual(actual_min, desired_min) + self.assertEqual(actual_max, desired_max) + + @unittest.skipIf(accimage is None, 'accimage not available') + def test_accimage_to_tensor(self): + trans = transforms.ToTensor() + + expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB')) + output = trans(accimage.Image(GRACE_HOPPER)) + + torch.testing.assert_close(output, expected_output) + + def test_pil_to_tensor(self): + test_channels = [1, 3, 4] + height, width = 4, 4 + trans = transforms.PILToTensor() + + with self.assertRaises(TypeError): + trans(np.random.rand(1, height, width).tolist()) + trans(np.random.rand(1, height, width)) + + for channels in test_channels: + input_data = torch.ByteTensor(channels, height, width).random_(0, 255) + img = transforms.ToPILImage()(input_data) + output = trans(img) + torch.testing.assert_close(input_data, output, check_stride=False) + + input_data = np.random.randint(low=0, high=255, size=(height, width, channels)).astype(np.uint8) + img = transforms.ToPILImage()(input_data) + output = trans(img) + expected_output = input_data.transpose((2, 0, 1)) + torch.testing.assert_close(output.numpy(), expected_output) + + input_data = torch.as_tensor(np.random.rand(channels, height, width).astype(np.float32)) + img = transforms.ToPILImage()(input_data) # CHW -> HWC and (* 255).byte() + output = trans(img) # HWC -> CHW + expected_output = (input_data * 255).byte() + torch.testing.assert_close(output, expected_output, check_stride=False) + + # separate test for mode '1' PIL images + input_data = torch.ByteTensor(1, height, width).bernoulli_() + img = transforms.ToPILImage()(input_data.mul(255)).convert('1') + output = trans(img).view(torch.uint8).bool().to(torch.uint8) + torch.testing.assert_close(input_data, output, check_stride=False) + + @unittest.skipIf(accimage is None, 'accimage not available') + def test_accimage_pil_to_tensor(self): + trans = transforms.PILToTensor() + + expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB')) + output = trans(accimage.Image(GRACE_HOPPER)) + + self.assertEqual(expected_output.size(), output.size()) + torch.testing.assert_close(output, expected_output, check_stride=False) + + @unittest.skipIf(accimage is None, 'accimage not available') + def test_accimage_resize(self): + trans = transforms.Compose([ + transforms.Resize(256, interpolation=Image.LINEAR), + transforms.ToTensor(), + ]) + + # Checking if Compose, Resize and ToTensor can be printed as string + trans.__repr__() + + expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB')) + output = trans(accimage.Image(GRACE_HOPPER)) + + self.assertEqual(expected_output.size(), output.size()) + 
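+ # The accimage and PIL resize implementations are not expected to match bitwise, so the checks below compare summary statistics and use a loose elementwise tolerance.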
self.assertLess(np.abs((expected_output - output).mean()), 1e-3) + self.assertLess((expected_output - output).var(), 1e-5) + # note the high absolute tolerance + self.assertTrue(np.allclose(output.numpy(), expected_output.numpy(), atol=5e-2)) + + @unittest.skipIf(accimage is None, 'accimage not available') + def test_accimage_crop(self): + trans = transforms.Compose([ + transforms.CenterCrop(256), + transforms.ToTensor(), + ]) + + # Checking if Compose, CenterCrop and ToTensor can be printed as string + trans.__repr__() + + expected_output = trans(Image.open(GRACE_HOPPER).convert('RGB')) + output = trans(accimage.Image(GRACE_HOPPER)) + + self.assertEqual(expected_output.size(), output.size()) + torch.testing.assert_close(output, expected_output) + + def test_1_channel_tensor_to_pil_image(self): + to_tensor = transforms.ToTensor() + + img_data_float = torch.Tensor(1, 4, 4).uniform_() + img_data_byte = torch.ByteTensor(1, 4, 4).random_(0, 255) + img_data_short = torch.ShortTensor(1, 4, 4).random_() + img_data_int = torch.IntTensor(1, 4, 4).random_() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), + img_data_byte.float().div(255.0).numpy(), + img_data_short.numpy(), + img_data_int.numpy()] + expected_modes = ['L', 'L', 'I;16', 'I'] + + for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + self.assertEqual(img.mode, mode) + torch.testing.assert_close(expected_output, to_tensor(img).numpy(), check_stride=False) + # 'F' mode for torch.FloatTensor + img_F_mode = transforms.ToPILImage(mode='F')(img_data_float) + self.assertEqual(img_F_mode.mode, 'F') + torch.testing.assert_close( + np.array(Image.fromarray(img_data_float.squeeze(0).numpy(), mode='F')), np.array(img_F_mode) + ) + + def test_1_channel_ndarray_to_pil_image(self): + img_data_float = torch.Tensor(4, 4, 1).uniform_().numpy() + img_data_byte = torch.ByteTensor(4, 4, 1).random_(0, 255).numpy() + img_data_short = torch.ShortTensor(4, 4, 1).random_().numpy() + img_data_int = torch.IntTensor(4, 4, 1).random_().numpy() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_modes = ['F', 'L', 'I;16', 'I'] + for img_data, mode in zip(inputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + self.assertEqual(img.mode, mode) + # note: we explicitly convert img's dtype because pytorch doesn't support uint16 + # and otherwise assert_close wouldn't be able to construct a tensor from the uint16 array + torch.testing.assert_close(img_data[:, :, 0], np.asarray(img).astype(img_data.dtype)) + + def test_2_channel_ndarray_to_pil_image(self): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'LA') # default should assume LA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + split = img.split() + for i in range(2): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 4, 2).random_(0, 255).numpy() + for mode in [None, 'LA']: + verify_img_data(img_data, mode) + + transforms.ToPILImage().__repr__() + + with self.assertRaises(ValueError): + # should raise if we try a mode for 4 or 1 or 3 channel images + 
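+ # note: assertRaises exits at the first call that raises, so only the first mode in this block is actually exercised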
transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='RGB')(img_data) + + def test_2_channel_tensor_to_pil_image(self): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'LA') # default should assume LA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + split = img.split() + for i in range(2): + self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) + + img_data = torch.Tensor(2, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 'LA']: + verify_img_data(img_data, expected_output, mode=mode) + + with self.assertRaises(ValueError): + # should raise if we try a mode for 4 or 1 or 3 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='RGB')(img_data) + + def test_3_channel_tensor_to_pil_image(self): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'RGB') # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + split = img.split() + for i in range(3): + self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) + + img_data = torch.Tensor(3, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 'RGB', 'HSV', 'YCbCr']: + verify_img_data(img_data, expected_output, mode=mode) + + with self.assertRaises(ValueError): + # should raise if we try a mode for 4 or 1 or 2 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + with self.assertRaises(ValueError): + transforms.ToPILImage()(torch.Tensor(1, 3, 4, 4).uniform_()) + + def test_3_channel_ndarray_to_pil_image(self): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'RGB') # default should assume RGB + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + split = img.split() + for i in range(3): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 4, 3).random_(0, 255).numpy() + for mode in [None, 'RGB', 'HSV', 'YCbCr']: + verify_img_data(img_data, mode) + + # Checking if ToPILImage can be printed as string + transforms.ToPILImage().__repr__() + + with self.assertRaises(ValueError): + # should raise if we try a mode for 4 or 1 or 2 channel images + transforms.ToPILImage(mode='RGBA')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + def test_4_channel_tensor_to_pil_image(self): + def verify_img_data(img_data, expected_output, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'RGBA') # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + + split = img.split() + for i in range(4): + self.assertTrue(np.allclose(expected_output[i].numpy(), F.to_tensor(split[i]).numpy())) + + img_data = torch.Tensor(4, 4, 4).uniform_() + expected_output = img_data.mul(255).int().float().div(255) + for mode in [None, 
'RGBA', 'CMYK', 'RGBX']: + verify_img_data(img_data, expected_output, mode) + + with self.assertRaises(ValueError): + # should raise if we try a mode for 3 or 1 or 2 channel images + transforms.ToPILImage(mode='RGB')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + def test_4_channel_ndarray_to_pil_image(self): + def verify_img_data(img_data, mode): + if mode is None: + img = transforms.ToPILImage()(img_data) + self.assertEqual(img.mode, 'RGBA') # default should assume RGBA + else: + img = transforms.ToPILImage(mode=mode)(img_data) + self.assertEqual(img.mode, mode) + split = img.split() + for i in range(4): + torch.testing.assert_close(img_data[:, :, i], np.asarray(split[i]), check_stride=False) + + img_data = torch.ByteTensor(4, 4, 4).random_(0, 255).numpy() + for mode in [None, 'RGBA', 'CMYK', 'RGBX']: + verify_img_data(img_data, mode) + + with self.assertRaises(ValueError): + # should raise if we try a mode for 3 or 1 or 2 channel images + transforms.ToPILImage(mode='RGB')(img_data) + transforms.ToPILImage(mode='P')(img_data) + transforms.ToPILImage(mode='LA')(img_data) + + def test_2d_tensor_to_pil_image(self): + to_tensor = transforms.ToTensor() + + img_data_float = torch.Tensor(4, 4).uniform_() + img_data_byte = torch.ByteTensor(4, 4).random_(0, 255) + img_data_short = torch.ShortTensor(4, 4).random_() + img_data_int = torch.IntTensor(4, 4).random_() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_outputs = [img_data_float.mul(255).int().float().div(255).numpy(), + img_data_byte.float().div(255.0).numpy(), + img_data_short.numpy(), + img_data_int.numpy()] + expected_modes = ['L', 'L', 'I;16', 'I'] + + for img_data, expected_output, mode in zip(inputs, expected_outputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + self.assertEqual(img.mode, mode) + np.testing.assert_allclose(expected_output, to_tensor(img).numpy()[0]) + + def test_2d_ndarray_to_pil_image(self): + img_data_float = torch.Tensor(4, 4).uniform_().numpy() + img_data_byte = torch.ByteTensor(4, 4).random_(0, 255).numpy() + img_data_short = torch.ShortTensor(4, 4).random_().numpy() + img_data_int = torch.IntTensor(4, 4).random_().numpy() + + inputs = [img_data_float, img_data_byte, img_data_short, img_data_int] + expected_modes = ['F', 'L', 'I;16', 'I'] + for img_data, mode in zip(inputs, expected_modes): + for transform in [transforms.ToPILImage(), transforms.ToPILImage(mode=mode)]: + img = transform(img_data) + self.assertEqual(img.mode, mode) + np.testing.assert_allclose(img_data, img) + + def test_tensor_bad_types_to_pil_image(self): + with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'): + transforms.ToPILImage()(torch.ones(1, 3, 4, 4)) + with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. 
Got \d+ channels.'): + transforms.ToPILImage()(torch.ones(6, 4, 4)) + + def test_ndarray_bad_types_to_pil_image(self): + trans = transforms.ToPILImage() + reg_msg = r'Input type \w+ is not supported' + with self.assertRaisesRegex(TypeError, reg_msg): + trans(np.ones([4, 4, 1], np.int64)) + with self.assertRaisesRegex(TypeError, reg_msg): + trans(np.ones([4, 4, 1], np.uint16)) + with self.assertRaisesRegex(TypeError, reg_msg): + trans(np.ones([4, 4, 1], np.uint32)) + with self.assertRaisesRegex(TypeError, reg_msg): + trans(np.ones([4, 4, 1], np.float64)) + + with self.assertRaisesRegex(ValueError, r'pic should be 2/3 dimensional. Got \d+ dimensions.'): + transforms.ToPILImage()(np.ones([1, 4, 4, 3])) + with self.assertRaisesRegex(ValueError, r'pic should not have > 4 channels. Got \d+ channels.'): + transforms.ToPILImage()(np.ones([4, 4, 6])) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_vertical_flip(self): + random_state = random.getstate() + random.seed(42) + img = transforms.ToPILImage()(torch.rand(3, 10, 10)) + vimg = img.transpose(Image.FLIP_TOP_BOTTOM) + + num_samples = 250 + num_vertical = 0 + for _ in range(num_samples): + out = transforms.RandomVerticalFlip()(img) + if out == vimg: + num_vertical += 1 + + p_value = stats.binom_test(num_vertical, num_samples, p=0.5) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + num_samples = 250 + num_vertical = 0 + for _ in range(num_samples): + out = transforms.RandomVerticalFlip(p=0.7)(img) + if out == vimg: + num_vertical += 1 + + p_value = stats.binom_test(num_vertical, num_samples, p=0.7) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Checking if RandomVerticalFlip can be printed as string + transforms.RandomVerticalFlip().__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_horizontal_flip(self): + random_state = random.getstate() + random.seed(42) + img = transforms.ToPILImage()(torch.rand(3, 10, 10)) + himg = img.transpose(Image.FLIP_LEFT_RIGHT) + + num_samples = 250 + num_horizontal = 0 + for _ in range(num_samples): + out = transforms.RandomHorizontalFlip()(img) + if out == himg: + num_horizontal += 1 + + p_value = stats.binom_test(num_horizontal, num_samples, p=0.5) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + num_samples = 250 + num_horizontal = 0 + for _ in range(num_samples): + out = transforms.RandomHorizontalFlip(p=0.7)(img) + if out == himg: + num_horizontal += 1 + + p_value = stats.binom_test(num_horizontal, num_samples, p=0.7) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Checking if RandomHorizontalFlip can be printed as string + transforms.RandomHorizontalFlip().__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats is not available') + def test_normalize(self): + def samples_from_standard_normal(tensor): + p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue + return p_value > 0.0001 + + random_state = random.getstate() + random.seed(42) + for channels in [1, 3]: + img = torch.rand(channels, 10, 10) + mean = [img[c].mean() for c in range(channels)] + std = [img[c].std() for c in range(channels)] + normalized = transforms.Normalize(mean, std)(img) + self.assertTrue(samples_from_standard_normal(normalized)) + random.setstate(random_state) + + # Checking if Normalize can be printed as string + transforms.Normalize(mean, std).__repr__() + + # Checking the optional in-place behaviour + tensor = torch.rand((1, 16, 
16)) + tensor_inplace = transforms.Normalize((0.5,), (0.5,), inplace=True)(tensor) + assert_equal(tensor, tensor_inplace) + + def test_normalize_different_dtype(self): + for dtype1 in [torch.float32, torch.float64]: + img = torch.rand(3, 10, 10, dtype=dtype1) + for dtype2 in [torch.int64, torch.float32, torch.float64]: + mean = torch.tensor([1, 2, 3], dtype=dtype2) + std = torch.tensor([1, 2, 1], dtype=dtype2) + # checks that it doesn't crash + transforms.functional.normalize(img, mean, std) + + def test_normalize_3d_tensor(self): + torch.manual_seed(28) + n_channels = 3 + img_size = 10 + mean = torch.rand(n_channels) + std = torch.rand(n_channels) + img = torch.rand(n_channels, img_size, img_size) + target = F.normalize(img, mean, std) + + mean_unsqueezed = mean.view(-1, 1, 1) + std_unsqueezed = std.view(-1, 1, 1) + result1 = F.normalize(img, mean_unsqueezed, std_unsqueezed) + result2 = F.normalize(img, + mean_unsqueezed.repeat(1, img_size, img_size), + std_unsqueezed.repeat(1, img_size, img_size)) + torch.testing.assert_close(target, result1) + torch.testing.assert_close(target, result2) + + def test_adjust_brightness(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + # test 0 + y_pil = F.adjust_brightness(x_pil, 1) + y_np = np.array(y_pil) + torch.testing.assert_close(y_np, x_np) + + # test 1 + y_pil = F.adjust_brightness(x_pil, 0.5) + y_np = np.array(y_pil) + y_ans = [0, 2, 6, 27, 67, 113, 18, 4, 117, 45, 127, 0] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = F.adjust_brightness(x_pil, 2) + y_np = np.array(y_pil) + y_ans = [0, 10, 26, 108, 255, 255, 74, 16, 255, 180, 255, 2] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + def test_adjust_contrast(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + # test 0 + y_pil = F.adjust_contrast(x_pil, 1) + y_np = np.array(y_pil) + torch.testing.assert_close(y_np, x_np) + + # test 1 + y_pil = F.adjust_contrast(x_pil, 0.5) + y_np = np.array(y_pil) + y_ans = [43, 45, 49, 70, 110, 156, 61, 47, 160, 88, 170, 43] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = F.adjust_contrast(x_pil, 2) + y_np = np.array(y_pil) + y_ans = [0, 0, 0, 22, 184, 255, 0, 0, 255, 94, 255, 0] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + @unittest.skipIf(Image.__version__ >= '7', "Temporarily disabled") + def test_adjust_saturation(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + # test 0 + y_pil = F.adjust_saturation(x_pil, 1) + y_np = np.array(y_pil) + torch.testing.assert_close(y_np, x_np) + + # test 1 + y_pil = F.adjust_saturation(x_pil, 0.5) + y_np = np.array(y_pil) + y_ans = [2, 4, 8, 87, 128, 173, 39, 25, 138, 133, 215, 88] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = F.adjust_saturation(x_pil, 2) + y_np = np.array(y_pil) + y_ans = [0, 6, 22, 0, 149, 255, 32, 0, 255, 4, 255, 0] + y_ans = 
np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + def test_adjust_hue(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + with self.assertRaises(ValueError): + F.adjust_hue(x_pil, -0.7) + F.adjust_hue(x_pil, 1) + + # test 0: almost same as x_data but not exact. + # probably because hsv <-> rgb floating point ops + y_pil = F.adjust_hue(x_pil, 0) + y_np = np.array(y_pil) + y_ans = [0, 5, 13, 54, 139, 226, 35, 8, 234, 91, 255, 1] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 1 + y_pil = F.adjust_hue(x_pil, 0.25) + y_np = np.array(y_pil) + y_ans = [13, 0, 12, 224, 54, 226, 234, 8, 99, 1, 222, 255] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = F.adjust_hue(x_pil, -0.25) + y_np = np.array(y_pil) + y_ans = [0, 13, 2, 54, 226, 58, 8, 234, 152, 255, 43, 1] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + def test_adjust_sharpness(self): + x_shape = [4, 4, 3] + x_data = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0, + 0, 65, 108, 101, 120, 97, 110, 100, 101, 114, 32, 86, 114, 121, 110, 105, + 111, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + # test 0 + y_pil = F.adjust_sharpness(x_pil, 1) + y_np = np.array(y_pil) + torch.testing.assert_close(y_np, x_np) + + # test 1 + y_pil = F.adjust_sharpness(x_pil, 0.5) + y_np = np.array(y_pil) + y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 30, + 30, 74, 103, 96, 114, 97, 110, 100, 101, 114, 32, 81, 103, 108, 102, 101, + 107, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = F.adjust_sharpness(x_pil, 2) + y_np = np.array(y_pil) + y_ans = [75, 121, 114, 105, 97, 107, 105, 32, 66, 111, 117, 114, 99, 104, 97, 0, + 0, 46, 118, 111, 132, 97, 110, 100, 101, 114, 32, 95, 135, 146, 126, 112, + 119, 116, 105, 115, 0, 0, 73, 32, 108, 111, 118, 101, 32, 121, 111, 117] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 3 + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + x_th = torch.tensor(x_np.transpose(2, 0, 1)) + y_pil = F.adjust_sharpness(x_pil, 2) + y_np = np.array(y_pil).transpose(2, 0, 1) + y_th = F.adjust_sharpness(x_th, 2) + torch.testing.assert_close(y_np, y_th.numpy()) + + def test_adjust_gamma(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + + # test 0 + y_pil = F.adjust_gamma(x_pil, 1) + y_np = np.array(y_pil) + torch.testing.assert_close(y_np, x_np) + + # test 1 + y_pil = F.adjust_gamma(x_pil, 0.5) + y_np = np.array(y_pil) + y_ans = [0, 35, 57, 117, 186, 241, 97, 45, 245, 152, 255, 16] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + # test 2 + y_pil = 
F.adjust_gamma(x_pil, 2) + y_np = np.array(y_pil) + y_ans = [0, 0, 0, 11, 71, 201, 5, 0, 215, 31, 255, 0] + y_ans = np.array(y_ans, dtype=np.uint8).reshape(x_shape) + torch.testing.assert_close(y_np, y_ans) + + def test_adjusts_L_mode(self): + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_rgb = Image.fromarray(x_np, mode='RGB') + + x_l = x_rgb.convert('L') + self.assertEqual(F.adjust_brightness(x_l, 2).mode, 'L') + self.assertEqual(F.adjust_saturation(x_l, 2).mode, 'L') + self.assertEqual(F.adjust_contrast(x_l, 2).mode, 'L') + self.assertEqual(F.adjust_hue(x_l, 0.4).mode, 'L') + self.assertEqual(F.adjust_sharpness(x_l, 2).mode, 'L') + self.assertEqual(F.adjust_gamma(x_l, 0.5).mode, 'L') + + def test_color_jitter(self): + color_jitter = transforms.ColorJitter(2, 2, 2, 0.1) + + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + + for i in range(10): + y_pil = color_jitter(x_pil) + self.assertEqual(y_pil.mode, x_pil.mode) + + y_pil_2 = color_jitter(x_pil_2) + self.assertEqual(y_pil_2.mode, x_pil_2.mode) + + # Checking if ColorJitter can be printed as string + color_jitter.__repr__() + + def test_linear_transformation(self): + num_samples = 1000 + x = torch.randn(num_samples, 3, 10, 10) + flat_x = x.view(x.size(0), x.size(1) * x.size(2) * x.size(3)) + # compute principal components + sigma = torch.mm(flat_x.t(), flat_x) / flat_x.size(0) + u, s, _ = np.linalg.svd(sigma.numpy()) + zca_epsilon = 1e-10 # avoid division by 0 + d = torch.Tensor(np.diag(1. / np.sqrt(s + zca_epsilon))) + u = torch.Tensor(u) + principal_components = torch.mm(torch.mm(u, d), u.t()) + mean_vector = (torch.sum(flat_x, dim=0) / flat_x.size(0)) + # initialize whitening matrix + whitening = transforms.LinearTransformation(principal_components, mean_vector) + # estimate covariance and mean using weak law of large number + num_features = flat_x.size(1) + cov = 0.0 + mean = 0.0 + for i in x: + xwhite = whitening(i) + xwhite = xwhite.view(1, -1).numpy() + cov += np.dot(xwhite, xwhite.T) / num_features + mean += np.sum(xwhite) / num_features + # if rtol for std = 1e-3 then rtol for cov = 2e-3 as std**2 = cov + torch.testing.assert_close(cov / num_samples, np.identity(1), rtol=2e-3, atol=1e-8, check_dtype=False, + msg="cov not close to 1") + torch.testing.assert_close(mean / num_samples, 0, rtol=1e-3, atol=1e-8, check_dtype=False, + msg="mean not close to 0") + + # Checking if LinearTransformation can be printed as string + whitening.__repr__() + + def test_rotate(self): + x = np.zeros((100, 100, 3), dtype=np.uint8) + x[40, 40] = [255, 255, 255] + + with self.assertRaisesRegex(TypeError, r"img should be PIL Image"): + F.rotate(x, 10) + + img = F.to_pil_image(x) + + result = F.rotate(img, 45) + self.assertEqual(result.size, (100, 100)) + r, c, ch = np.where(result) + self.assertTrue(all(x in r for x in [49, 50])) + self.assertTrue(all(x in c for x in [36])) + self.assertTrue(all(x in ch for x in [0, 1, 2])) + + result = F.rotate(img, 45, expand=True) + self.assertEqual(result.size, (142, 142)) + r, c, ch = np.where(result) + self.assertTrue(all(x in r for x in [70, 71])) + self.assertTrue(all(x in c for x in [57])) + self.assertTrue(all(x in ch for x in [0, 1, 2])) + + result = F.rotate(img, 45, center=(40, 40)) + self.assertEqual(result.size, (100, 100)) + r, c, ch = 
np.where(result) + self.assertTrue(all(x in r for x in [40])) + self.assertTrue(all(x in c for x in [40])) + self.assertTrue(all(x in ch for x in [0, 1, 2])) + + result_a = F.rotate(img, 90) + result_b = F.rotate(img, -270) + + assert_equal(np.array(result_a), np.array(result_b)) + + def test_rotate_fill(self): + img = F.to_pil_image(np.ones((100, 100, 3), dtype=np.uint8) * 255, "RGB") + + modes = ("L", "RGB", "F") + nums_bands = [len(mode) for mode in modes] + fill = 127 + + for mode, num_bands in zip(modes, nums_bands): + img_conv = img.convert(mode) + img_rot = F.rotate(img_conv, 45.0, fill=fill) + pixel = img_rot.getpixel((0, 0)) + + if not isinstance(pixel, tuple): + pixel = (pixel,) + self.assertTupleEqual(pixel, tuple([fill] * num_bands)) + + for wrong_num_bands in set(nums_bands) - {num_bands}: + with self.assertRaises(ValueError): + F.rotate(img_conv, 45.0, fill=tuple([fill] * wrong_num_bands)) + + def test_affine(self): + input_img = np.zeros((40, 40, 3), dtype=np.uint8) + cnt = [20, 20] + for pt in [(16, 16), (20, 16), (20, 20)]: + for i in range(-5, 5): + for j in range(-5, 5): + input_img[pt[0] + i, pt[1] + j, :] = [255, 155, 55] + + with self.assertRaises(TypeError, msg="Argument translate should be a sequence"): + F.affine(input_img, 10, translate=0, scale=1, shear=1) + + pil_img = F.to_pil_image(input_img) + + def _to_3x3_inv(inv_result_matrix): + result_matrix = np.zeros((3, 3)) + result_matrix[:2, :] = np.array(inv_result_matrix).reshape((2, 3)) + result_matrix[2, 2] = 1 + return np.linalg.inv(result_matrix) + + def _test_transformation(a, t, s, sh): + a_rad = math.radians(a) + s_rad = [math.radians(sh_) for sh_ in sh] + cx, cy = cnt + tx, ty = t + sx, sy = s_rad + rot = a_rad + + # 1) Check transformation matrix: + C = np.array([[1, 0, cx], + [0, 1, cy], + [0, 0, 1]]) + T = np.array([[1, 0, tx], + [0, 1, ty], + [0, 0, 1]]) + Cinv = np.linalg.inv(C) + + RS = np.array( + [[s * math.cos(rot), -s * math.sin(rot), 0], + [s * math.sin(rot), s * math.cos(rot), 0], + [0, 0, 1]]) + + SHx = np.array([[1, -math.tan(sx), 0], + [0, 1, 0], + [0, 0, 1]]) + + SHy = np.array([[1, 0, 0], + [-math.tan(sy), 1, 0], + [0, 0, 1]]) + + RSS = np.matmul(RS, np.matmul(SHy, SHx)) + + true_matrix = np.matmul(T, np.matmul(C, np.matmul(RSS, Cinv))) + + result_matrix = _to_3x3_inv(F._get_inverse_affine_matrix(center=cnt, angle=a, + translate=t, scale=s, shear=sh)) + self.assertLess(np.sum(np.abs(true_matrix - result_matrix)), 1e-10) + # 2) Perform inverse mapping: + true_result = np.zeros((40, 40, 3), dtype=np.uint8) + inv_true_matrix = np.linalg.inv(true_matrix) + for y in range(true_result.shape[0]): + for x in range(true_result.shape[1]): + # Same as for PIL: + # https://github.com/python-pillow/Pillow/blob/71f8ec6a0cfc1008076a023c0756542539d057ab/ + # src/libImaging/Geometry.c#L1060 + input_pt = np.array([x + 0.5, y + 0.5, 1.0]) + res = np.floor(np.dot(inv_true_matrix, input_pt)).astype(np.int) + _x, _y = res[:2] + if 0 <= _x < input_img.shape[1] and 0 <= _y < input_img.shape[0]: + true_result[y, x, :] = input_img[_y, _x, :] + + result = F.affine(pil_img, angle=a, translate=t, scale=s, shear=sh) + self.assertEqual(result.size, pil_img.size) + # Compute number of different pixels: + np_result = np.array(result) + n_diff_pixels = np.sum(np_result != true_result) / 3 + # Accept 3 wrong pixels + self.assertLess(n_diff_pixels, 3, + "a={}, t={}, s={}, sh={}\n".format(a, t, s, sh) + + "n diff pixels={}\n".format(np.sum(np.array(result)[:, :, 0] != true_result[:, :, 0]))) + + # Test rotation + a = 45 + 
_test_transformation(a=a, t=(0, 0), s=1.0, sh=(0.0, 0.0)) + + # Test translation + t = [10, 15] + _test_transformation(a=0.0, t=t, s=1.0, sh=(0.0, 0.0)) + + # Test scale + s = 1.2 + _test_transformation(a=0.0, t=(0.0, 0.0), s=s, sh=(0.0, 0.0)) + + # Test shear + sh = [45.0, 25.0] + _test_transformation(a=0.0, t=(0.0, 0.0), s=1.0, sh=sh) + + # Test rotation, scale, translation, shear + for a in range(-90, 90, 25): + for t1 in range(-10, 10, 5): + for s in [0.75, 0.98, 1.0, 1.2, 1.4]: + for sh in range(-15, 15, 5): + _test_transformation(a=a, t=(t1, t1), s=s, sh=(sh, sh)) + + def test_random_rotation(self): + + with self.assertRaises(ValueError): + transforms.RandomRotation(-0.7) + transforms.RandomRotation([-0.7]) + transforms.RandomRotation([-0.7, 0, 0.7]) + + # assert fill being either a Sequence or a Number + with self.assertRaises(TypeError): + transforms.RandomRotation(0, fill={}) + + t = transforms.RandomRotation(0, fill=None) + self.assertTrue(t.fill == 0) + + t = transforms.RandomRotation(10) + angle = t.get_params(t.degrees) + self.assertTrue(angle > -10 and angle < 10) + + t = transforms.RandomRotation((-10, 10)) + angle = t.get_params(t.degrees) + self.assertTrue(-10 < angle < 10) + + # Checking if RandomRotation can be printed as string + t.__repr__() + + # assert deprecation warning and non-BC + with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): + t = transforms.RandomRotation((-10, 10), resample=2) + self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR) + + # assert changed type warning + with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): + t = transforms.RandomRotation((-10, 10), interpolation=2) + self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR) + + def test_random_affine(self): + + with self.assertRaises(ValueError): + transforms.RandomAffine(-0.7) + transforms.RandomAffine([-0.7]) + transforms.RandomAffine([-0.7, 0, 0.7]) + + transforms.RandomAffine([-90, 90], translate=2.0) + transforms.RandomAffine([-90, 90], translate=[-1.0, 1.0]) + transforms.RandomAffine([-90, 90], translate=[-1.0, 0.0, 1.0]) + + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.0]) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[-1.0, 1.0]) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, -0.5]) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 3.0, -0.5]) + + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=-7) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10]) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10]) + transforms.RandomAffine([-90, 90], translate=[0.2, 0.2], scale=[0.5, 0.5], shear=[-10, 0, 10, 0, 10]) + + # assert fill being either a Sequence or a Number + with self.assertRaises(TypeError): + transforms.RandomAffine(0, fill={}) + + t = transforms.RandomAffine(0, fill=None) + self.assertTrue(t.fill == 0) + + x = np.zeros((100, 100, 3), dtype=np.uint8) + img = F.to_pil_image(x) + + t = transforms.RandomAffine(10, translate=[0.5, 0.3], scale=[0.7, 1.3], shear=[-10, 10, 20, 40]) + for _ in range(100): + angle, translations, scale, shear = t.get_params(t.degrees, t.translate, t.scale, t.shear, + img_size=img.size) + self.assertTrue(-10 < angle < 10) + self.assertTrue(-img.size[0] * 0.5 <= translations[0] <= img.size[0] * 0.5, + "{} vs {}".format(translations[0], 
img.size[0] * 0.5)) + self.assertTrue(-img.size[1] * 0.5 <= translations[1] <= img.size[1] * 0.5, + "{} vs {}".format(translations[1], img.size[1] * 0.5)) + self.assertTrue(0.7 < scale < 1.3) + self.assertTrue(-10 < shear[0] < 10) + self.assertTrue(-20 < shear[1] < 40) + + # Checking if RandomAffine can be printed as string + t.__repr__() + + t = transforms.RandomAffine(10, interpolation=transforms.InterpolationMode.BILINEAR) + self.assertIn("bilinear", t.__repr__()) + + # assert deprecation warning and non-BC + with self.assertWarnsRegex(UserWarning, r"Argument resample is deprecated and will be removed"): + t = transforms.RandomAffine(10, resample=2) + self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR) + + with self.assertWarnsRegex(UserWarning, r"Argument fillcolor is deprecated and will be removed"): + t = transforms.RandomAffine(10, fillcolor=10) + self.assertEqual(t.fill, 10) + + # assert changed type warning + with self.assertWarnsRegex(UserWarning, r"Argument interpolation should be of type InterpolationMode"): + t = transforms.RandomAffine(10, interpolation=2) + self.assertEqual(t.interpolation, transforms.InterpolationMode.BILINEAR) + + def test_to_grayscale(self): + """Unit tests for grayscale transform""" + + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + gray_np = np.array(x_pil_2) + + # Test Set: Grayscale an image with desired number of output channels + # Case 1: RGB -> 1 channel grayscale + trans1 = transforms.Grayscale(num_output_channels=1) + gray_pil_1 = trans1(x_pil) + gray_np_1 = np.array(gray_pil_1) + self.assertEqual(gray_pil_1.mode, 'L', 'mode should be L') + self.assertEqual(gray_np_1.shape, tuple(x_shape[0:2]), 'should be 1 channel') + assert_equal(gray_np, gray_np_1) + + # Case 2: RGB -> 3 channel grayscale + trans2 = transforms.Grayscale(num_output_channels=3) + gray_pil_2 = trans2(x_pil) + gray_np_2 = np.array(gray_pil_2) + self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') + self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') + assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) + assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) + assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False) + + # Case 3: 1 channel grayscale -> 1 channel grayscale + trans3 = transforms.Grayscale(num_output_channels=1) + gray_pil_3 = trans3(x_pil_2) + gray_np_3 = np.array(gray_pil_3) + self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') + self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') + assert_equal(gray_np, gray_np_3) + + # Case 4: 1 channel grayscale -> 3 channel grayscale + trans4 = transforms.Grayscale(num_output_channels=3) + gray_pil_4 = trans4(x_pil_2) + gray_np_4 = np.array(gray_pil_4) + self.assertEqual(gray_pil_4.mode, 'RGB', 'mode should be RGB') + self.assertEqual(gray_np_4.shape, tuple(x_shape), 'should be 3 channel') + assert_equal(gray_np_4[:, :, 0], gray_np_4[:, :, 1]) + assert_equal(gray_np_4[:, :, 1], gray_np_4[:, :, 2]) + assert_equal(gray_np, gray_np_4[:, :, 0], check_stride=False) + + # Checking if Grayscale can be printed as string + trans4.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_grayscale(self): + """Unit tests for random grayscale transform""" + + # Test Set 1: RGB -> 3 channel grayscale + random_state = random.getstate() + 
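+        # Save the global RNG state and seed deterministically; the state is
+        # restored after the statistical check so other tests are unaffected.
+        # With p=0.5, the number of grayscaled outputs over the 250 samples should
+        # be consistent with Binomial(250, 0.5), which stats.binom_test verifies.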
random.seed(42) + x_shape = [2, 2, 3] + x_np = np.random.randint(0, 256, x_shape, np.uint8) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + gray_np = np.array(x_pil_2) + + num_samples = 250 + num_gray = 0 + for _ in range(num_samples): + gray_pil_2 = transforms.RandomGrayscale(p=0.5)(x_pil) + gray_np_2 = np.array(gray_pil_2) + if np.array_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) and \ + np.array_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) and \ + np.array_equal(gray_np, gray_np_2[:, :, 0]): + num_gray = num_gray + 1 + + p_value = stats.binom_test(num_gray, num_samples, p=0.5) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Test Set 2: grayscale -> 1 channel grayscale + random_state = random.getstate() + random.seed(42) + x_shape = [2, 2, 3] + x_np = np.random.randint(0, 256, x_shape, np.uint8) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + gray_np = np.array(x_pil_2) + + num_samples = 250 + num_gray = 0 + for _ in range(num_samples): + gray_pil_3 = transforms.RandomGrayscale(p=0.5)(x_pil_2) + gray_np_3 = np.array(gray_pil_3) + if np.array_equal(gray_np, gray_np_3): + num_gray = num_gray + 1 + + p_value = stats.binom_test(num_gray, num_samples, p=1.0) # Note: grayscale is always unchanged + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + # Test set 3: Explicit tests + x_shape = [2, 2, 3] + x_data = [0, 5, 13, 54, 135, 226, 37, 8, 234, 90, 255, 1] + x_np = np.array(x_data, dtype=np.uint8).reshape(x_shape) + x_pil = Image.fromarray(x_np, mode='RGB') + x_pil_2 = x_pil.convert('L') + gray_np = np.array(x_pil_2) + + # Case 3a: RGB -> 3 channel grayscale (grayscaled) + trans2 = transforms.RandomGrayscale(p=1.0) + gray_pil_2 = trans2(x_pil) + gray_np_2 = np.array(gray_pil_2) + self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') + self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') + assert_equal(gray_np_2[:, :, 0], gray_np_2[:, :, 1]) + assert_equal(gray_np_2[:, :, 1], gray_np_2[:, :, 2]) + assert_equal(gray_np, gray_np_2[:, :, 0], check_stride=False) + + # Case 3b: RGB -> 3 channel grayscale (unchanged) + trans2 = transforms.RandomGrayscale(p=0.0) + gray_pil_2 = trans2(x_pil) + gray_np_2 = np.array(gray_pil_2) + self.assertEqual(gray_pil_2.mode, 'RGB', 'mode should be RGB') + self.assertEqual(gray_np_2.shape, tuple(x_shape), 'should be 3 channel') + assert_equal(x_np, gray_np_2) + + # Case 3c: 1 channel grayscale -> 1 channel grayscale (grayscaled) + trans3 = transforms.RandomGrayscale(p=1.0) + gray_pil_3 = trans3(x_pil_2) + gray_np_3 = np.array(gray_pil_3) + self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') + self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') + assert_equal(gray_np, gray_np_3) + + # Case 3d: 1 channel grayscale -> 1 channel grayscale (unchanged) + trans3 = transforms.RandomGrayscale(p=0.0) + gray_pil_3 = trans3(x_pil_2) + gray_np_3 = np.array(gray_pil_3) + self.assertEqual(gray_pil_3.mode, 'L', 'mode should be L') + self.assertEqual(gray_np_3.shape, tuple(x_shape[0:2]), 'should be 1 channel') + assert_equal(gray_np, gray_np_3) + + # Checking if RandomGrayscale can be printed as string + trans3.__repr__() + + def test_gaussian_blur_asserts(self): + np_img = np.ones((100, 100, 3), dtype=np.uint8) * 255 + img = F.to_pil_image(np_img, "RGB") + + with self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"): + F.gaussian_blur(img, [3]) + + with 
self.assertRaisesRegex(ValueError, r"If kernel_size is a sequence its length should be 2"): + F.gaussian_blur(img, [3, 3, 3]) + with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"): + transforms.GaussianBlur([3, 3, 3]) + + with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"): + F.gaussian_blur(img, [4, 4]) + with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"): + transforms.GaussianBlur([4, 4]) + + with self.assertRaisesRegex(ValueError, r"kernel_size should have odd and positive integers"): + F.gaussian_blur(img, [-3, -3]) + with self.assertRaisesRegex(ValueError, r"Kernel size value should be an odd and positive number"): + transforms.GaussianBlur([-3, -3]) + + with self.assertRaisesRegex(ValueError, r"If sigma is a sequence, its length should be 2"): + F.gaussian_blur(img, 3, [1, 1, 1]) + with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"): + transforms.GaussianBlur(3, [1, 1, 1]) + + with self.assertRaisesRegex(ValueError, r"sigma should have positive values"): + F.gaussian_blur(img, 3, -1.0) + with self.assertRaisesRegex(ValueError, r"If sigma is a single number, it must be positive"): + transforms.GaussianBlur(3, -1.0) + + with self.assertRaisesRegex(TypeError, r"kernel_size should be int or a sequence of integers"): + F.gaussian_blur(img, "kernel_size_string") + with self.assertRaisesRegex(ValueError, r"Kernel size should be a tuple/list of two integers"): + transforms.GaussianBlur("kernel_size_string") + + with self.assertRaisesRegex(TypeError, r"sigma should be either float or sequence of floats"): + F.gaussian_blur(img, 3, "sigma_string") + with self.assertRaisesRegex(ValueError, r"sigma should be a single number or a list/tuple with length 2"): + transforms.GaussianBlur(3, "sigma_string") + + def _test_randomness(self, fn, trans, configs): + random_state = random.getstate() + random.seed(42) + img = transforms.ToPILImage()(torch.rand(3, 16, 18)) + + for p in [0.5, 0.7]: + for config in configs: + inv_img = fn(img, **config) + + num_samples = 250 + counts = 0 + for _ in range(num_samples): + tranformation = trans(p=p, **config) + tranformation.__repr__() + out = tranformation(img) + if out == inv_img: + counts += 1 + + p_value = stats.binom_test(counts, num_samples, p=p) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_invert(self): + self._test_randomness( + F.invert, + transforms.RandomInvert, + [{}] + ) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_posterize(self): + self._test_randomness( + F.posterize, + transforms.RandomPosterize, + [{"bits": 4}] + ) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_solarize(self): + self._test_randomness( + F.solarize, + transforms.RandomSolarize, + [{"threshold": 192}] + ) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_adjust_sharpness(self): + self._test_randomness( + F.adjust_sharpness, + transforms.RandomAdjustSharpness, + [{"sharpness_factor": 2.0}] + ) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_autocontrast(self): + self._test_randomness( + F.autocontrast, + transforms.RandomAutocontrast, + [{}] + ) + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_equalize(self): + 
self._test_randomness( + F.equalize, + transforms.RandomEqualize, + [{}] + ) + + def test_autoaugment(self): + for policy in transforms.AutoAugmentPolicy: + for fill in [None, 85, (128, 128, 128)]: + random.seed(42) + img = Image.open(GRACE_HOPPER) + transform = transforms.AutoAugment(policy=policy, fill=fill) + for _ in range(100): + img = transform(img) + transform.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_erasing(self): + img = torch.ones(3, 128, 128) + + t = transforms.RandomErasing(scale=(0.1, 0.1), ratio=(1 / 3, 3.)) + y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ]) + aspect_ratio = h / w + # Add some tolerance due to the rounding and int conversion used in the transform + tol = 0.05 + self.assertTrue(1 / 3 - tol <= aspect_ratio <= 3 + tol) + + aspect_ratios = [] + random.seed(42) + trial = 1000 + for _ in range(trial): + y, x, h, w, v = t.get_params(img, t.scale, t.ratio, [t.value, ]) + aspect_ratios.append(h / w) + + count_bigger_then_ones = len([1 for aspect_ratio in aspect_ratios if aspect_ratio > 1]) + p_value = stats.binom_test(count_bigger_then_ones, trial, p=0.5) + self.assertGreater(p_value, 0.0001) + + # Checking if RandomErasing can be printed as string + t.__repr__() + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_tensor.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..0d5e365351db628dcb7f8d2e6b1fe87421463bc8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_tensor.py @@ -0,0 +1,684 @@ +import os +import torch +from torchvision import transforms as T +from torchvision.transforms import functional as F +from torchvision.transforms import InterpolationMode + +import numpy as np + +import unittest +from typing import Sequence + +from common_utils import TransformsTester, get_tmp_dir, int_dtypes, float_dtypes +from _assert_utils import assert_equal + + +NEAREST, BILINEAR, BICUBIC = InterpolationMode.NEAREST, InterpolationMode.BILINEAR, InterpolationMode.BICUBIC + + +class Tester(TransformsTester): + + def setUp(self): + self.device = "cpu" + + def _test_functional_op(self, func, fn_kwargs, test_exact_match=True, **match_kwargs): + if fn_kwargs is None: + fn_kwargs = {} + + f = getattr(F, func) + tensor, pil_img = self._create_data(height=10, width=10, device=self.device) + transformed_tensor = f(tensor, **fn_kwargs) + transformed_pil_img = f(pil_img, **fn_kwargs) + if test_exact_match: + self.compareTensorToPIL(transformed_tensor, transformed_pil_img, **match_kwargs) + else: + self.approxEqualTensorToPIL(transformed_tensor, transformed_pil_img, **match_kwargs) + + def _test_transform_vs_scripted(self, transform, s_transform, tensor, msg=None): + torch.manual_seed(12) + out1 = transform(tensor) + torch.manual_seed(12) + out2 = s_transform(tensor) + assert_equal(out1, out2, msg=msg) + + def _test_transform_vs_scripted_on_batch(self, transform, s_transform, batch_tensors, msg=None): + torch.manual_seed(12) + transformed_batch = transform(batch_tensors) + + for i in range(len(batch_tensors)): + img_tensor = batch_tensors[i, ...] 
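+            # Re-seeding with the same value before each per-image call makes the
+            # random transform draw the parameters it drew for the batched call
+            # above, so each individually transformed image must match its slice
+            # of the batched output.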
+ torch.manual_seed(12) + transformed_img = transform(img_tensor) + assert_equal(transformed_img, transformed_batch[i, ...], msg=msg) + + torch.manual_seed(12) + s_transformed_batch = s_transform(batch_tensors) + assert_equal(transformed_batch, s_transformed_batch, msg=msg) + + def _test_class_op(self, method, meth_kwargs=None, test_exact_match=True, **match_kwargs): + if meth_kwargs is None: + meth_kwargs = {} + + # test for class interface + f = getattr(T, method)(**meth_kwargs) + scripted_fn = torch.jit.script(f) + + tensor, pil_img = self._create_data(26, 34, device=self.device) + # set seed to reproduce the same transformation for tensor and PIL image + torch.manual_seed(12) + transformed_tensor = f(tensor) + torch.manual_seed(12) + transformed_pil_img = f(pil_img) + if test_exact_match: + self.compareTensorToPIL(transformed_tensor, transformed_pil_img, **match_kwargs) + else: + self.approxEqualTensorToPIL(transformed_tensor.float(), transformed_pil_img, **match_kwargs) + + torch.manual_seed(12) + transformed_tensor_script = scripted_fn(tensor) + assert_equal(transformed_tensor, transformed_tensor_script) + + batch_tensors = self._create_data_batch(height=23, width=34, channels=3, num_samples=4, device=self.device) + self._test_transform_vs_scripted_on_batch(f, scripted_fn, batch_tensors) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_{}.pt".format(method))) + + def _test_op(self, func, method, fn_kwargs=None, meth_kwargs=None, test_exact_match=True, **match_kwargs): + self._test_functional_op(func, fn_kwargs, test_exact_match=test_exact_match, **match_kwargs) + self._test_class_op(method, meth_kwargs, test_exact_match=test_exact_match, **match_kwargs) + + def test_random_horizontal_flip(self): + self._test_op('hflip', 'RandomHorizontalFlip') + + def test_random_vertical_flip(self): + self._test_op('vflip', 'RandomVerticalFlip') + + def test_random_invert(self): + self._test_op('invert', 'RandomInvert') + + def test_random_posterize(self): + fn_kwargs = meth_kwargs = {"bits": 4} + self._test_op( + 'posterize', 'RandomPosterize', fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_random_solarize(self): + fn_kwargs = meth_kwargs = {"threshold": 192.0} + self._test_op( + 'solarize', 'RandomSolarize', fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_random_adjust_sharpness(self): + fn_kwargs = meth_kwargs = {"sharpness_factor": 2.0} + self._test_op( + 'adjust_sharpness', 'RandomAdjustSharpness', fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_random_autocontrast(self): + # We check the max abs difference because on some (very rare) pixels, the actual value may be different + # between PIL and tensors due to floating approximations. 
+ self._test_op('autocontrast', 'RandomAutocontrast', test_exact_match=False, agg_method='max', + tol=(1 + 1e-5), allowed_percentage_diff=.05) + + def test_random_equalize(self): + self._test_op('equalize', 'RandomEqualize') + + def test_color_jitter(self): + + tol = 1.0 + 1e-10 + for f in [0.1, 0.5, 1.0, 1.34, (0.3, 0.7), [0.4, 0.5]]: + meth_kwargs = {"brightness": f} + self._test_class_op( + "ColorJitter", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + for f in [0.2, 0.5, 1.0, 1.5, (0.3, 0.7), [0.4, 0.5]]: + meth_kwargs = {"contrast": f} + self._test_class_op( + "ColorJitter", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + for f in [0.5, 0.75, 1.0, 1.25, (0.3, 0.7), [0.3, 0.4]]: + meth_kwargs = {"saturation": f} + self._test_class_op( + "ColorJitter", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + for f in [0.2, 0.5, (-0.2, 0.3), [-0.4, 0.5]]: + meth_kwargs = {"hue": f} + self._test_class_op( + "ColorJitter", meth_kwargs=meth_kwargs, test_exact_match=False, tol=16.1, agg_method="max" + ) + + # All 4 parameters together + meth_kwargs = {"brightness": 0.2, "contrast": 0.2, "saturation": 0.2, "hue": 0.2} + self._test_class_op( + "ColorJitter", meth_kwargs=meth_kwargs, test_exact_match=False, tol=12.1, agg_method="max" + ) + + def test_pad(self): + for m in ["constant", "edge", "reflect", "symmetric"]: + fill = 127 if m == "constant" else 0 + for mul in [1, -1]: + # Test functional.pad (PIL and Tensor) with padding as single int + self._test_functional_op( + "pad", fn_kwargs={"padding": mul * 2, "fill": fill, "padding_mode": m} + ) + # Test functional.pad and transforms.Pad with padding as [int, ] + fn_kwargs = meth_kwargs = {"padding": [mul * 2, ], "fill": fill, "padding_mode": m} + self._test_op( + "pad", "Pad", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + # Test functional.pad and transforms.Pad with padding as list + fn_kwargs = meth_kwargs = {"padding": [mul * 4, 4], "fill": fill, "padding_mode": m} + self._test_op( + "pad", "Pad", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + # Test functional.pad and transforms.Pad with padding as tuple + fn_kwargs = meth_kwargs = {"padding": (mul * 2, 2, 2, mul * 2), "fill": fill, "padding_mode": m} + self._test_op( + "pad", "Pad", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_crop(self): + fn_kwargs = {"top": 2, "left": 3, "height": 4, "width": 5} + # Test transforms.RandomCrop with size and padding as tuple + meth_kwargs = {"size": (4, 5), "padding": (4, 4), "pad_if_needed": True, } + self._test_op( + 'crop', 'RandomCrop', fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + # Test transforms.functional.crop including outside the image area + fn_kwargs = {"top": -2, "left": 3, "height": 4, "width": 5} # top + self._test_functional_op('crop', fn_kwargs=fn_kwargs) + + fn_kwargs = {"top": 1, "left": -3, "height": 4, "width": 5} # left + self._test_functional_op('crop', fn_kwargs=fn_kwargs) + + fn_kwargs = {"top": 7, "left": 3, "height": 4, "width": 5} # bottom + self._test_functional_op('crop', fn_kwargs=fn_kwargs) + + fn_kwargs = {"top": 3, "left": 8, "height": 4, "width": 5} # right + self._test_functional_op('crop', fn_kwargs=fn_kwargs) + + fn_kwargs = {"top": -3, "left": -3, "height": 15, "width": 15} # all + self._test_functional_op('crop', fn_kwargs=fn_kwargs) + + sizes = [5, [5, ], [6, 6]] + padding_configs = [ + {"padding_mode": "constant", "fill": 0}, + {"padding_mode": "constant", "fill": 10}, + 
{"padding_mode": "constant", "fill": 20}, + {"padding_mode": "edge"}, + {"padding_mode": "reflect"}, + ] + + for size in sizes: + for padding_config in padding_configs: + config = dict(padding_config) + config["size"] = size + self._test_class_op("RandomCrop", config) + + def test_center_crop(self): + fn_kwargs = {"output_size": (4, 5)} + meth_kwargs = {"size": (4, 5), } + self._test_op( + "center_crop", "CenterCrop", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = {"output_size": (5,)} + meth_kwargs = {"size": (5, )} + self._test_op( + "center_crop", "CenterCrop", fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + tensor = torch.randint(0, 256, (3, 10, 10), dtype=torch.uint8, device=self.device) + # Test torchscript of transforms.CenterCrop with size as int + f = T.CenterCrop(size=5) + scripted_fn = torch.jit.script(f) + scripted_fn(tensor) + + # Test torchscript of transforms.CenterCrop with size as [int, ] + f = T.CenterCrop(size=[5, ]) + scripted_fn = torch.jit.script(f) + scripted_fn(tensor) + + # Test torchscript of transforms.CenterCrop with size as tuple + f = T.CenterCrop(size=(6, 6)) + scripted_fn = torch.jit.script(f) + scripted_fn(tensor) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_center_crop.pt")) + + def _test_op_list_output(self, func, method, out_length, fn_kwargs=None, meth_kwargs=None): + if fn_kwargs is None: + fn_kwargs = {} + if meth_kwargs is None: + meth_kwargs = {} + + fn = getattr(F, func) + scripted_fn = torch.jit.script(fn) + + tensor, pil_img = self._create_data(height=20, width=20, device=self.device) + transformed_t_list = fn(tensor, **fn_kwargs) + transformed_p_list = fn(pil_img, **fn_kwargs) + self.assertEqual(len(transformed_t_list), len(transformed_p_list)) + self.assertEqual(len(transformed_t_list), out_length) + for transformed_tensor, transformed_pil_img in zip(transformed_t_list, transformed_p_list): + self.compareTensorToPIL(transformed_tensor, transformed_pil_img) + + transformed_t_list_script = scripted_fn(tensor.detach().clone(), **fn_kwargs) + self.assertEqual(len(transformed_t_list), len(transformed_t_list_script)) + self.assertEqual(len(transformed_t_list_script), out_length) + for transformed_tensor, transformed_tensor_script in zip(transformed_t_list, transformed_t_list_script): + assert_equal( + transformed_tensor, + transformed_tensor_script, + msg="{} vs {}".format(transformed_tensor, transformed_tensor_script), + ) + + # test for class interface + fn = getattr(T, method)(**meth_kwargs) + scripted_fn = torch.jit.script(fn) + output = scripted_fn(tensor) + self.assertEqual(len(output), len(transformed_t_list_script)) + + # test on batch of tensors + batch_tensors = self._create_data_batch(height=23, width=34, channels=3, num_samples=4, device=self.device) + torch.manual_seed(12) + transformed_batch_list = fn(batch_tensors) + + for i in range(len(batch_tensors)): + img_tensor = batch_tensors[i, ...] 
+ torch.manual_seed(12) + transformed_img_list = fn(img_tensor) + for transformed_img, transformed_batch in zip(transformed_img_list, transformed_batch_list): + assert_equal( + transformed_img, + transformed_batch[i, ...], + msg="{} vs {}".format(transformed_img, transformed_batch[i, ...]), + ) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_op_list_{}.pt".format(method))) + + def test_five_crop(self): + fn_kwargs = meth_kwargs = {"size": (5,)} + self._test_op_list_output( + "five_crop", "FiveCrop", out_length=5, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": [5, ]} + self._test_op_list_output( + "five_crop", "FiveCrop", out_length=5, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": (4, 5)} + self._test_op_list_output( + "five_crop", "FiveCrop", out_length=5, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": [4, 5]} + self._test_op_list_output( + "five_crop", "FiveCrop", out_length=5, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_ten_crop(self): + fn_kwargs = meth_kwargs = {"size": (5,)} + self._test_op_list_output( + "ten_crop", "TenCrop", out_length=10, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": [5, ]} + self._test_op_list_output( + "ten_crop", "TenCrop", out_length=10, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": (4, 5)} + self._test_op_list_output( + "ten_crop", "TenCrop", out_length=10, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + fn_kwargs = meth_kwargs = {"size": [4, 5]} + self._test_op_list_output( + "ten_crop", "TenCrop", out_length=10, fn_kwargs=fn_kwargs, meth_kwargs=meth_kwargs + ) + + def test_resize(self): + + # TODO: Minimal check for bug-fix, improve this later + x = torch.rand(3, 32, 46) + t = T.Resize(size=38) + y = t(x) + # If size is an int, smaller edge of the image will be matched to this number. + # i.e, if height > width, then image will be rescaled to (size * height / width, size). 
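+        # For instance, x above has shape (3, 32, 46): the smaller edge is the
+        # height (32), so Resize(size=38) should produce height 38 and width
+        # int(38 * 46 / 32) == 54, which is exactly what the assertions below check.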
+ self.assertTrue(isinstance(y, torch.Tensor)) + self.assertEqual(y.shape[1], 38) + self.assertEqual(y.shape[2], int(38 * 46 / 32)) + + tensor, _ = self._create_data(height=34, width=36, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + for dt in [None, torch.float32, torch.float64]: + if dt is not None: + # This is a trivial cast to float of uint8 data to test all cases + tensor = tensor.to(dt) + for size in [32, 34, [32, ], [32, 32], (32, 32), [34, 35]]: + for max_size in (None, 35, 1000): + if max_size is not None and isinstance(size, Sequence) and len(size) != 1: + continue # Not supported + for interpolation in [BILINEAR, BICUBIC, NEAREST]: + + if isinstance(size, int): + script_size = [size, ] + else: + script_size = size + + transform = T.Resize(size=script_size, interpolation=interpolation, max_size=max_size) + s_transform = torch.jit.script(transform) + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_resize.pt")) + + def test_resized_crop(self): + tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + for scale in [(0.7, 1.2), [0.7, 1.2]]: + for ratio in [(0.75, 1.333), [0.75, 1.333]]: + for size in [(32, ), [44, ], [32, ], [32, 32], (32, 32), [44, 55]]: + for interpolation in [NEAREST, BILINEAR, BICUBIC]: + transform = T.RandomResizedCrop( + size=size, scale=scale, ratio=ratio, interpolation=interpolation + ) + s_transform = torch.jit.script(transform) + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_resized_crop.pt")) + + def test_random_affine(self): + tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + for shear in [15, 10.0, (5.0, 10.0), [-15, 15], [-10.0, 10.0, -11.0, 11.0]]: + for scale in [(0.7, 1.2), [0.7, 1.2]]: + for translate in [(0.1, 0.2), [0.2, 0.1]]: + for degrees in [45, 35.0, (-45, 45), [-90.0, 90.0]]: + for interpolation in [NEAREST, BILINEAR]: + for fill in [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1]: + transform = T.RandomAffine( + degrees=degrees, translate=translate, + scale=scale, shear=shear, interpolation=interpolation, fill=fill + ) + s_transform = torch.jit.script(transform) + + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_random_affine.pt")) + + def test_random_rotate(self): + tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + for center in [(0, 0), [10, 10], None, (56, 44)]: + for expand in [True, False]: + for degrees in [45, 35.0, (-45, 45), [-90.0, 90.0]]: + for interpolation in [NEAREST, BILINEAR]: + for fill in [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1]: + transform = T.RandomRotation( + degrees=degrees, 
interpolation=interpolation, expand=expand, center=center, fill=fill + ) + s_transform = torch.jit.script(transform) + + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_random_rotate.pt")) + + def test_random_perspective(self): + tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + for distortion_scale in np.linspace(0.1, 1.0, num=20): + for interpolation in [NEAREST, BILINEAR]: + for fill in [85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1]: + transform = T.RandomPerspective( + distortion_scale=distortion_scale, + interpolation=interpolation, + fill=fill + ) + s_transform = torch.jit.script(transform) + + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_perspective.pt")) + + def test_to_grayscale(self): + + meth_kwargs = {"num_output_channels": 1} + tol = 1.0 + 1e-10 + self._test_class_op( + "Grayscale", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + meth_kwargs = {"num_output_channels": 3} + self._test_class_op( + "Grayscale", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + meth_kwargs = {} + self._test_class_op( + "RandomGrayscale", meth_kwargs=meth_kwargs, test_exact_match=False, tol=tol, agg_method="max" + ) + + def test_normalize(self): + fn = T.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)) + tensor, _ = self._create_data(26, 34, device=self.device) + + with self.assertRaisesRegex(TypeError, r"Input tensor should be a float tensor"): + fn(tensor) + + batch_tensors = torch.rand(4, 3, 44, 56, device=self.device) + tensor = tensor.to(dtype=torch.float32) / 255.0 + # test for class interface + scripted_fn = torch.jit.script(fn) + + self._test_transform_vs_scripted(fn, scripted_fn, tensor) + self._test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt")) + + def test_linear_transformation(self): + c, h, w = 3, 24, 32 + + tensor, _ = self._create_data(h, w, channels=c, device=self.device) + + matrix = torch.rand(c * h * w, c * h * w, device=self.device) + mean_vector = torch.rand(c * h * w, device=self.device) + + fn = T.LinearTransformation(matrix, mean_vector) + scripted_fn = torch.jit.script(fn) + + self._test_transform_vs_scripted(fn, scripted_fn, tensor) + + batch_tensors = torch.rand(4, c, h, w, device=self.device) + # We skip some tests from _test_transform_vs_scripted_on_batch as + # results for scripted and non-scripted transformations are not exactly the same + torch.manual_seed(12) + transformed_batch = fn(batch_tensors) + torch.manual_seed(12) + s_transformed_batch = scripted_fn(batch_tensors) + assert_equal(transformed_batch, s_transformed_batch) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_norm.pt")) + + def test_compose(self): + tensor, _ = self._create_data(26, 34, device=self.device) + tensor = tensor.to(dtype=torch.float32) / 255.0 + + transforms = T.Compose([ + T.CenterCrop(10), + T.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + ]) + s_transforms = 
torch.nn.Sequential(*transforms.transforms) + + scripted_fn = torch.jit.script(s_transforms) + torch.manual_seed(12) + transformed_tensor = transforms(tensor) + torch.manual_seed(12) + transformed_tensor_script = scripted_fn(tensor) + assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) + + t = T.Compose([ + lambda x: x, + ]) + with self.assertRaisesRegex(RuntimeError, r"Could not get name of python class object"): + torch.jit.script(t) + + def test_random_apply(self): + tensor, _ = self._create_data(26, 34, device=self.device) + tensor = tensor.to(dtype=torch.float32) / 255.0 + + transforms = T.RandomApply([ + T.RandomHorizontalFlip(), + T.ColorJitter(), + ], p=0.4) + s_transforms = T.RandomApply(torch.nn.ModuleList([ + T.RandomHorizontalFlip(), + T.ColorJitter(), + ]), p=0.4) + + scripted_fn = torch.jit.script(s_transforms) + torch.manual_seed(12) + transformed_tensor = transforms(tensor) + torch.manual_seed(12) + transformed_tensor_script = scripted_fn(tensor) + assert_equal(transformed_tensor, transformed_tensor_script, msg="{}".format(transforms)) + + if torch.device(self.device).type == "cpu": + # Can't check this twice, otherwise + # "Can't redefine method: forward on class: __torch__.torchvision.transforms.transforms.RandomApply" + transforms = T.RandomApply([ + T.ColorJitter(), + ], p=0.3) + with self.assertRaisesRegex(RuntimeError, r"Module 'RandomApply' has no attribute 'transforms'"): + torch.jit.script(transforms) + + def test_gaussian_blur(self): + tol = 1.0 + 1e-10 + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": 3, "sigma": 0.75}, + test_exact_match=False, agg_method="max", tol=tol + ) + + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": 23, "sigma": [0.1, 2.0]}, + test_exact_match=False, agg_method="max", tol=tol + ) + + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": 23, "sigma": (0.1, 2.0)}, + test_exact_match=False, agg_method="max", tol=tol + ) + + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": [3, 3], "sigma": (1.0, 1.0)}, + test_exact_match=False, agg_method="max", tol=tol + ) + + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": (3, 3), "sigma": (0.1, 2.0)}, + test_exact_match=False, agg_method="max", tol=tol + ) + + self._test_class_op( + "GaussianBlur", meth_kwargs={"kernel_size": [23], "sigma": 0.75}, + test_exact_match=False, agg_method="max", tol=tol + ) + + def test_random_erasing(self): + img = torch.rand(3, 60, 60) + + # Test Set 0: invalid value + random_erasing = T.RandomErasing(value=(0.1, 0.2, 0.3, 0.4), p=1.0) + with self.assertRaises(ValueError, msg="If value is a sequence, it should have either a single value or 3"): + random_erasing(img) + + tensor, _ = self._create_data(24, 32, channels=3, device=self.device) + batch_tensors = torch.rand(4, 3, 44, 56, device=self.device) + + test_configs = [ + {"value": 0.2}, + {"value": "random"}, + {"value": (0.2, 0.2, 0.2)}, + {"value": "random", "ratio": (0.1, 0.2)}, + ] + + for config in test_configs: + fn = T.RandomErasing(**config) + scripted_fn = torch.jit.script(fn) + self._test_transform_vs_scripted(fn, scripted_fn, tensor) + self._test_transform_vs_scripted_on_batch(fn, scripted_fn, batch_tensors) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_random_erasing.pt")) + + def test_convert_image_dtype(self): + tensor, _ = self._create_data(26, 34, device=self.device) + batch_tensors = torch.rand(4, 3, 44, 56, device=self.device) + + for in_dtype in 
int_dtypes() + float_dtypes(): + in_tensor = tensor.to(in_dtype) + in_batch_tensors = batch_tensors.to(in_dtype) + for out_dtype in int_dtypes() + float_dtypes(): + + fn = T.ConvertImageDtype(dtype=out_dtype) + scripted_fn = torch.jit.script(fn) + + if (in_dtype == torch.float32 and out_dtype in (torch.int32, torch.int64)) or \ + (in_dtype == torch.float64 and out_dtype == torch.int64): + with self.assertRaisesRegex(RuntimeError, r"cannot be performed safely"): + self._test_transform_vs_scripted(fn, scripted_fn, in_tensor) + with self.assertRaisesRegex(RuntimeError, r"cannot be performed safely"): + self._test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors) + continue + + self._test_transform_vs_scripted(fn, scripted_fn, in_tensor) + self._test_transform_vs_scripted_on_batch(fn, scripted_fn, in_batch_tensors) + + with get_tmp_dir() as tmp_dir: + scripted_fn.save(os.path.join(tmp_dir, "t_convert_dtype.pt")) + + def test_autoaugment(self): + tensor = torch.randint(0, 256, size=(3, 44, 56), dtype=torch.uint8, device=self.device) + batch_tensors = torch.randint(0, 256, size=(4, 3, 44, 56), dtype=torch.uint8, device=self.device) + + s_transform = None + for policy in T.AutoAugmentPolicy: + for fill in [None, 85, (10, -10, 10), 0.7, [0.0, 0.0, 0.0], [1, ], 1]: + transform = T.AutoAugment(policy=policy, fill=fill) + s_transform = torch.jit.script(transform) + for _ in range(100): + self._test_transform_vs_scripted(transform, s_transform, tensor) + self._test_transform_vs_scripted_on_batch(transform, s_transform, batch_tensors) + + if s_transform is not None: + with get_tmp_dir() as tmp_dir: + s_transform.save(os.path.join(tmp_dir, "t_autoaugment.pt")) + + +@unittest.skipIf(not torch.cuda.is_available(), reason="Skip if no CUDA device") +class CUDATester(Tester): + + def setUp(self): + torch.set_deterministic(False) + self.device = "cuda" + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_video.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_video.py new file mode 100644 index 0000000000000000000000000000000000000000..942bb010f71786c4a40bd4b603b73418d597de45 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_transforms_video.py @@ -0,0 +1,177 @@ +import torch +from torchvision.transforms import Compose +import unittest +import random +import numpy as np +import warnings +from _assert_utils import assert_equal + +try: + from scipy import stats +except ImportError: + stats = None + + +with warnings.catch_warnings(record=True): + warnings.simplefilter("always") + import torchvision.transforms._transforms_video as transforms + + +class TestVideoTransforms(unittest.TestCase): + + def test_random_crop_video(self): + numFrames = random.randint(4, 128) + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8) + result = Compose([ + transforms.ToTensorVideo(), + transforms.RandomCropVideo((oheight, owidth)), + ])(clip) + self.assertEqual(result.size(2), oheight) + self.assertEqual(result.size(3), owidth) + + transforms.RandomCropVideo((oheight, owidth)).__repr__() + + def test_random_resized_crop_video(self): + numFrames = random.randint(4, 128) + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = 
random.randint(5, (width - 2) / 2) * 2 + clip = torch.randint(0, 256, (numFrames, height, width, 3), dtype=torch.uint8) + result = Compose([ + transforms.ToTensorVideo(), + transforms.RandomResizedCropVideo((oheight, owidth)), + ])(clip) + self.assertEqual(result.size(2), oheight) + self.assertEqual(result.size(3), owidth) + + transforms.RandomResizedCropVideo((oheight, owidth)).__repr__() + + def test_center_crop_video(self): + numFrames = random.randint(4, 128) + height = random.randint(10, 32) * 2 + width = random.randint(10, 32) * 2 + oheight = random.randint(5, (height - 2) / 2) * 2 + owidth = random.randint(5, (width - 2) / 2) * 2 + + clip = torch.ones((numFrames, height, width, 3), dtype=torch.uint8) * 255 + oh1 = (height - oheight) // 2 + ow1 = (width - owidth) // 2 + clipNarrow = clip[:, oh1:oh1 + oheight, ow1:ow1 + owidth, :] + clipNarrow.fill_(0) + result = Compose([ + transforms.ToTensorVideo(), + transforms.CenterCropVideo((oheight, owidth)), + ])(clip) + + msg = "height: " + str(height) + " width: " \ + + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth) + self.assertEqual(result.sum().item(), 0, msg) + + oheight += 1 + owidth += 1 + result = Compose([ + transforms.ToTensorVideo(), + transforms.CenterCropVideo((oheight, owidth)), + ])(clip) + sum1 = result.sum() + + msg = "height: " + str(height) + " width: " \ + + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth) + self.assertEqual(sum1.item() > 1, True, msg) + + oheight += 1 + owidth += 1 + result = Compose([ + transforms.ToTensorVideo(), + transforms.CenterCropVideo((oheight, owidth)), + ])(clip) + sum2 = result.sum() + + msg = "height: " + str(height) + " width: " \ + + str(width) + " oheight: " + str(oheight) + " owidth: " + str(owidth) + self.assertTrue(sum2.item() > 1, msg) + self.assertTrue(sum2.item() > sum1.item(), msg) + + @unittest.skipIf(stats is None, 'scipy.stats is not available') + def test_normalize_video(self): + def samples_from_standard_normal(tensor): + p_value = stats.kstest(list(tensor.view(-1)), 'norm', args=(0, 1)).pvalue + return p_value > 0.0001 + + random_state = random.getstate() + random.seed(42) + for channels in [1, 3]: + numFrames = random.randint(4, 128) + height = random.randint(32, 256) + width = random.randint(32, 256) + mean = random.random() + std = random.random() + clip = torch.normal(mean, std, size=(channels, numFrames, height, width)) + mean = [clip[c].mean().item() for c in range(channels)] + std = [clip[c].std().item() for c in range(channels)] + normalized = transforms.NormalizeVideo(mean, std)(clip) + self.assertTrue(samples_from_standard_normal(normalized)) + random.setstate(random_state) + + # Checking the optional in-place behaviour + tensor = torch.rand((3, 128, 16, 16)) + tensor_inplace = transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True)(tensor) + assert_equal(tensor, tensor_inplace) + + transforms.NormalizeVideo((0.5, 0.5, 0.5), (0.5, 0.5, 0.5), inplace=True).__repr__() + + def test_to_tensor_video(self): + numFrames, height, width = 64, 4, 4 + trans = transforms.ToTensorVideo() + + with self.assertRaises(TypeError): + trans(np.random.rand(numFrames, height, width, 1).tolist()) + trans(torch.rand((numFrames, height, width, 1), dtype=torch.float)) + + with self.assertRaises(ValueError): + trans(torch.ones((3, numFrames, height, width, 3), dtype=torch.uint8)) + trans(torch.ones((height, width, 3), dtype=torch.uint8)) + trans(torch.ones((width, 3), dtype=torch.uint8)) + trans(torch.ones((3), dtype=torch.uint8)) + 
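+        # The TypeError cases above cover non-tensor and float inputs, and the
+        # ValueError cases cover clips that are not 4-dimensional. A valid
+        # (T, H, W, C) uint8 clip is expected to come out as a (C, T, H, W) float
+        # tensor, which is what the crop tests above assume when they index the
+        # output height and width at dims 2 and 3.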
+ trans.__repr__() + + @unittest.skipIf(stats is None, 'scipy.stats not available') + def test_random_horizontal_flip_video(self): + random_state = random.getstate() + random.seed(42) + clip = torch.rand((3, 4, 112, 112), dtype=torch.float) + hclip = clip.flip((-1)) + + num_samples = 250 + num_horizontal = 0 + for _ in range(num_samples): + out = transforms.RandomHorizontalFlipVideo()(clip) + if torch.all(torch.eq(out, hclip)): + num_horizontal += 1 + + p_value = stats.binom_test(num_horizontal, num_samples, p=0.5) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + num_samples = 250 + num_horizontal = 0 + for _ in range(num_samples): + out = transforms.RandomHorizontalFlipVideo(p=0.7)(clip) + if torch.all(torch.eq(out, hclip)): + num_horizontal += 1 + + p_value = stats.binom_test(num_horizontal, num_samples, p=0.7) + random.setstate(random_state) + self.assertGreater(p_value, 0.0001) + + transforms.RandomHorizontalFlipVideo().__repr__() + + +if __name__ == '__main__': + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_utils.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3fed2535c77f2dcefc35c9130383fa276eb589ec --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_utils.py @@ -0,0 +1,231 @@ +import pytest +import numpy as np +import os +import sys +import tempfile +import torch +import torchvision.utils as utils + +from io import BytesIO +import torchvision.transforms.functional as F +from PIL import Image, __version__ as PILLOW_VERSION, ImageColor +from _assert_utils import assert_equal + + +PILLOW_VERSION = tuple(int(x) for x in PILLOW_VERSION.split('.')) + +boxes = torch.tensor([[0, 0, 20, 20], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + + +def test_make_grid_not_inplace(): + t = torch.rand(5, 3, 10, 10) + t_clone = t.clone() + + utils.make_grid(t, normalize=False) + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') + + utils.make_grid(t, normalize=True, scale_each=False) + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') + + utils.make_grid(t, normalize=True, scale_each=True) + assert_equal(t, t_clone, msg='make_grid modified tensor in-place') + + +def test_normalize_in_make_grid(): + t = torch.rand(5, 3, 10, 10) * 255 + norm_max = torch.tensor(1.0) + norm_min = torch.tensor(0.0) + + grid = utils.make_grid(t, normalize=True) + grid_max = torch.max(grid) + grid_min = torch.min(grid) + + # Rounding the result to one decimal for comparison + n_digits = 1 + rounded_grid_max = torch.round(grid_max * 10 ** n_digits) / (10 ** n_digits) + rounded_grid_min = torch.round(grid_min * 10 ** n_digits) / (10 ** n_digits) + + assert_equal(norm_max, rounded_grid_max, msg='Normalized max is not equal to 1') + assert_equal(norm_min, rounded_grid_min, msg='Normalized min is not equal to 0') + + +@pytest.mark.skipif(sys.platform in ('win32', 'cygwin'), reason='temporarily disabled on Windows') +def test_save_image(): + with tempfile.NamedTemporaryFile(suffix='.png') as f: + t = torch.rand(2, 3, 64, 64) + utils.save_image(t, f.name) + assert os.path.exists(f.name), 'The image is not present after save' + + +@pytest.mark.skipif(sys.platform in ('win32', 'cygwin'), reason='temporarily disabled on Windows') +def test_save_image_single_pixel(): + with tempfile.NamedTemporaryFile(suffix='.png') as f: + t = torch.rand(1, 3, 1, 1) + utils.save_image(t, f.name) + assert os.path.exists(f.name), 'The 
pixel image is not present after save' + + +@pytest.mark.skipif(sys.platform in ('win32', 'cygwin'), reason='temporarily disabled on Windows') +def test_save_image_file_object(): + with tempfile.NamedTemporaryFile(suffix='.png') as f: + t = torch.rand(2, 3, 64, 64) + utils.save_image(t, f.name) + img_orig = Image.open(f.name) + fp = BytesIO() + utils.save_image(t, fp, format='png') + img_bytes = Image.open(fp) + assert_equal(F.to_tensor(img_orig), F.to_tensor(img_bytes), msg='Image not stored in file object') + + +@pytest.mark.skipif(sys.platform in ('win32', 'cygwin'), reason='temporarily disabled on Windows') +def test_save_image_single_pixel_file_object(): + with tempfile.NamedTemporaryFile(suffix='.png') as f: + t = torch.rand(1, 3, 1, 1) + utils.save_image(t, f.name) + img_orig = Image.open(f.name) + fp = BytesIO() + utils.save_image(t, fp, format='png') + img_bytes = Image.open(fp) + assert_equal(F.to_tensor(img_orig), F.to_tensor(img_bytes), msg='Image not stored in file object') + + +def test_draw_boxes(): + img = torch.full((3, 100, 100), 255, dtype=torch.uint8) + img_cp = img.clone() + boxes_cp = boxes.clone() + labels = ["a", "b", "c", "d"] + colors = ["green", "#FF00FF", (0, 255, 0), "red"] + result = utils.draw_bounding_boxes(img, boxes, labels=labels, colors=colors, fill=True) + + path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "fakedata", "draw_boxes_util.png") + if not os.path.exists(path): + res = Image.fromarray(result.permute(1, 2, 0).contiguous().numpy()) + res.save(path) + + if PILLOW_VERSION >= (8, 2): + # The reference image is only valid for new PIL versions + expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1) + assert_equal(result, expected) + + # Check if modification is not in place + assert_equal(boxes, boxes_cp) + assert_equal(img, img_cp) + + +def test_draw_boxes_vanilla(): + img = torch.full((3, 100, 100), 0, dtype=torch.uint8) + img_cp = img.clone() + boxes_cp = boxes.clone() + result = utils.draw_bounding_boxes(img, boxes, fill=False, width=7) + + path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "fakedata", "draw_boxes_vanilla.png") + if not os.path.exists(path): + res = Image.fromarray(result.permute(1, 2, 0).contiguous().numpy()) + res.save(path) + + expected = torch.as_tensor(np.array(Image.open(path))).permute(2, 0, 1) + assert_equal(result, expected) + # Check if modification is not in place + assert_equal(boxes, boxes_cp) + assert_equal(img, img_cp) + + +def test_draw_invalid_boxes(): + img_tp = ((1, 1, 1), (1, 2, 3)) + img_wrong1 = torch.full((3, 5, 5), 255, dtype=torch.float) + img_wrong2 = torch.full((1, 3, 5, 5), 255, dtype=torch.uint8) + boxes = torch.tensor([[0, 0, 20, 20], [0, 0, 0, 0], + [10, 15, 30, 35], [23, 35, 93, 95]], dtype=torch.float) + with pytest.raises(TypeError, match="Tensor expected"): + utils.draw_bounding_boxes(img_tp, boxes) + with pytest.raises(ValueError, match="Tensor uint8 expected"): + utils.draw_bounding_boxes(img_wrong1, boxes) + with pytest.raises(ValueError, match="Pass individual images, not batches"): + utils.draw_bounding_boxes(img_wrong2, boxes) + + +@pytest.mark.parametrize('colors', [ + None, + ['red', 'blue'], + ['#FF00FF', (1, 34, 122)], +]) +@pytest.mark.parametrize('alpha', (0, .5, .7, 1)) +def test_draw_segmentation_masks(colors, alpha): + """This test makes sure that masks draw their corresponding color where they should""" + num_masks, h, w = 2, 100, 100 + dtype = torch.uint8 + img = torch.randint(0, 256, size=(3, h, w), dtype=dtype) 
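+    # The checks further down rely on draw_segmentation_masks alpha-blending each
+    # mask's color into the image, i.e. out = img * (1 - alpha) + color * alpha
+    # cast back to uint8, so alpha=0 must leave masked pixels untouched and
+    # alpha=1 must paint the pure color.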
+ masks = torch.randint(0, 2, (num_masks, h, w), dtype=torch.bool) + + # For testing we enforce that there's no overlap between the masks. The + # current behaviour is that the last mask's color will take priority when + # masks overlap, but this makes testing slightly harder so we don't really + # care + overlap = masks[0] & masks[1] + masks[:, overlap] = False + + out = utils.draw_segmentation_masks(img, masks, colors=colors, alpha=alpha) + assert out.dtype == dtype + assert out is not img + + # Make sure the image didn't change where there's no mask + masked_pixels = masks[0] | masks[1] + assert_equal(img[:, ~masked_pixels], out[:, ~masked_pixels]) + + if colors is None: + colors = utils._generate_color_palette(num_masks) + + # Make sure each mask draws with its own color + for mask, color in zip(masks, colors): + if isinstance(color, str): + color = ImageColor.getrgb(color) + color = torch.tensor(color, dtype=dtype) + + if alpha == 1: + assert (out[:, mask] == color[:, None]).all() + elif alpha == 0: + assert (out[:, mask] == img[:, mask]).all() + + interpolated_color = (img[:, mask] * (1 - alpha) + color[:, None] * alpha).to(dtype) + torch.testing.assert_close(out[:, mask], interpolated_color, rtol=0.0, atol=1.0) + + +def test_draw_segmentation_masks_errors(): + h, w = 10, 10 + + masks = torch.randint(0, 2, size=(h, w), dtype=torch.bool) + img = torch.randint(0, 256, size=(3, h, w), dtype=torch.uint8) + + with pytest.raises(TypeError, match="The image must be a tensor"): + utils.draw_segmentation_masks(image="Not A Tensor Image", masks=masks) + with pytest.raises(ValueError, match="The image dtype must be"): + img_bad_dtype = torch.randint(0, 256, size=(3, h, w), dtype=torch.int64) + utils.draw_segmentation_masks(image=img_bad_dtype, masks=masks) + with pytest.raises(ValueError, match="Pass individual images, not batches"): + batch = torch.randint(0, 256, size=(10, 3, h, w), dtype=torch.uint8) + utils.draw_segmentation_masks(image=batch, masks=masks) + with pytest.raises(ValueError, match="Pass an RGB image"): + one_channel = torch.randint(0, 256, size=(1, h, w), dtype=torch.uint8) + utils.draw_segmentation_masks(image=one_channel, masks=masks) + with pytest.raises(ValueError, match="The masks must be of dtype bool"): + masks_bad_dtype = torch.randint(0, 2, size=(h, w), dtype=torch.float) + utils.draw_segmentation_masks(image=img, masks=masks_bad_dtype) + with pytest.raises(ValueError, match="masks must be of shape"): + masks_bad_shape = torch.randint(0, 2, size=(3, 2, h, w), dtype=torch.bool) + utils.draw_segmentation_masks(image=img, masks=masks_bad_shape) + with pytest.raises(ValueError, match="must have the same height and width"): + masks_bad_shape = torch.randint(0, 2, size=(h + 4, w), dtype=torch.bool) + utils.draw_segmentation_masks(image=img, masks=masks_bad_shape) + with pytest.raises(ValueError, match="There are more masks"): + utils.draw_segmentation_masks(image=img, masks=masks, colors=[]) + with pytest.raises(ValueError, match="colors must be a tuple or a string, or a list thereof"): + bad_colors = np.array(['red', 'blue']) # should be a list + utils.draw_segmentation_masks(image=img, masks=masks, colors=bad_colors) + with pytest.raises(ValueError, match="It seems that you passed a tuple of colors instead of"): + bad_colors = ('red', 'blue') # should be a list + utils.draw_segmentation_masks(image=img, masks=masks, colors=bad_colors) + + +if __name__ == "__main__": + pytest.main([__file__]) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_video_reader.py 
b/pretrained_model/pytorch_vision_v0.10.0/test/test_video_reader.py new file mode 100644 index 0000000000000000000000000000000000000000..9818b6fc900a54ee7acfeb7d8fbd5f03cd906720 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_video_reader.py @@ -0,0 +1,1282 @@ +import collections +import math +import os +import time +import unittest +from fractions import Fraction + +import numpy as np +import torch +import torchvision.io as io +from numpy.random import randint +from torchvision.io import _HAS_VIDEO_OPT +from common_utils import PY39_SKIP +from _assert_utils import assert_equal + + +try: + import av + + # Do a version test too + io.video._check_av_available() +except ImportError: + av = None + + +from urllib.error import URLError + + +VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos") + +CheckerConfig = [ + "duration", + "video_fps", + "audio_sample_rate", + # We find for some videos (e.g. HMDB51 videos), the decoded audio frames and pts are + # slightly different between TorchVision decoder and PyAv decoder. So omit it during check + "check_aframes", + "check_aframe_pts", +] +GroundTruth = collections.namedtuple("GroundTruth", " ".join(CheckerConfig)) + +all_check_config = GroundTruth( + duration=0, + video_fps=0, + audio_sample_rate=0, + check_aframes=True, + check_aframe_pts=True, +) + +test_videos = { + "RATRACE_wave_f_nm_np1_fr_goo_37.avi": GroundTruth( + duration=2.0, + video_fps=30.0, + audio_sample_rate=None, + check_aframes=True, + check_aframe_pts=True, + ), + "SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi": GroundTruth( + duration=2.0, + video_fps=30.0, + audio_sample_rate=None, + check_aframes=True, + check_aframe_pts=True, + ), + "TrumanShow_wave_f_nm_np1_fr_med_26.avi": GroundTruth( + duration=2.0, + video_fps=30.0, + audio_sample_rate=None, + check_aframes=True, + check_aframe_pts=True, + ), + "v_SoccerJuggling_g23_c01.avi": GroundTruth( + duration=8.0, + video_fps=29.97, + audio_sample_rate=None, + check_aframes=True, + check_aframe_pts=True, + ), + "v_SoccerJuggling_g24_c01.avi": GroundTruth( + duration=8.0, + video_fps=29.97, + audio_sample_rate=None, + check_aframes=True, + check_aframe_pts=True, + ), + "R6llTwEh07w.mp4": GroundTruth( + duration=10.0, + video_fps=30.0, + audio_sample_rate=44100, + # PyAv miss one audio frame at the beginning (pts=0) + check_aframes=False, + check_aframe_pts=False, + ), + "SOX5yA1l24A.mp4": GroundTruth( + duration=11.0, + video_fps=29.97, + audio_sample_rate=48000, + # PyAv miss one audio frame at the beginning (pts=0) + check_aframes=False, + check_aframe_pts=False, + ), + "WUzgd7C1pWA.mp4": GroundTruth( + duration=11.0, + video_fps=29.97, + audio_sample_rate=48000, + # PyAv miss one audio frame at the beginning (pts=0) + check_aframes=False, + check_aframe_pts=False, + ), +} + + +DecoderResult = collections.namedtuple( + "DecoderResult", "vframes vframe_pts vtimebase aframes aframe_pts atimebase" +) + +"""av_seek_frame is imprecise so seek to a timestamp earlier by a margin +The unit of margin is second""" +seek_frame_margin = 0.25 + + +def _read_from_stream( + container, start_pts, end_pts, stream, stream_name, buffer_size=4 +): + """ + Args: + container: pyav container + start_pts/end_pts: the starting/ending Presentation TimeStamp where + frames are read + stream: pyav stream + stream_name: a dictionary of streams. For example, {"video": 0} means + video stream at stream index 0 + buffer_size: pts of frames decoded by PyAv is not guaranteed to be in + ascending order. 
We need to decode more frames even when we meet end + pts + """ + # seeking in the stream is imprecise. Thus, seek to an ealier PTS by a margin + margin = 1 + seek_offset = max(start_pts - margin, 0) + + container.seek(seek_offset, any_frame=False, backward=True, stream=stream) + frames = {} + buffer_count = 0 + for frame in container.decode(**stream_name): + if frame.pts < start_pts: + continue + if frame.pts <= end_pts: + frames[frame.pts] = frame + else: + buffer_count += 1 + if buffer_count >= buffer_size: + break + result = [frames[pts] for pts in sorted(frames)] + + return result + + +def _get_timebase_by_av_module(full_path): + container = av.open(full_path) + video_time_base = container.streams.video[0].time_base + if container.streams.audio: + audio_time_base = container.streams.audio[0].time_base + else: + audio_time_base = None + return video_time_base, audio_time_base + + +def _fraction_to_tensor(fraction): + ret = torch.zeros([2], dtype=torch.int32) + ret[0] = fraction.numerator + ret[1] = fraction.denominator + return ret + + +def _decode_frames_by_av_module( + full_path, + video_start_pts=0, + video_end_pts=None, + audio_start_pts=0, + audio_end_pts=None, +): + """ + Use PyAv to decode video frames. This provides a reference for our decoder + to compare the decoding results. + Input arguments: + full_path: video file path + video_start_pts/video_end_pts: the starting/ending Presentation TimeStamp where + frames are read + """ + if video_end_pts is None: + video_end_pts = float("inf") + if audio_end_pts is None: + audio_end_pts = float("inf") + container = av.open(full_path) + + video_frames = [] + vtimebase = torch.zeros([0], dtype=torch.int32) + if container.streams.video: + video_frames = _read_from_stream( + container, + video_start_pts, + video_end_pts, + container.streams.video[0], + {"video": 0}, + ) + # container.streams.video[0].average_rate is not a reliable estimator of + # frame rate. It can be wrong for certain codec, such as VP80 + # So we do not return video fps here + vtimebase = _fraction_to_tensor(container.streams.video[0].time_base) + + audio_frames = [] + atimebase = torch.zeros([0], dtype=torch.int32) + if container.streams.audio: + audio_frames = _read_from_stream( + container, + audio_start_pts, + audio_end_pts, + container.streams.audio[0], + {"audio": 0}, + ) + atimebase = _fraction_to_tensor(container.streams.audio[0].time_base) + + container.close() + vframes = [frame.to_rgb().to_ndarray() for frame in video_frames] + vframes = torch.as_tensor(np.stack(vframes)) + + vframe_pts = torch.tensor([frame.pts for frame in video_frames], dtype=torch.int64) + + aframes = [frame.to_ndarray() for frame in audio_frames] + if aframes: + aframes = np.transpose(np.concatenate(aframes, axis=1)) + aframes = torch.as_tensor(aframes) + else: + aframes = torch.empty((1, 0), dtype=torch.float32) + + aframe_pts = torch.tensor( + [audio_frame.pts for audio_frame in audio_frames], dtype=torch.int64 + ) + + return DecoderResult( + vframes=vframes, + vframe_pts=vframe_pts, + vtimebase=vtimebase, + aframes=aframes, + aframe_pts=aframe_pts, + atimebase=atimebase, + ) + + +def _pts_convert(pts, timebase_from, timebase_to, round_func=math.floor): + """convert pts between different time bases + Args: + pts: presentation timestamp, float + timebase_from: original timebase. Fraction + timebase_to: new timebase. Fraction + round_func: rounding function. 
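For illustration only, a minimal sketch of the conversion performed by _pts_convert, using hypothetical timebases (1/30000 for video, 1/44100 for audio) and a hypothetical pts value; only the standard library is assumed.

import math
from fractions import Fraction

video_timebase = Fraction(1, 30000)   # hypothetical video stream timebase
audio_timebase = Fraction(1, 44100)   # hypothetical audio stream timebase
video_pts = 60060                     # 60060 * 1/30000 = 2.002 seconds

# Same arithmetic as _pts_convert: scale by the source timebase, divide by the target one.
audio_pts = int(math.floor(Fraction(video_pts, 1) * video_timebase / audio_timebase))
assert audio_pts == 88288             # 2.002 s expressed in 1/44100 ticks, rounded down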
+ """ + new_pts = Fraction(pts, 1) * timebase_from / timebase_to + return int(round_func(new_pts)) + + +def _get_video_tensor(video_dir, video_file): + """open a video file, and represent the video data by a PT tensor""" + full_path = os.path.join(video_dir, video_file) + + assert os.path.exists(full_path), "File not found: %s" % full_path + + with open(full_path, "rb") as fp: + video_tensor = torch.from_numpy(np.frombuffer(fp.read(), dtype=np.uint8)) + + return full_path, video_tensor + + +@unittest.skipIf(av is None, "PyAV unavailable") +@unittest.skipIf(_HAS_VIDEO_OPT is False, "Didn't compile with ffmpeg") +class TestVideoReader(unittest.TestCase): + def check_separate_decoding_result(self, tv_result, config): + """check the decoding results from TorchVision decoder + """ + vframes, vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + tv_result + ) + + video_duration = vduration.item() * Fraction( + vtimebase[0].item(), vtimebase[1].item() + ) + self.assertAlmostEqual(video_duration, config.duration, delta=0.5) + + self.assertAlmostEqual(vfps.item(), config.video_fps, delta=0.5) + if asample_rate.numel() > 0: + self.assertEqual(asample_rate.item(), config.audio_sample_rate) + audio_duration = aduration.item() * Fraction( + atimebase[0].item(), atimebase[1].item() + ) + self.assertAlmostEqual(audio_duration, config.duration, delta=0.5) + + # check if pts of video frames are sorted in ascending order + for i in range(len(vframe_pts) - 1): + self.assertEqual(vframe_pts[i] < vframe_pts[i + 1], True) + + if len(aframe_pts) > 1: + # check if pts of audio frames are sorted in ascending order + for i in range(len(aframe_pts) - 1): + self.assertEqual(aframe_pts[i] < aframe_pts[i + 1], True) + + def check_probe_result(self, result, config): + vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result + video_duration = vduration.item() * Fraction( + vtimebase[0].item(), vtimebase[1].item() + ) + self.assertAlmostEqual(video_duration, config.duration, delta=0.5) + self.assertAlmostEqual(vfps.item(), config.video_fps, delta=0.5) + if asample_rate.numel() > 0: + self.assertEqual(asample_rate.item(), config.audio_sample_rate) + audio_duration = aduration.item() * Fraction( + atimebase[0].item(), atimebase[1].item() + ) + self.assertAlmostEqual(audio_duration, config.duration, delta=0.5) + + def check_meta_result(self, result, config): + self.assertAlmostEqual(result.video_duration, config.duration, delta=0.5) + self.assertAlmostEqual(result.video_fps, config.video_fps, delta=0.5) + if result.has_audio > 0: + self.assertEqual(result.audio_sample_rate, config.audio_sample_rate) + self.assertAlmostEqual(result.audio_duration, config.duration, delta=0.5) + + def compare_decoding_result(self, tv_result, ref_result, config=all_check_config): + """ + Compare decoding results from two sources. 
+ Args: + tv_result: decoding results from TorchVision decoder + ref_result: reference decoding results which can be from either PyAv + decoder or TorchVision decoder with getPtsOnly = 1 + config: config of decoding results checker + """ + vframes, vframe_pts, vtimebase, _vfps, _vduration, \ + aframes, aframe_pts, atimebase, _asample_rate, _aduration = ( + tv_result + ) + if isinstance(ref_result, list): + # the ref_result is from new video_reader decoder + ref_result = DecoderResult( + vframes=ref_result[0], + vframe_pts=ref_result[1], + vtimebase=ref_result[2], + aframes=ref_result[5], + aframe_pts=ref_result[6], + atimebase=ref_result[7], + ) + + if vframes.numel() > 0 and ref_result.vframes.numel() > 0: + mean_delta = torch.mean( + torch.abs(vframes.float() - ref_result.vframes.float()) + ) + self.assertAlmostEqual(mean_delta, 0, delta=8.0) + + mean_delta = torch.mean( + torch.abs(vframe_pts.float() - ref_result.vframe_pts.float()) + ) + self.assertAlmostEqual(mean_delta, 0, delta=1.0) + + assert_equal(vtimebase, ref_result.vtimebase) + + if ( + config.check_aframes + and aframes.numel() > 0 + and ref_result.aframes.numel() > 0 + ): + """Audio stream is available and audio frame is required to return + from decoder""" + assert_equal(aframes, ref_result.aframes) + + if ( + config.check_aframe_pts + and aframe_pts.numel() > 0 + and ref_result.aframe_pts.numel() > 0 + ): + """Audio stream is available""" + assert_equal(aframe_pts, ref_result.aframe_pts) + + assert_equal(atimebase, ref_result.atimebase) + + @unittest.skip( + "This stress test will iteratively decode the same set of videos." + "It helps to detect memory leak but it takes lots of time to run." + "By default, it is disabled" + ) + def test_stress_test_read_video_from_file(self): + num_iter = 10000 + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for _i in range(num_iter): + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + # pass 1: decode all frames using new decoder + torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + + @PY39_SKIP + def test_read_video_from_file(self): + """ + Test the case when decoder starts with a video file to decode frames. 
+ """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + # pass 1: decode all frames using new decoder + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + # pass 2: decode all frames using av + pyav_result = _decode_frames_by_av_module(full_path) + # check results from TorchVision decoder + self.check_separate_decoding_result(tv_result, config) + # compare decoding results + self.compare_decoding_result(tv_result, pyav_result, config) + + @PY39_SKIP + def test_read_video_from_file_read_single_stream_only(self): + """ + Test the case when decoder starts with a video file to decode frames, and + only reads video stream and ignores audio stream + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + for readVideoStream, readAudioStream in [(1, 0), (0, 1)]: + # decode all frames using new decoder + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + readVideoStream, + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + readAudioStream, + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + + vframes, vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + tv_result + ) + + self.assertEqual(vframes.numel() > 0, readVideoStream) + self.assertEqual(vframe_pts.numel() > 0, readVideoStream) + self.assertEqual(vtimebase.numel() > 0, readVideoStream) + self.assertEqual(vfps.numel() > 0, readVideoStream) + + expect_audio_data = ( + readAudioStream == 1 and config.audio_sample_rate is not None + ) + self.assertEqual(aframes.numel() > 0, expect_audio_data) + self.assertEqual(aframe_pts.numel() > 0, expect_audio_data) + self.assertEqual(atimebase.numel() > 0, expect_audio_data) + self.assertEqual(asample_rate.numel() > 0, expect_audio_data) + + def test_read_video_from_file_rescale_min_dimension(self): + """ + Test the case when decoder starts with a video file to decode frames, and + video min dimension between height and width is set. 
+ """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 128, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual( + min_dimension, min(tv_result[0].size(1), tv_result[0].size(2)) + ) + + def test_read_video_from_file_rescale_max_dimension(self): + """ + Test the case when decoder starts with a video file to decode frames, and + video min dimension between height and width is set. + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 85 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual( + max_dimension, max(tv_result[0].size(1), tv_result[0].size(2)) + ) + + def test_read_video_from_file_rescale_both_min_max_dimension(self): + """ + Test the case when decoder starts with a video file to decode frames, and + video min dimension between height and width is set. + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 64, 85 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual( + min_dimension, min(tv_result[0].size(1), tv_result[0].size(2)) + ) + self.assertEqual( + max_dimension, max(tv_result[0].size(1), tv_result[0].size(2)) + ) + + def test_read_video_from_file_rescale_width(self): + """ + Test the case when decoder starts with a video file to decode frames, and + video width is set. 
+ """ + # video related + width, height, min_dimension, max_dimension = 256, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual(tv_result[0].size(2), width) + + def test_read_video_from_file_rescale_height(self): + """ + Test the case when decoder starts with a video file to decode frames, and + video height is set. + """ + # video related + width, height, min_dimension, max_dimension = 0, 224, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual(tv_result[0].size(1), height) + + def test_read_video_from_file_rescale_width_and_height(self): + """ + Test the case when decoder starts with a video file to decode frames, and + both video height and width are set. 
+ """ + # video related + width, height, min_dimension, max_dimension = 320, 240, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertEqual(tv_result[0].size(1), height) + self.assertEqual(tv_result[0].size(2), width) + + @PY39_SKIP + def test_read_video_from_file_audio_resampling(self): + """ + Test the case when decoder starts with a video file to decode frames, and + audio waveform are resampled + """ + + for samples in [9600, 96000]: # downsampling # upsampling + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + channels = 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, _config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + tv_result = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + vframes, vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + tv_result + ) + if aframes.numel() > 0: + self.assertEqual(samples, asample_rate.item()) + self.assertEqual(1, aframes.size(1)) + # when audio stream is found + duration = ( + float(aframe_pts[-1]) + * float(atimebase[0]) + / float(atimebase[1]) + ) + self.assertAlmostEqual( + aframes.size(0), + int(duration * asample_rate.item()), + delta=0.1 * asample_rate.item(), + ) + + @PY39_SKIP + def test_compare_read_video_from_memory_and_file(self): + """ + Test the case when video is already in memory, and decoder reads data in memory + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + + # pass 1: decode all frames using cpp decoder + tv_result_memory = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.check_separate_decoding_result(tv_result_memory, config) + # 
pass 2: decode all frames from file + tv_result_file = torch.ops.video_reader.read_video_from_file( + full_path, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + + self.check_separate_decoding_result(tv_result_file, config) + # finally, compare results decoded from memory and file + self.compare_decoding_result(tv_result_memory, tv_result_file) + + @PY39_SKIP + def test_read_video_from_memory(self): + """ + Test the case when video is already in memory, and decoder reads data in memory + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + + # pass 1: decode all frames using cpp decoder + tv_result = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + # pass 2: decode all frames using av + pyav_result = _decode_frames_by_av_module(full_path) + + self.check_separate_decoding_result(tv_result, config) + self.compare_decoding_result(tv_result, pyav_result, config) + + @PY39_SKIP + def test_read_video_from_memory_get_pts_only(self): + """ + Test the case when video is already in memory, and decoder reads data in memory. 
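A minimal sketch of how a file can be handed to the in-memory entry point: the raw bytes are wrapped in a uint8 tensor, matching _get_video_tensor above. The path is hypothetical.

import numpy as np
import torch

with open("/path/to/clip.mp4", "rb") as fp:          # hypothetical path
    video_tensor = torch.from_numpy(np.frombuffer(fp.read(), dtype=np.uint8))

# video_tensor can now be passed as the first argument of
# torch.ops.video_reader.read_video_from_memory; the remaining arguments
# follow the same order as read_video_from_file, minus the path.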
+ Compare frame pts between decoding for pts only and full decoding + for both pts and frame data + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + + # pass 1: decode all frames using cpp decoder + tv_result = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + self.assertAlmostEqual(config.video_fps, tv_result[3].item(), delta=0.01) + + # pass 2: decode all frames to get PTS only using cpp decoder + tv_result_pts_only = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 1, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + + self.assertEqual(tv_result_pts_only[0].numel(), 0) + self.assertEqual(tv_result_pts_only[5].numel(), 0) + self.compare_decoding_result(tv_result, tv_result_pts_only) + + @PY39_SKIP + def test_read_video_in_range_from_memory(self): + """ + Test the case when video is already in memory, and decoder reads data in memory. 
+ In addition, decoder takes meaningful start- and end PTS as input, and decode + frames within that interval + """ + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + # pass 1: decode all frames using new decoder + tv_result = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + vframes, vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + tv_result + ) + self.assertAlmostEqual(config.video_fps, vfps.item(), delta=0.01) + + for num_frames in [4, 8, 16, 32, 64, 128]: + start_pts_ind_max = vframe_pts.size(0) - num_frames + if start_pts_ind_max <= 0: + continue + # randomly pick start pts + start_pts_ind = randint(0, start_pts_ind_max) + end_pts_ind = start_pts_ind + num_frames - 1 + video_start_pts = vframe_pts[start_pts_ind] + video_end_pts = vframe_pts[end_pts_ind] + + video_timebase_num, video_timebase_den = vtimebase[0], vtimebase[1] + if len(atimebase) > 0: + # when audio stream is available + audio_timebase_num, audio_timebase_den = atimebase[0], atimebase[1] + audio_start_pts = _pts_convert( + video_start_pts.item(), + Fraction(video_timebase_num.item(), video_timebase_den.item()), + Fraction(audio_timebase_num.item(), audio_timebase_den.item()), + math.floor, + ) + audio_end_pts = _pts_convert( + video_end_pts.item(), + Fraction(video_timebase_num.item(), video_timebase_den.item()), + Fraction(audio_timebase_num.item(), audio_timebase_den.item()), + math.ceil, + ) + + # pass 2: decode frames in the randomly generated range + tv_result = torch.ops.video_reader.read_video_from_memory( + video_tensor, + seek_frame_margin, + 0, # getPtsOnly + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + video_start_pts, + video_end_pts, + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + audio_start_pts, + audio_end_pts, + audio_timebase_num, + audio_timebase_den, + ) + + # pass 3: decode frames in range using PyAv + video_timebase_av, audio_timebase_av = _get_timebase_by_av_module( + full_path + ) + + video_start_pts_av = _pts_convert( + video_start_pts.item(), + Fraction(video_timebase_num.item(), video_timebase_den.item()), + Fraction( + video_timebase_av.numerator, video_timebase_av.denominator + ), + math.floor, + ) + video_end_pts_av = _pts_convert( + video_end_pts.item(), + Fraction(video_timebase_num.item(), video_timebase_den.item()), + Fraction( + video_timebase_av.numerator, video_timebase_av.denominator + ), + math.ceil, + ) + if audio_timebase_av: + audio_start_pts = _pts_convert( + video_start_pts.item(), + Fraction(video_timebase_num.item(), video_timebase_den.item()), + Fraction( + audio_timebase_av.numerator, audio_timebase_av.denominator + ), + math.floor, + ) + audio_end_pts = _pts_convert( + video_end_pts.item(), + Fraction(video_timebase_num.item(), 
video_timebase_den.item()), + Fraction( + audio_timebase_av.numerator, audio_timebase_av.denominator + ), + math.ceil, + ) + + pyav_result = _decode_frames_by_av_module( + full_path, + video_start_pts_av, + video_end_pts_av, + audio_start_pts, + audio_end_pts, + ) + + self.assertEqual(tv_result[0].size(0), num_frames) + if pyav_result.vframes.size(0) == num_frames: + # if PyAv decodes a different number of video frames, skip + # comparing the decoding results between Torchvision video reader + # and PyAv + self.compare_decoding_result(tv_result, pyav_result, config) + + def test_probe_video_from_file(self): + """ + Test the case when decoder probes a video file + """ + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + probe_result = torch.ops.video_reader.probe_video_from_file(full_path) + self.check_probe_result(probe_result, config) + + def test_probe_video_from_memory(self): + """ + Test the case when decoder probes a video in memory + """ + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + probe_result = torch.ops.video_reader.probe_video_from_memory(video_tensor) + self.check_probe_result(probe_result, config) + + def test_probe_video_from_memory_script(self): + scripted_fun = torch.jit.script(io._probe_video_from_memory) + self.assertIsNotNone(scripted_fun) + + for test_video, config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + probe_result = scripted_fun(video_tensor) + self.check_meta_result(probe_result, config) + + @PY39_SKIP + def test_read_video_from_memory_scripted(self): + """ + Test the case when video is already in memory, and decoder reads data in memory + """ + # video related + width, height, min_dimension, max_dimension = 0, 0, 0, 0 + video_start_pts, video_end_pts = 0, -1 + video_timebase_num, video_timebase_den = 0, 1 + # audio related + samples, channels = 0, 0 + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase_num, audio_timebase_den = 0, 1 + + scripted_fun = torch.jit.script(io._read_video_from_memory) + self.assertIsNotNone(scripted_fun) + + for test_video, _config in test_videos.items(): + full_path, video_tensor = _get_video_tensor(VIDEO_DIR, test_video) + + # decode all frames using cpp decoder + scripted_fun( + video_tensor, + seek_frame_margin, + 1, # readVideoStream + width, + height, + min_dimension, + max_dimension, + [video_start_pts, video_end_pts], + video_timebase_num, + video_timebase_den, + 1, # readAudioStream + samples, + channels, + [audio_start_pts, audio_end_pts], + audio_timebase_num, + audio_timebase_den, + ) + # FUTURE: check value of video / audio frames + + def test_audio_video_sync(self): + """Test if audio/video are synchronised with pyav output.""" + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + container = av.open(full_path) + if not container.streams.audio: + # Skip if no audio stream + continue + start_pts_val, cutoff = 0, 1 + if container.streams.video: + video = container.streams.video[0] + arr = [] + for index, frame in enumerate(container.decode(video)): + if index == cutoff: + start_pts_val = frame.pts + if index >= cutoff: + arr.append(frame.to_rgb().to_ndarray()) + visual, _, info = io.read_video(full_path, start_pts=start_pts_val, pts_unit='pts') + self.assertAlmostEqual( + config.video_fps, info['video_fps'], delta=0.0001 + ) + arr = torch.Tensor(arr) + if arr.shape == visual.shape: + 
self.assertGreaterEqual( + torch.mean(torch.isclose(visual.float(), arr, atol=1e-5).float()), 0.99) + + container = av.open(full_path) + if container.streams.audio: + audio = container.streams.audio[0] + arr = [] + for index, frame in enumerate(container.decode(audio)): + if index >= cutoff: + arr.append(frame.to_ndarray()) + _, audio, _ = io.read_video(full_path, start_pts=start_pts_val, pts_unit='pts') + arr = torch.as_tensor(np.concatenate(arr, axis=1)) + if arr.shape == audio.shape: + self.assertGreaterEqual( + torch.mean(torch.isclose(audio.float(), arr).float()), 0.99) + + +if __name__ == "__main__": + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/test_videoapi.py b/pretrained_model/pytorch_vision_v0.10.0/test/test_videoapi.py new file mode 100644 index 0000000000000000000000000000000000000000..da73c7cd17d3b585c688b05c619c42c4a6172d51 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/test_videoapi.py @@ -0,0 +1,200 @@ +import collections +import os +import unittest + +import torch +import torchvision +from torchvision.io import _HAS_VIDEO_OPT, VideoReader +from torchvision.datasets.utils import download_url + +from common_utils import PY39_SKIP + +try: + import av + + # Do a version test too + torchvision.io.video._check_av_available() +except ImportError: + av = None + + +VIDEO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "assets", "videos") + +CheckerConfig = ["duration", "video_fps", "audio_sample_rate"] +GroundTruth = collections.namedtuple("GroundTruth", " ".join(CheckerConfig)) + + +def fate(name, path="."): + """Download and return a path to a sample from the FFmpeg test suite. + See the `FFmpeg Automated Test Environment <https://www.ffmpeg.org/fate.html>`_ + """ + + file_name = name.split("/")[1] + download_url("http://fate.ffmpeg.org/fate-suite/" + name, path, file_name) + return os.path.join(path, file_name) + + +test_videos = { + "RATRACE_wave_f_nm_np1_fr_goo_37.avi": GroundTruth( + duration=2.0, video_fps=30.0, audio_sample_rate=None + ), + "SchoolRulesHowTheyHelpUs_wave_f_nm_np1_ba_med_0.avi": GroundTruth( + duration=2.0, video_fps=30.0, audio_sample_rate=None + ), + "TrumanShow_wave_f_nm_np1_fr_med_26.avi": GroundTruth( + duration=2.0, video_fps=30.0, audio_sample_rate=None + ), + "v_SoccerJuggling_g23_c01.avi": GroundTruth( + duration=8.0, video_fps=29.97, audio_sample_rate=None + ), + "v_SoccerJuggling_g24_c01.avi": GroundTruth( + duration=8.0, video_fps=29.97, audio_sample_rate=None + ), + "R6llTwEh07w.mp4": GroundTruth( + duration=10.0, video_fps=30.0, audio_sample_rate=44100 + ), + "SOX5yA1l24A.mp4": GroundTruth( + duration=11.0, video_fps=29.97, audio_sample_rate=48000 + ), + "WUzgd7C1pWA.mp4": GroundTruth( + duration=11.0, video_fps=29.97, audio_sample_rate=48000 + ), +} + + +@unittest.skipIf(_HAS_VIDEO_OPT is False, "Didn't compile with ffmpeg") +@PY39_SKIP +class TestVideoApi(unittest.TestCase): + @unittest.skipIf(av is None, "PyAV unavailable") + def test_frame_reading(self): + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + av_reader = av.open(full_path) + + if av_reader.streams.video: + video_reader = VideoReader(full_path, "video") + for av_frame in av_reader.decode(av_reader.streams.video[0]): + vr_frame = next(video_reader) + + self.assertAlmostEqual( + float(av_frame.pts * av_frame.time_base), + vr_frame["pts"], + delta=0.1, + ) + + av_array = torch.tensor(av_frame.to_rgb().to_ndarray()).permute( + 2, 0, 1 + ) + vr_array = vr_frame["data"] + 
mean_delta = torch.mean( + torch.abs(av_array.float() - vr_array.float()) + ) + # on average the difference is very small and caused + # by decoding (around 1%) + # TODO: asses empirically how to set this? atm it's 1% + # averaged over all frames + self.assertTrue(mean_delta.item() < 2.5) + + av_reader = av.open(full_path) + if av_reader.streams.audio: + video_reader = VideoReader(full_path, "audio") + for av_frame in av_reader.decode(av_reader.streams.audio[0]): + vr_frame = next(video_reader) + self.assertAlmostEqual( + float(av_frame.pts * av_frame.time_base), + vr_frame["pts"], + delta=0.1, + ) + + av_array = torch.tensor(av_frame.to_ndarray()).permute(1, 0) + vr_array = vr_frame["data"] + + max_delta = torch.max( + torch.abs(av_array.float() - vr_array.float()) + ) + # we assure that there is never more than 1% difference in signal + self.assertTrue(max_delta.item() < 0.001) + + def test_metadata(self): + """ + Test that the metadata returned via pyav corresponds to the one returned + by the new video decoder API + """ + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + reader = VideoReader(full_path, "video") + reader_md = reader.get_metadata() + self.assertAlmostEqual( + config.video_fps, reader_md["video"]["fps"][0], delta=0.0001 + ) + self.assertAlmostEqual( + config.duration, reader_md["video"]["duration"][0], delta=0.5 + ) + + def test_seek_start(self): + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + video_reader = VideoReader(full_path, "video") + num_frames = 0 + for frame in video_reader: + num_frames += 1 + + # now seek the container to 0 and do it again + # It's often that starting seek can be inprecise + # this way and it doesn't start at 0 + video_reader.seek(0) + start_num_frames = 0 + for frame in video_reader: + start_num_frames += 1 + + self.assertEqual(start_num_frames, num_frames) + + # now seek the container to < 0 to check for unexpected behaviour + video_reader.seek(-1) + start_num_frames = 0 + for frame in video_reader: + start_num_frames += 1 + + self.assertEqual(start_num_frames, num_frames) + + def test_accurateseek_middle(self): + for test_video, config in test_videos.items(): + full_path = os.path.join(VIDEO_DIR, test_video) + + stream = "video" + video_reader = VideoReader(full_path, stream) + md = video_reader.get_metadata() + duration = md[stream]["duration"][0] + if duration is not None: + + num_frames = 0 + for frame in video_reader: + num_frames += 1 + + video_reader.seek(duration / 2) + middle_num_frames = 0 + for frame in video_reader: + middle_num_frames += 1 + + self.assertTrue(middle_num_frames < num_frames) + self.assertAlmostEqual(middle_num_frames, num_frames // 2, delta=1) + + video_reader.seek(duration / 2) + frame = next(video_reader) + lb = duration / 2 - 1 / md[stream]["fps"][0] + ub = duration / 2 + 1 / md[stream]["fps"][0] + self.assertTrue((lb <= frame["pts"]) & (ub >= frame["pts"])) + + def test_fate_suite(self): + video_path = fate("sub/MovText_capability_tester.mp4", VIDEO_DIR) + vr = VideoReader(video_path) + metadata = vr.get_metadata() + + self.assertTrue(metadata["subtitles"]["duration"] is not None) + os.remove(video_path) + + +if __name__ == "__main__": + unittest.main() diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/CMakeLists.txt b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/CMakeLists.txt new file mode 100644 index 
0000000000000000000000000000000000000000..c79382470bd528e17e38fb01ad3078d77eccf24b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/CMakeLists.txt @@ -0,0 +1,13 @@ +cmake_minimum_required(VERSION 3.1 FATAL_ERROR) +project(test_frcnn_tracing) + +find_package(Torch REQUIRED) +find_package(TorchVision REQUIRED) + +# This due to some headers importing Python.h +find_package(Python3 COMPONENTS Development) + +add_executable(test_frcnn_tracing test_frcnn_tracing.cpp) +target_compile_features(test_frcnn_tracing PUBLIC cxx_range_for) +target_link_libraries(test_frcnn_tracing ${TORCH_LIBRARIES} TorchVision::TorchVision Python3::Python) +set_property(TARGET test_frcnn_tracing PROPERTY CXX_STANDARD 14) diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/test_frcnn_tracing.cpp b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/test_frcnn_tracing.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f5f350b6b02c0fbf3b0cfa3f2f44ab69c1c231a4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/test_frcnn_tracing.cpp @@ -0,0 +1,58 @@ +#include <torch/script.h> +#include <torch/torch.h> +#include <torchvision/vision.h> +#include <torchvision/ops/nms.h> + + +int main() { + torch::DeviceType device_type; + device_type = torch::kCPU; + + torch::jit::script::Module module; + try { + std::cout << "Loading model\n"; + // Deserialize the ScriptModule from a file using torch::jit::load(). + module = torch::jit::load("fasterrcnn_resnet50_fpn.pt"); + std::cout << "Model loaded\n"; + } catch (const torch::Error& e) { + std::cout << "error loading the model\n"; + return -1; + } catch (const std::exception& e) { + std::cout << "Other error: " << e.what() << "\n"; + return -1; + } + + // TorchScript models require a List[IValue] as input + std::vector<torch::jit::IValue> inputs; + + // Faster RCNN accepts a List[Tensor] as main input + std::vector<torch::Tensor> images; + images.push_back(torch::rand({3, 256, 275})); + images.push_back(torch::rand({3, 256, 275})); + + inputs.push_back(images); + auto output = module.forward(inputs); + + std::cout << "ok\n"; + std::cout << "output" << output << "\n"; + + if (torch::cuda::is_available()) { + // Move traced model to GPU + module.to(torch::kCUDA); + + // Add GPU inputs + images.clear(); + inputs.clear(); + + torch::TensorOptions options = torch::TensorOptions{torch::kCUDA}; + images.push_back(torch::rand({3, 256, 275}, options)); + images.push_back(torch::rand({3, 256, 275}, options)); + + inputs.push_back(images); + auto output = module.forward(inputs); + + std::cout << "ok\n"; + std::cout << "output" << output << "\n"; + } + return 0; +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/trace_model.py b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/trace_model.py new file mode 100644 index 0000000000000000000000000000000000000000..34961e8684f1376e644e349a24f072760d1b9a95 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/test/tracing/frcnn/trace_model.py @@ -0,0 +1,14 @@ + +import os.path as osp + +import torch +import torchvision + +HERE = osp.dirname(osp.abspath(__file__)) +ASSETS = osp.dirname(osp.dirname(HERE)) + +model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=False) +model.eval() + +traced_model = torch.jit.script(model) +traced_model.save("fasterrcnn_resnet50_fpn.pt") diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/__init__.py 
b/pretrained_model/pytorch_vision_v0.10.0/torchvision/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9508605b551c74b71941a6f90b5713a0a3c03a20 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/__init__.py @@ -0,0 +1,99 @@ +import warnings +import os + +from .extension import _HAS_OPS + +from torchvision import models +from torchvision import datasets +from torchvision import ops +from torchvision import transforms +from torchvision import utils +from torchvision import io + +import torch + +try: + from .version import __version__ # noqa: F401 +except ImportError: + pass + +# Check if torchvision is being imported within the root folder +if (not _HAS_OPS and os.path.dirname(os.path.realpath(__file__)) == + os.path.join(os.path.realpath(os.getcwd()), 'torchvision')): + message = ('You are importing torchvision within its own root folder ({}). ' + 'This is not expected to work and may give errors. Please exit the ' + 'torchvision project source and relaunch your python interpreter.') + warnings.warn(message.format(os.getcwd())) + +_image_backend = 'PIL' + +_video_backend = "pyav" + + +def set_image_backend(backend): + """ + Specifies the package used to load images. + + Args: + backend (string): Name of the image backend. one of {'PIL', 'accimage'}. + The :mod:`accimage` package uses the Intel IPP library. It is + generally faster than PIL, but does not support as many operations. + """ + global _image_backend + if backend not in ['PIL', 'accimage']: + raise ValueError("Invalid backend '{}'. Options are 'PIL' and 'accimage'" + .format(backend)) + _image_backend = backend + + +def get_image_backend(): + """ + Gets the name of the package used to load images + """ + return _image_backend + + +def set_video_backend(backend): + """ + Specifies the package used to decode videos. + + Args: + backend (string): Name of the video backend. one of {'pyav', 'video_reader'}. + The :mod:`pyav` package uses the 3rd party PyAv library. It is a Pythonic + binding for the FFmpeg libraries. + The :mod:`video_reader` package includes a native C++ implementation on + top of FFMPEG libraries, and a python API of TorchScript custom operator. + It is generally decoding faster than :mod:`pyav`, but perhaps is less robust. + + .. note:: + Building with FFMPEG is disabled by default in the latest master. If you want to use the 'video_reader' + backend, please compile torchvision from source. + """ + global _video_backend + if backend not in ["pyav", "video_reader"]: + raise ValueError( + "Invalid video backend '%s'. Options are 'pyav' and 'video_reader'" % backend + ) + if backend == "video_reader" and not io._HAS_VIDEO_OPT: + message = ( + "video_reader video backend is not available." + " Please compile torchvision from source and try again" + ) + warnings.warn(message) + else: + _video_backend = backend + + +def get_video_backend(): + """ + Returns the currently active video backend used to decode videos. + + Returns: + str: Name of the video backend. one of {'pyav', 'video_reader'}. 
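A short usage sketch for the backend switches defined above; selecting 'video_reader' assumes torchvision was compiled with FFmpeg support, otherwise the call only emits a warning and the active backend stays 'pyav'.

import torchvision

torchvision.set_image_backend("PIL")           # or "accimage"
torchvision.set_video_backend("video_reader")  # warns and keeps "pyav" if FFmpeg support is missing
print(torchvision.get_video_backend())         # "video_reader" or "pyav"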
+ """ + + return _video_backend + + +def _is_tracing(): + return torch._C._get_tracing_state() diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..421e503b2cea4f6f5b81bdad19573aa31e11c703 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.cpp @@ -0,0 +1,233 @@ +#include "audio_sampler.h" +#include <c10/util/Logging.h> +#include "util.h" + +#define AVRESAMPLE_MAX_CHANNELS 32 + +// www.ffmpeg.org/doxygen/1.1/doc_2examples_2resampling_audio_8c-example.html#a24 +namespace ffmpeg { + +namespace { +int preparePlanes( + const AudioFormat& fmt, + const uint8_t* buffer, + int numSamples, + uint8_t** planes) { + int result; + if ((result = av_samples_fill_arrays( + planes, + nullptr, // linesize is not needed + buffer, + fmt.channels, + numSamples, + (AVSampleFormat)fmt.format, + 1)) < 0) { + LOG(ERROR) << "av_samples_fill_arrays failed, err: " + << Util::generateErrorDesc(result) + << ", numSamples: " << numSamples << ", fmt: " << fmt.format; + } + return result; +} +} // namespace + +AudioSampler::AudioSampler(void* logCtx) : logCtx_(logCtx) {} + +AudioSampler::~AudioSampler() { + cleanUp(); +} + +void AudioSampler::shutdown() { + cleanUp(); +} + +bool AudioSampler::init(const SamplerParameters& params) { + cleanUp(); + + if (params.type != MediaType::TYPE_AUDIO) { + LOG(ERROR) << "Invalid media type, expected MediaType::TYPE_AUDIO"; + return false; + } + + swrContext_ = swr_alloc_set_opts( + nullptr, + av_get_default_channel_layout(params.out.audio.channels), + (AVSampleFormat)params.out.audio.format, + params.out.audio.samples, + av_get_default_channel_layout(params.in.audio.channels), + (AVSampleFormat)params.in.audio.format, + params.in.audio.samples, + 0, + logCtx_); + if (swrContext_ == nullptr) { + LOG(ERROR) << "Cannot allocate SwrContext"; + return false; + } + + int result; + if ((result = swr_init(swrContext_)) < 0) { + LOG(ERROR) << "swr_init faield, err: " << Util::generateErrorDesc(result) + << ", in -> format: " << params.in.audio.format + << ", channels: " << params.in.audio.channels + << ", samples: " << params.in.audio.samples + << ", out -> format: " << params.out.audio.format + << ", channels: " << params.out.audio.channels + << ", samples: " << params.out.audio.samples; + return false; + } + + // set formats + params_ = params; + return true; +} + +int AudioSampler::numOutputSamples(int inSamples) const { + return swr_get_out_samples(swrContext_, inSamples); +} + +int AudioSampler::sample( + const uint8_t* inPlanes[], + int inNumSamples, + ByteStorage* out, + int outNumSamples) { + int result; + int outBufferBytes = av_samples_get_buffer_size( + nullptr, + params_.out.audio.channels, + outNumSamples, + (AVSampleFormat)params_.out.audio.format, + 1); + + if (out) { + out->ensure(outBufferBytes); + + uint8_t* outPlanes[AVRESAMPLE_MAX_CHANNELS] = {nullptr}; + + if ((result = preparePlanes( + params_.out.audio, + out->writableTail(), + outNumSamples, + outPlanes)) < 0) { + return result; + } + + if ((result = swr_convert( + swrContext_, + &outPlanes[0], + outNumSamples, + inPlanes, + inNumSamples)) < 0) { + LOG(ERROR) << "swr_convert faield, err: " + << Util::generateErrorDesc(result); + return result; + } + + CHECK_LE(result, outNumSamples); + + if (result) { + if ((result = av_samples_get_buffer_size( + nullptr, + 
params_.out.audio.channels, + result, + (AVSampleFormat)params_.out.audio.format, + 1)) >= 0) { + out->append(result); + } else { + LOG(ERROR) << "av_samples_get_buffer_size faield, err: " + << Util::generateErrorDesc(result); + } + } + } else { + // allocate a temporary buffer + auto* tmpBuffer = static_cast<uint8_t*>(av_malloc(outBufferBytes)); + if (!tmpBuffer) { + LOG(ERROR) << "av_alloc faield, for size: " << outBufferBytes; + return -1; + } + + uint8_t* outPlanes[AVRESAMPLE_MAX_CHANNELS] = {nullptr}; + + if ((result = preparePlanes( + params_.out.audio, tmpBuffer, outNumSamples, outPlanes)) < 0) { + av_free(tmpBuffer); + return result; + } + + if ((result = swr_convert( + swrContext_, + &outPlanes[0], + outNumSamples, + inPlanes, + inNumSamples)) < 0) { + LOG(ERROR) << "swr_convert faield, err: " + << Util::generateErrorDesc(result); + av_free(tmpBuffer); + return result; + } + + av_free(tmpBuffer); + + CHECK_LE(result, outNumSamples); + + if (result) { + result = av_samples_get_buffer_size( + nullptr, + params_.out.audio.channels, + result, + (AVSampleFormat)params_.out.audio.format, + 1); + } + } + + return result; +} + +int AudioSampler::sample(AVFrame* frame, ByteStorage* out) { + const auto outNumSamples = numOutputSamples(frame ? frame->nb_samples : 0); + + if (!outNumSamples) { + return 0; + } + + return sample( + frame ? (const uint8_t**)&frame->data[0] : nullptr, + frame ? frame->nb_samples : 0, + out, + outNumSamples); +} + +int AudioSampler::sample(const ByteStorage* in, ByteStorage* out) { + const auto inSampleSize = + av_get_bytes_per_sample((AVSampleFormat)params_.in.audio.format); + + const auto inNumSamples = + !in ? 0 : in->length() / inSampleSize / params_.in.audio.channels; + + const auto outNumSamples = numOutputSamples(inNumSamples); + + if (!outNumSamples) { + return 0; + } + + uint8_t* inPlanes[AVRESAMPLE_MAX_CHANNELS] = {nullptr}; + int result; + if (in && + (result = preparePlanes( + params_.in.audio, in->data(), inNumSamples, inPlanes)) < 0) { + return result; + } + + return sample( + in ? (const uint8_t**)inPlanes : nullptr, + inNumSamples, + out, + outNumSamples); +} + +void AudioSampler::cleanUp() { + if (swrContext_) { + swr_free(&swrContext_); + swrContext_ = nullptr; + } +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.h new file mode 100644 index 0000000000000000000000000000000000000000..e105bbe4de20c35b49fd0136bd30ef26c25b0241 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_sampler.h @@ -0,0 +1,39 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * Class transcode audio frames from one format into another + */ + +class AudioSampler : public MediaSampler { + public: + explicit AudioSampler(void* logCtx); + ~AudioSampler() override; + + // MediaSampler overrides + bool init(const SamplerParameters& params) override; + int sample(const ByteStorage* in, ByteStorage* out) override; + void shutdown() override; + + int sample(AVFrame* frame, ByteStorage* out); + + private: + // close resources + void cleanUp(); + // helper functions for rescaling, cropping, etc. 
+ int numOutputSamples(int inSamples) const; + int sample( + const uint8_t* inPlanes[], + int inNumSamples, + ByteStorage* out, + int outNumSamples); + + private: + SwrContext* swrContext_{nullptr}; + void* logCtx_{nullptr}; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9d66e589bf319a4266113d1503c0fc79aeefa6b6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.cpp @@ -0,0 +1,101 @@ +#include "audio_stream.h" +#include <c10/util/Logging.h> +#include <limits> +#include "util.h" + +namespace ffmpeg { + +namespace { +bool operator==(const AudioFormat& x, const AVFrame& y) { + return x.samples == y.sample_rate && x.channels == y.channels && + x.format == y.format; +} + +bool operator==(const AudioFormat& x, const AVCodecContext& y) { + return x.samples == y.sample_rate && x.channels == y.channels && + x.format == y.sample_fmt; +} + +AudioFormat& toAudioFormat(AudioFormat& x, const AVFrame& y) { + x.samples = y.sample_rate; + x.channels = y.channels; + x.format = y.format; + return x; +} + +AudioFormat& toAudioFormat(AudioFormat& x, const AVCodecContext& y) { + x.samples = y.sample_rate; + x.channels = y.channels; + x.format = y.sample_fmt; + return x; +} +} // namespace + +AudioStream::AudioStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const AudioFormat& format) + : Stream( + inputCtx, + MediaFormat::makeMediaFormat(format, index), + convertPtsToWallTime, + 0) {} + +AudioStream::~AudioStream() { + if (sampler_) { + sampler_->shutdown(); + sampler_.reset(); + } +} + +int AudioStream::initFormat() { + // set output format + if (format_.format.audio.samples == 0) { + format_.format.audio.samples = codecCtx_->sample_rate; + } + if (format_.format.audio.channels == 0) { + format_.format.audio.channels = codecCtx_->channels; + } + if (format_.format.audio.format == AV_SAMPLE_FMT_NONE) { + format_.format.audio.format = codecCtx_->sample_fmt; + } + + return format_.format.audio.samples != 0 && + format_.format.audio.channels != 0 && + format_.format.audio.format != AV_SAMPLE_FMT_NONE + ? 0 + : -1; +} + +int AudioStream::copyFrameBytes(ByteStorage* out, bool flush) { + if (!sampler_) { + sampler_ = std::make_unique<AudioSampler>(codecCtx_); + } + // check if input format gets changed + if (flush ? !(sampler_->getInputFormat().audio == *codecCtx_) + : !(sampler_->getInputFormat().audio == *frame_)) { + // - reinit sampler + SamplerParameters params; + params.type = format_.type; + params.out = format_.format; + params.in = FormatUnion(); + flush ? toAudioFormat(params.in.audio, *codecCtx_) + : toAudioFormat(params.in.audio, *frame_); + if (!sampler_->init(params)) { + return -1; + } + + VLOG(1) << "Set input audio sampler format" + << ", samples: " << params.in.audio.samples + << ", channels: " << params.in.audio.channels + << ", format: " << params.in.audio.format + << " : output audio sampler format" + << ", samples: " << format_.format.audio.samples + << ", channels: " << format_.format.audio.channels + << ", format: " << format_.format.audio.format; + } + return sampler_->sample(flush ? 
nullptr : frame_, out); +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.h new file mode 100644 index 0000000000000000000000000000000000000000..2d6457b68f53cc835b6c87a22492f0e02c28a633 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/audio_stream.h @@ -0,0 +1,29 @@ +#pragma once + +#include "audio_sampler.h" +#include "stream.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode one audio stream. + */ + +class AudioStream : public Stream { + public: + AudioStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const AudioFormat& format); + ~AudioStream() override; + + private: + int initFormat() override; + int copyFrameBytes(ByteStorage* out, bool flush) override; + + private: + std::unique_ptr<AudioSampler> sampler_; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..89174c396fd8cab68677b095f8a59980cfe9ee33 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.cpp @@ -0,0 +1,24 @@ +#include "cc_stream.h" + +namespace ffmpeg { + +CCStream::CCStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const SubtitleFormat& format) + : SubtitleStream(inputCtx, index, convertPtsToWallTime, format) { + format_.type = TYPE_CC; +} + +AVCodec* CCStream::findCodec(AVCodecParameters* params) { + if (params->codec_id == AV_CODEC_ID_BIN_DATA && + params->codec_type == AVMEDIA_TYPE_DATA) { + // obtain subtitles codec + params->codec_id = AV_CODEC_ID_MOV_TEXT; + params->codec_type = AVMEDIA_TYPE_SUBTITLE; + } + return Stream::findCodec(params); +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.h new file mode 100644 index 0000000000000000000000000000000000000000..3a1d169f01407ec0319fde72e9af056211dd8287 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/cc_stream.h @@ -0,0 +1,22 @@ +#pragma once + +#include "subtitle_stream.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode one closed captions stream. 
+ */ +class CCStream : public SubtitleStream { + public: + CCStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const SubtitleFormat& format); + + private: + AVCodec* findCodec(AVCodecParameters* params) override; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6c9a3cdf825a573d5a9aab516284df66b0df15b3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.cpp @@ -0,0 +1,665 @@ +#include "decoder.h" +#include <c10/util/Logging.h> +#include <future> +#include <iostream> +#include <mutex> +#include "audio_stream.h" +#include "cc_stream.h" +#include "subtitle_stream.h" +#include "util.h" +#include "video_stream.h" + +namespace ffmpeg { + +namespace { + +constexpr size_t kIoBufferSize = 96 * 1024; +constexpr size_t kIoPaddingSize = AV_INPUT_BUFFER_PADDING_SIZE; +constexpr size_t kLogBufferSize = 1024; + +int ffmpeg_lock(void** mutex, enum AVLockOp op) { + std::mutex** handle = (std::mutex**)mutex; + switch (op) { + case AV_LOCK_CREATE: + *handle = new std::mutex(); + break; + case AV_LOCK_OBTAIN: + (*handle)->lock(); + break; + case AV_LOCK_RELEASE: + (*handle)->unlock(); + break; + case AV_LOCK_DESTROY: + delete *handle; + break; + } + return 0; +} + +bool mapFfmpegType(AVMediaType media, MediaType* type) { + switch (media) { + case AVMEDIA_TYPE_AUDIO: + *type = TYPE_AUDIO; + return true; + case AVMEDIA_TYPE_VIDEO: + *type = TYPE_VIDEO; + return true; + case AVMEDIA_TYPE_SUBTITLE: + *type = TYPE_SUBTITLE; + return true; + case AVMEDIA_TYPE_DATA: + *type = TYPE_CC; + return true; + default: + return false; + } +} + +std::unique_ptr<Stream> createStream( + MediaType type, + AVFormatContext* ctx, + int idx, + bool convertPtsToWallTime, + const FormatUnion& format, + int64_t loggingUuid) { + switch (type) { + case TYPE_AUDIO: + return std::make_unique<AudioStream>( + ctx, idx, convertPtsToWallTime, format.audio); + case TYPE_VIDEO: + return std::make_unique<VideoStream>( + // negative loggingUuid indicates video streams. 
+ ctx, + idx, + convertPtsToWallTime, + format.video, + -loggingUuid); + case TYPE_SUBTITLE: + return std::make_unique<SubtitleStream>( + ctx, idx, convertPtsToWallTime, format.subtitle); + case TYPE_CC: + return std::make_unique<CCStream>( + ctx, idx, convertPtsToWallTime, format.subtitle); + default: + return nullptr; + } +} + +} // Namespace + +/* static */ +void Decoder::logFunction(void* avcl, int level, const char* cfmt, va_list vl) { + if (!avcl) { + // Nothing can be done here + return; + } + + AVClass* avclass = *reinterpret_cast<AVClass**>(avcl); + if (!avclass) { + // Nothing can be done here + return; + } + Decoder* decoder = nullptr; + if (strcmp(avclass->class_name, "AVFormatContext") == 0) { + AVFormatContext* context = reinterpret_cast<AVFormatContext*>(avcl); + if (context) { + decoder = reinterpret_cast<Decoder*>(context->opaque); + } + } else if (strcmp(avclass->class_name, "AVCodecContext") == 0) { + AVCodecContext* context = reinterpret_cast<AVCodecContext*>(avcl); + if (context) { + decoder = reinterpret_cast<Decoder*>(context->opaque); + } + } else if (strcmp(avclass->class_name, "AVIOContext") == 0) { + AVIOContext* context = reinterpret_cast<AVIOContext*>(avcl); + // only if opaque was assigned to Decoder pointer + if (context && context->read_packet == Decoder::readFunction) { + decoder = reinterpret_cast<Decoder*>(context->opaque); + } + } else if (strcmp(avclass->class_name, "SWResampler") == 0) { + // expect AVCodecContext as parent + if (avclass->parent_log_context_offset) { + AVClass** parent = + *(AVClass***)(((uint8_t*)avcl) + avclass->parent_log_context_offset); + AVCodecContext* context = reinterpret_cast<AVCodecContext*>(parent); + if (context) { + decoder = reinterpret_cast<Decoder*>(context->opaque); + } + } + } else if (strcmp(avclass->class_name, "SWScaler") == 0) { + // cannot find a way to pass context pointer through SwsContext struct + } else { + VLOG(2) << "Unknown context class: " << avclass->class_name; + } + + if (decoder != nullptr && decoder->enableLogLevel(level)) { + char buf[kLogBufferSize] = {0}; + // Format the line + int* prefix = decoder->getPrintPrefix(); + *prefix = 1; + av_log_format_line(avcl, level, cfmt, vl, buf, sizeof(buf) - 1, prefix); + // pass message to the decoder instance + std::string msg(buf); + decoder->logCallback(level, msg); + } +} + +bool Decoder::enableLogLevel(int level) const { + return ssize_t(level) <= params_.logLevel; +} + +void Decoder::logCallback(int level, const std::string& message) { + LOG(INFO) << "Msg, uuid=" << params_.loggingUuid << " level=" << level + << " msg=" << message; +} + +/* static */ +int Decoder::shutdownFunction(void* ctx) { + Decoder* decoder = (Decoder*)ctx; + if (decoder == nullptr) { + return 1; + } + return decoder->shutdownCallback(); +} + +int Decoder::shutdownCallback() { + return interrupted_ ? 
1 : 0; +} + +/* static */ +int Decoder::readFunction(void* opaque, uint8_t* buf, int size) { + Decoder* decoder = reinterpret_cast<Decoder*>(opaque); + if (decoder == nullptr) { + return 0; + } + return decoder->readCallback(buf, size); +} + +/* static */ +int64_t Decoder::seekFunction(void* opaque, int64_t offset, int whence) { + Decoder* decoder = reinterpret_cast<Decoder*>(opaque); + if (decoder == nullptr) { + return -1; + } + return decoder->seekCallback(offset, whence); +} + +int Decoder::readCallback(uint8_t* buf, int size) { + return seekableBuffer_.read(buf, size, params_.timeoutMs); +} + +int64_t Decoder::seekCallback(int64_t offset, int whence) { + return seekableBuffer_.seek(offset, whence, params_.timeoutMs); +} + +/* static */ +void Decoder::initOnce() { + static std::once_flag flagInit; + std::call_once(flagInit, []() { + av_register_all(); + avcodec_register_all(); + avformat_network_init(); + // register ffmpeg lock manager + av_lockmgr_register(&ffmpeg_lock); + av_log_set_callback(Decoder::logFunction); + av_log_set_level(AV_LOG_ERROR); + VLOG(1) << "Registered ffmpeg libs"; + }); +} + +Decoder::Decoder() { + initOnce(); +} + +Decoder::~Decoder() { + cleanUp(); +} + +bool Decoder::init( + const DecoderParameters& params, + DecoderInCallback&& in, + std::vector<DecoderMetadata>* metadata) { + cleanUp(); + + if ((params.uri.empty() || in) && (!params.uri.empty() || !in)) { + LOG(ERROR) + << "uuid=" << params_.loggingUuid + << " either external URI gets provided or explicit input callback"; + return false; + } + + // set callback and params + params_ = params; + + if (!(inputCtx_ = avformat_alloc_context())) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " cannot allocate format context"; + return false; + } + + AVInputFormat* fmt = nullptr; + int result = 0; + if (in) { + ImageType type = ImageType::UNKNOWN; + if ((result = seekableBuffer_.init( + std::forward<DecoderInCallback>(in), + params_.timeoutMs, + params_.maxSeekableBytes, + params_.isImage ? &type : nullptr)) < 0) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " can't initiate seekable buffer"; + cleanUp(); + return false; + } + + if (params_.isImage) { + const char* fmtName = "image2"; + switch (type) { + case ImageType::JPEG: + fmtName = "jpeg_pipe"; + break; + case ImageType::PNG: + fmtName = "png_pipe"; + break; + case ImageType::TIFF: + fmtName = "tiff_pipe"; + break; + default: + break; + } + + fmt = av_find_input_format(fmtName); + } + + const size_t avioCtxBufferSize = kIoBufferSize; + uint8_t* avioCtxBuffer = + (uint8_t*)av_malloc(avioCtxBufferSize + kIoPaddingSize); + if (!avioCtxBuffer) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " av_malloc cannot allocate " << avioCtxBufferSize + << " bytes"; + cleanUp(); + return false; + } + + if (!(avioCtx_ = avio_alloc_context( + avioCtxBuffer, + avioCtxBufferSize, + 0, + reinterpret_cast<void*>(this), + &Decoder::readFunction, + nullptr, + result == 1 ? 
&Decoder::seekFunction : nullptr))) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " avio_alloc_context failed"; + av_free(avioCtxBuffer); + cleanUp(); + return false; + } + + inputCtx_->pb = avioCtx_; + inputCtx_->flags |= AVFMT_FLAG_CUSTOM_IO; + } + + inputCtx_->opaque = reinterpret_cast<void*>(this); + inputCtx_->interrupt_callback.callback = Decoder::shutdownFunction; + inputCtx_->interrupt_callback.opaque = reinterpret_cast<void*>(this); + + // add network timeout + inputCtx_->flags |= AVFMT_FLAG_NONBLOCK; + + AVDictionary* options = nullptr; + if (params_.listen) { + av_dict_set_int(&options, "listen", 1, 0); + } + if (params_.timeoutMs > 0) { + av_dict_set_int(&options, "analyzeduration", params_.timeoutMs * 1000, 0); + av_dict_set_int(&options, "stimeout", params_.timeoutMs * 1000, 0); + av_dict_set_int(&options, "rw_timeout", params_.timeoutMs * 1000, 0); + if (!params_.tlsCertFile.empty()) { + av_dict_set(&options, "cert_file", params_.tlsCertFile.data(), 0); + } + if (!params_.tlsKeyFile.empty()) { + av_dict_set(&options, "key_file", params_.tlsKeyFile.data(), 0); + } + } + + interrupted_ = false; + + // ffmpeg avformat_open_input call can hang if media source doesn't respond + // set a guard for handle such situations, if requested + std::promise<bool> p; + std::future<bool> f = p.get_future(); + std::unique_ptr<std::thread> guard; + if (params_.preventStaleness) { + guard = std::make_unique<std::thread>([&f, this]() { + auto timeout = std::chrono::milliseconds(params_.timeoutMs); + if (std::future_status::timeout == f.wait_for(timeout)) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " cannot open stream within " << params_.timeoutMs + << " ms"; + interrupted_ = true; + } + }); + } + + if (fmt) { + result = avformat_open_input(&inputCtx_, nullptr, fmt, &options); + } else { + result = + avformat_open_input(&inputCtx_, params_.uri.c_str(), nullptr, &options); + } + + av_dict_free(&options); + + if (guard) { + p.set_value(true); + guard->join(); + guard.reset(); + } + + if (result < 0 || interrupted_) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " avformat_open_input failed, error=" + << Util::generateErrorDesc(result); + cleanUp(); + return false; + } + + result = avformat_find_stream_info(inputCtx_, nullptr); + + if (result < 0) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " avformat_find_stream_info failed, error=" + << Util::generateErrorDesc(result); + cleanUp(); + return false; + } + + if (!openStreams(metadata)) { + LOG(ERROR) << "uuid=" << params_.loggingUuid << " cannot activate streams"; + cleanUp(); + return false; + } + + onInit(); + + if (params.startOffset != 0) { + auto offset = params.startOffset <= params.seekAccuracy + ? 
0 + : params.startOffset - params.seekAccuracy; + + av_seek_frame(inputCtx_, -1, offset, AVSEEK_FLAG_BACKWARD); + } + + VLOG(1) << "Decoder initialized, log level: " << params_.logLevel; + return true; +} + +bool Decoder::openStreams(std::vector<DecoderMetadata>* metadata) { + for (int i = 0; i < inputCtx_->nb_streams; i++) { + // - find the corespondent format at params_.formats set + MediaFormat format; + const auto media = inputCtx_->streams[i]->codec->codec_type; + if (!mapFfmpegType(media, &format.type)) { + VLOG(1) << "Stream media: " << media << " at index " << i + << " gets ignored, unknown type"; + + continue; // unsupported type + } + + // check format + auto it = params_.formats.find(format); + if (it == params_.formats.end()) { + VLOG(1) << "Stream type: " << format.type << " at index: " << i + << " gets ignored, caller is not interested"; + continue; // clients don't care about this media format + } + + // do we have stream of this type? + auto stream = findByType(format); + + // should we process this stream? + + if (it->stream == -2 || // all streams of this type are welcome + (!stream && (it->stream == -1 || it->stream == i))) { // new stream + VLOG(1) << "Stream type: " << format.type << " found, at index: " << i; + auto stream = createStream( + format.type, + inputCtx_, + i, + params_.convertPtsToWallTime, + it->format, + params_.loggingUuid); + CHECK(stream); + if (stream->openCodec(metadata) < 0) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " open codec failed, stream_idx=" << i; + return false; + } + streams_.emplace(i, std::move(stream)); + inRange_.set(i, true); + } + } + + return true; +} + +void Decoder::shutdown() { + cleanUp(); +} + +void Decoder::interrupt() { + interrupted_ = true; +} + +void Decoder::cleanUp() { + if (!interrupted_) { + interrupted_ = true; + } + + if (inputCtx_) { + for (auto& stream : streams_) { + // Drain stream buffers. 
+ DecoderOutputMessage msg; + while (msg.payload = nullptr, stream.second->flush(&msg, true) > 0) { + } + stream.second.reset(); + } + streams_.clear(); + avformat_close_input(&inputCtx_); + } + if (avioCtx_) { + av_freep(&avioCtx_->buffer); + av_freep(&avioCtx_); + } + + // reset callback + seekableBuffer_.shutdown(); +} + +int Decoder::getFrame(size_t workingTimeInMs) { + if (inRange_.none()) { + return ENODATA; + } + // decode frames until cache is full and leave thread + // once decode() method gets called and grab some bytes + // run this method again + // init package + AVPacket avPacket; + av_init_packet(&avPacket); + avPacket.data = nullptr; + avPacket.size = 0; + + auto end = std::chrono::steady_clock::now() + + std::chrono::milliseconds(workingTimeInMs); + // return true if elapsed time less than timeout + auto watcher = [end]() -> bool { + return std::chrono::steady_clock::now() <= end; + }; + + int result = 0; + size_t decodingErrors = 0; + bool decodedFrame = false; + while (!interrupted_ && inRange_.any() && !decodedFrame && watcher()) { + result = av_read_frame(inputCtx_, &avPacket); + if (result == AVERROR(EAGAIN)) { + VLOG(4) << "Decoder is busy..."; + std::this_thread::yield(); + result = 0; // reset error, EAGAIN is not an error at all + continue; + } else if (result == AVERROR_EOF) { + flushStreams(); + VLOG(1) << "End of stream"; + result = ENODATA; + break; + } else if (result < 0) { + flushStreams(); + LOG(ERROR) << "Error detected: " << Util::generateErrorDesc(result); + break; + } + + // get stream + auto stream = findByIndex(avPacket.stream_index); + if (stream == nullptr || !inRange_.test(stream->getIndex())) { + av_packet_unref(&avPacket); + continue; + } + + size_t numConsecutiveNoBytes = 0; + // it can be only partial decoding of the package bytes + do { + // decode package + bool gotFrame = false; + bool hasMsg = false; + // packet either got consumed completely or not at all + if ((result = processPacket(stream, &avPacket, &gotFrame, &hasMsg)) < 0) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " processPacket failed with code=" << result; + break; + } + + if (!gotFrame && params_.maxProcessNoBytes != 0 && + ++numConsecutiveNoBytes > params_.maxProcessNoBytes) { + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " exceeding max amount of consecutive no bytes"; + break; + } + if (result > 0) { + numConsecutiveNoBytes = 0; + } + + decodedFrame |= hasMsg; + } while (result == 0); + + // post loop check + if (result < 0) { + if (params_.maxPackageErrors != 0 && // check errors + ++decodingErrors >= params_.maxPackageErrors) { // reached the limit + LOG(ERROR) << "uuid=" << params_.loggingUuid + << " exceeding max amount of consecutive package errors"; + break; + } + } else { + decodingErrors = 0; // reset on success + } + + result = 0; + + av_packet_unref(&avPacket); + } + + av_packet_unref(&avPacket); + + VLOG(2) << "Interrupted loop" + << ", interrupted_ " << interrupted_ << ", inRange_.any() " + << inRange_.any() << ", decodedFrame " << decodedFrame << ", result " + << result; + + // loop can be terminated, either by: + // 1. explcitly iterrupted + // 2. terminated by workable timeout + // 3. unrecoverable error or ENODATA (end of stream) + // 4. decoded frames pts are out of the specified range + // 5. 
success decoded frame + if (interrupted_) { + return EINTR; + } + if (result != 0) { + return result; + } + if (inRange_.none()) { + return ENODATA; + } + return 0; +} + +Stream* Decoder::findByIndex(int streamIndex) const { + auto it = streams_.find(streamIndex); + return it != streams_.end() ? it->second.get() : nullptr; +} + +Stream* Decoder::findByType(const MediaFormat& format) const { + for (auto& stream : streams_) { + if (stream.second->getMediaFormat().type == format.type) { + return stream.second.get(); + } + } + return nullptr; +} + +int Decoder::processPacket( + Stream* stream, + AVPacket* packet, + bool* gotFrame, + bool* hasMsg) { + // decode package + int result; + DecoderOutputMessage msg; + msg.payload = params_.headerOnly ? nullptr : createByteStorage(0); + *hasMsg = false; + if ((result = stream->decodePacket( + packet, &msg, params_.headerOnly, gotFrame)) >= 0 && + *gotFrame) { + // check end offset + bool endInRange = + params_.endOffset <= 0 || msg.header.pts <= params_.endOffset; + inRange_.set(stream->getIndex(), endInRange); + if (endInRange && msg.header.pts >= params_.startOffset) { + *hasMsg = true; + push(std::move(msg)); + } + } + return result; +} + +void Decoder::flushStreams() { + VLOG(1) << "Flushing streams..."; + for (auto& stream : streams_) { + DecoderOutputMessage msg; + while (msg.payload = (params_.headerOnly ? nullptr : createByteStorage(0)), + stream.second->flush(&msg, params_.headerOnly) > 0) { + // check end offset + bool endInRange = + params_.endOffset <= 0 || msg.header.pts <= params_.endOffset; + inRange_.set(stream.second->getIndex(), endInRange); + if (endInRange && msg.header.pts >= params_.startOffset) { + push(std::move(msg)); + } else { + msg.payload.reset(); + } + } + } +} + +int Decoder::decode_all(const DecoderOutCallback& callback) { + int result; + do { + DecoderOutputMessage out; + if (0 == (result = decode(&out, params_.timeoutMs))) { + callback(std::move(out)); + } + } while (result == 0); + return result; +} +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.h new file mode 100644 index 0000000000000000000000000000000000000000..c2d8f163bc30faccbf0bbb751cb77fcc53f360d2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/decoder.h @@ -0,0 +1,92 @@ +#pragma once + +#include <bitset> +#include <unordered_map> +#include "seekable_buffer.h" +#include "stream.h" + +#if defined(_MSC_VER) +#include <BaseTsd.h> +using ssize_t = SSIZE_T; +#endif + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode media streams. + * Media bytes can be explicitly provided through read-callback + * or fetched internally by FFMPEG library + */ +class Decoder : public MediaDecoder { + public: + Decoder(); + ~Decoder() override; + + // MediaDecoder overrides + bool init( + const DecoderParameters& params, + DecoderInCallback&& in, + std::vector<DecoderMetadata>* metadata) override; + int decode_all(const DecoderOutCallback& callback) override; + void shutdown() override; + void interrupt() override; + + protected: + // function does actual work, derived class calls it in working thread + // periodically. On success method returns 0, ENOADATA on EOF, ETIMEDOUT if + // no frames got decoded in the specified timeout time, and error on + // unrecoverable error. 
+ int getFrame(size_t workingTimeInMs = 100); + + // Derived class must override method and consume the provided message + virtual void push(DecoderOutputMessage&& buffer) = 0; + + // Fires on init call + virtual void onInit() {} + + public: + // C-style FFMPEG API requires C/static methods for callbacks + static void logFunction(void* avcl, int level, const char* cfmt, va_list vl); + static int shutdownFunction(void* ctx); + static int readFunction(void* opaque, uint8_t* buf, int size); + static int64_t seekFunction(void* opaque, int64_t offset, int whence); + // can be called by any classes or API + static void initOnce(); + + int* getPrintPrefix() { + return &printPrefix; + } + + private: + // mark below function for a proper invocation + virtual bool enableLogLevel(int level) const; + virtual void logCallback(int level, const std::string& message); + virtual int readCallback(uint8_t* buf, int size); + virtual int64_t seekCallback(int64_t offset, int whence); + virtual int shutdownCallback(); + + bool openStreams(std::vector<DecoderMetadata>* metadata); + Stream* findByIndex(int streamIndex) const; + Stream* findByType(const MediaFormat& format) const; + int processPacket( + Stream* stream, + AVPacket* packet, + bool* gotFrame, + bool* hasMsg); + void flushStreams(); + void cleanUp(); + + protected: + DecoderParameters params_; + + private: + SeekableBuffer seekableBuffer_; + int printPrefix{1}; + + std::atomic<bool> interrupted_{false}; + AVFormatContext* inputCtx_{nullptr}; + AVIOContext* avioCtx_{nullptr}; + std::unordered_map<ssize_t, std::unique_ptr<Stream>> streams_; + std::bitset<64> inRange_; +}; +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/defs.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/defs.h new file mode 100644 index 0000000000000000000000000000000000000000..b828934bdf0e3b9779649cb185ac65b2547941df --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/defs.h @@ -0,0 +1,390 @@ +#pragma once + +#include <array> +#include <functional> +#include <memory> +#include <set> +#include <string> +#include <unordered_set> +#include <vector> + +extern "C" { +#include <libavcodec/avcodec.h> +#include <libavformat/avformat.h> +#include <libavformat/avio.h> +#include <libavutil/avutil.h> +#include <libavutil/imgutils.h> +#include <libswresample/swresample.h> +#include "libswscale/swscale.h" +} + +namespace ffmpeg { + +// bit mask of formats, keep them in form 2^n +enum MediaType : size_t { + TYPE_AUDIO = 1, + TYPE_VIDEO = 2, + TYPE_SUBTITLE = 4, + TYPE_CC = 8, // closed captions from transport streams +}; + +// audio +struct AudioFormat { + // fields are initialized for the auto detection + // caller can specify some/all of field values if specific output is desirable + bool operator==(const AudioFormat& x) const { + return x.format == format && x.samples == samples && x.channels == channels; + } + + size_t samples{0}; // number samples per second (frequency) + size_t channels{0}; // number of channels + long format{-1}; // AVSampleFormat, auto AV_SAMPLE_FMT_NONE + size_t padding[2]; + // -- alignment 40 bytes +}; + +// video +struct VideoFormat { + // fields are initialized for the auto detection + // caller can specify some/all of field values if specific output is desirable + bool operator==(const VideoFormat& x) const { + return x.format == format && x.width == width && x.height == height; + } + /* + When width = 0, height = 0, minDimension = 0, and maxDimension = 0, 
+ keep the original frame resolution + When width = 0, height = 0, minDimension != 0, and maxDimension = 0, + keep the aspect ratio and resize the frame so that shorter edge size is + minDimension + When width = 0, height = 0, minDimension = 0, and maxDimension != 0, + keep the aspect ratio and resize the frame so that longer edge size is + maxDimension + When width = 0, height = 0, minDimension != 0, and maxDimension != 0, + resize the frame so that shorter edge size is minDimension, and + longer edge size is maxDimension. The aspect ratio may not be preserved + When width = 0, height != 0, minDimension = 0, and maxDimension = 0, + keep the aspect ratio and resize the frame so that frame height is $height + When width != 0, height = 0, minDimension = 0, and maxDimension = 0, + keep the aspect ratio and resize the frame so that frame width is $width + When width != 0, height != 0, minDimension = 0, and maxDimension = 0, + resize the frame so that frame width and height are set to $width and + $height, + respectively + */ + size_t width{0}; // width in pixels + size_t height{0}; // height in pixels + long format{-1}; // AVPixelFormat, auto AV_PIX_FMT_NONE + size_t minDimension{0}; // choose min dimension and rescale accordingly + size_t maxDimension{0}; // choose max dimension and rescale accordingly + size_t cropImage{0}; // request image crop + // -- alignment 40 bytes +}; + +// subtitle/cc +struct SubtitleFormat { + long type{0}; // AVSubtitleType, auto SUBTITLE_NONE + size_t padding[4]; + // -- alignment 40 bytes +}; + +union FormatUnion { + FormatUnion() : audio() {} + explicit FormatUnion(int) : video() {} + explicit FormatUnion(char) : subtitle() {} + explicit FormatUnion(double) : subtitle() {} + AudioFormat audio; + VideoFormat video; + SubtitleFormat subtitle; + // -- alignment 40 bytes +}; + +/* + MediaFormat data structure serves as input/output parameter. 
+ Caller assigns values for input formats + or leave default values for auto detection + For output formats all fields will be set to the specific values +*/ +struct MediaFormat { + // for using map/set data structures + bool operator<(const MediaFormat& x) const { + return type < x.type; + } + bool operator==(const MediaFormat& x) const { + if (type != x.type) { + return false; + } + switch (type) { + case TYPE_AUDIO: + return format.audio == x.format.audio; + case TYPE_VIDEO: + return format.video == x.format.video; + case TYPE_SUBTITLE: + case TYPE_CC: + return true; + default: + return false; + } + } + + explicit MediaFormat(long s = -1) : type(TYPE_AUDIO), stream(s), format() {} + explicit MediaFormat(int x, long s = -1) + : type(TYPE_VIDEO), stream(s), format(x) {} + explicit MediaFormat(char x, long s = -1) + : type(TYPE_SUBTITLE), stream(s), format(x) {} + explicit MediaFormat(double x, long s = -1) + : type(TYPE_CC), stream(s), format(x) {} + + static MediaFormat makeMediaFormat(AudioFormat format, long stream) { + MediaFormat result(stream); + result.format.audio = format; + return result; + } + + static MediaFormat makeMediaFormat(VideoFormat format, long stream) { + MediaFormat result(0, stream); + result.format.video = format; + return result; + } + + static MediaFormat makeMediaFormat(SubtitleFormat format, long stream) { + MediaFormat result('0', stream); + result.format.subtitle = format; + return result; + } + + // format type + MediaType type; + // stream index: + // set -1 for one stream auto detection, -2 for all streams auto detection, + // >= 0, specified stream, if caller knows the stream index (unlikely) + long stream; + // union keeps one of the possible formats, defined by MediaType + FormatUnion format; +}; + +struct DecoderParameters { + // local file, remote file, http url, rtmp stream uri, etc. anything that + // ffmpeg can recognize + std::string uri; + // timeout on getting bytes for decoding + size_t timeoutMs{1000}; + // logging level, default AV_LOG_PANIC + long logLevel{0}; + // when decoder would give up, 0 means never + size_t maxPackageErrors{0}; + // max allowed consecutive times no bytes are processed. 0 means for infinite. + size_t maxProcessNoBytes{0}; + // start offset (us) + long startOffset{0}; + // end offset (us) + long endOffset{-1}; + // logging id + int64_t loggingUuid{0}; + // internal max seekable buffer size + size_t maxSeekableBytes{0}; + // adjust header pts to the epoch time + bool convertPtsToWallTime{false}; + // indicate if input stream is an encoded image + bool isImage{false}; + // listen and wait for new rtmp stream + bool listen{false}; + // don't copy frame body, only header + bool headerOnly{false}; + // interrupt init method on timeout + bool preventStaleness{true}; + // seek tolerated accuracy (us) + double seekAccuracy{1000000.0}; + // what media types should be processed, default none + std::set<MediaFormat> formats; + + // can be used for asynchronous decoders + size_t cacheSize{8192}; // mow many bytes to cache before stop reading bytes + size_t cacheTimeoutMs{1000}; // timeout on bytes writing + bool enforceCacheSize{false}; // drop output frames if cache is full + bool mergeAudioMessages{false}; // combine collocated audio messages together + + std::string tlsCertFile; + std::string tlsKeyFile; +}; + +struct DecoderHeader { + // message id, from 0 till ... 
+  size_t seqno{0};
+  // decoded timestamp in microseconds from either the beginning of the stream
+  // or from epoch time, see DecoderParameters::convertPtsToWallTime
+  long pts{0};
+  // decoded key frame
+  size_t keyFrame{0};
+  // frames per second, valid only for video streams
+  double fps{0};
+  // format specifies what kind of frame is in the payload
+  MediaFormat format;
+};
+
+// Abstract interface ByteStorage class
+class ByteStorage {
+ public:
+  virtual ~ByteStorage() = default;
+  // makes sure that the buffer has at least n bytes available for writing, if
+  // not, storage must reallocate memory.
+  virtual void ensure(size_t n) = 0;
+  // caller must not write more than the available bytes
+  virtual uint8_t* writableTail() = 0;
+  // caller confirms that n bytes were written to the writable tail
+  virtual void append(size_t n) = 0;
+  // caller confirms that n bytes were read from the read buffer
+  virtual void trim(size_t n) = 0;
+  // gives access to the beginning of the read buffer
+  virtual const uint8_t* data() const = 0;
+  // returns the stored size in bytes
+  virtual size_t length() const = 0;
+  // returns the available capacity of the writable tail
+  virtual size_t tail() const = 0;
+  // clears content, keeps capacity
+  virtual void clear() = 0;
+};
+
+struct DecoderOutputMessage {
+  DecoderHeader header;
+  std::unique_ptr<ByteStorage> payload;
+};
+
+/*
+ * External provider of the encoded bytes, specific implementation is left for
+ * different use cases, like file, memory, external network end-points, etc.
+ * Normally the input/output parameter @out is set to a valid, non-null buffer
+ * pointer, which indicates a "read" call; however, there are "seek" modes too.
+
+ * @out != nullptr => read from the current offset, @whence is ignored,
+ * @size bytes to read => returns the number of bytes read, 0 if no more bytes
+ * are available, < 0 on error.
+
+ * @out == nullptr, @timeoutMs == 0 => does the provider support "seek"
+ * capability in the first place? @size & @whence are ignored, returns 0 on
+ * success, < 0 if "seek" mode is not supported.
+
+ * @out == nullptr, @timeoutMs != 0 => normal seek call
+ * offset == @size, i.e. @whence = [SEEK_SET, SEEK_CUR, SEEK_END, AVSEEK_SIZE)
+ * returns < 0 on error, position if @whence = [SEEK_SET, SEEK_CUR, SEEK_END],
+ * length of buffer if @whence = [AVSEEK_SIZE].
+ */
+using DecoderInCallback =
+    std::function<int(uint8_t* out, int size, int whence, uint64_t timeoutMs)>;
+
+using DecoderOutCallback = std::function<void(DecoderOutputMessage&&)>;
+
+struct DecoderMetadata {
+  // time base numerator
+  long num{0};
+  // time base denominator
+  long den{1};
+  // duration of the stream, in microseconds, if available
+  long duration{-1};
+  // frames per second, valid only for video streams
+  double fps{0};
+  // format specifies what kind of frame is in the payload
+  MediaFormat format;
+};
+/**
+ * Abstract class for decoding media bytes
+ * It has two different modes: internal media bytes retrieval for a given uri,
+ * or an external media bytes provider in the case of memory streams
+ */
+class MediaDecoder {
+ public:
+  virtual ~MediaDecoder() = default;
+
+  /**
+   * Initializes the media decoder with parameters and
+   * calls the callback when media bytes are available.
+   * Media bytes are either fetched internally from the provided URI
+   * or obtained by invoking the provided input callback.
+ * Input callback must be empty for the internal media provider + * Caller can provide non-null pointer for the input container + * if headers to obtain the streams metadata (optional) + */ + virtual bool init( + const DecoderParameters& params, + DecoderInCallback&& in, + std::vector<DecoderMetadata>* metadata) = 0; + + /** + * Polls available decoded one frame from decoder + * Returns error code, 0 - for success + */ + virtual int decode(DecoderOutputMessage* out, uint64_t timeoutMs) = 0; + + /** + * Polls available decoded bytes from decoder, till EOF or error + */ + virtual int decode_all(const DecoderOutCallback& callback) = 0; + + /** + * Stops calling callback, releases resources + */ + virtual void shutdown() = 0; + + /** + * Interrupts whatever decoder is doing at any time + */ + virtual void interrupt() = 0; + + /** + * Factory to create ByteStorage class instances, particular implementation is + * left to the derived class. Caller provides the initially allocated size + */ + virtual std::unique_ptr<ByteStorage> createByteStorage(size_t n) = 0; +}; + +struct SamplerParameters { + MediaType type{TYPE_AUDIO}; + FormatUnion in; + FormatUnion out; + int64_t loggingUuid{0}; +}; + +/** + * Abstract class for sampling media bytes + */ +class MediaSampler { + public: + virtual ~MediaSampler() = default; + + /** + * Initializes media sampler with parameters + */ + virtual bool init(const SamplerParameters& params) = 0; + + /** + * Samples media bytes + * Returns error code < 0, or >=0 - for success, indicating number of bytes + * processed. + * set @in to null for flushing data + */ + virtual int sample(const ByteStorage* in, ByteStorage* out) = 0; + + /** + * Releases resources + */ + virtual void shutdown() = 0; + + /* + * Returns media type + */ + MediaType getMediaType() const { + return params_.type; + } + /* + * Returns formats + */ + FormatUnion getInputFormat() const { + return params_.in; + } + FormatUnion getOutFormat() const { + return params_.out; + } + + protected: + SamplerParameters params_; +}; +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a7b0128e3edecfa3b18bc3730a8e75e384d2738a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.cpp @@ -0,0 +1,71 @@ +#include "memory_buffer.h" +#include <c10/util/Logging.h> + +namespace ffmpeg { + +MemoryBuffer::MemoryBuffer(const uint8_t* buffer, size_t size) + : buffer_(buffer), len_(size) {} + +int MemoryBuffer::read(uint8_t* buf, int size) { + if (pos_ < len_) { + auto available = std::min(int(len_ - pos_), size); + memcpy(buf, buffer_ + pos_, available); + pos_ += available; + return available; + } + + return 0; +} + +int64_t MemoryBuffer::seek(int64_t offset, int whence) { + if (whence & AVSEEK_SIZE) { + return len_; + } + + // remove force flag + whence &= ~AVSEEK_FORCE; + + switch (whence) { + case SEEK_SET: + if (offset >= 0 && offset <= len_) { + pos_ = offset; + } + break; + case SEEK_END: + if (len_ + offset >= 0 && len_ + offset <= len_) { + pos_ = len_ + offset; + } + break; + case SEEK_CUR: + if (pos_ + offset > 0 && pos_ + offset <= len_) { + pos_ += offset; + } + break; + default: + LOG(ERROR) << "Unknown whence flag gets provided: " << whence; + } + return pos_; +} + +/* static */ +DecoderInCallback MemoryBuffer::getCallback( + const 
uint8_t* buffer, + size_t size) { + MemoryBuffer object(buffer, size); + return + [object](uint8_t* out, int size, int whence, uint64_t timeoutMs) mutable + -> int { + if (out) { // see defs.h file + // read mode + return object.read(out, size); + } + // seek mode + if (!timeoutMs) { + // seek capabilty, yes - supported + return 0; + } + return object.seek(size, whence); + }; +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..909626d3caed4da60a7097019350f25804ebe7e8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/memory_buffer.h @@ -0,0 +1,25 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * Class uses external memory buffer and implements a seekable interface. + */ +class MemoryBuffer { + public: + explicit MemoryBuffer(const uint8_t* buffer, size_t size); + int64_t seek(int64_t offset, int whence); + int read(uint8_t* buf, int size); + + // static constructor for decoder callback. + static DecoderInCallback getCallback(const uint8_t* buffer, size_t size); + + private: + const uint8_t* buffer_; // set at construction time + long pos_{0}; // current position + long len_{0}; // bytes in buffer +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.cpp new file mode 100644 index 0000000000000000000000000000000000000000..41e3e689c7b57be7f76cc397258991a248ab76da --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.cpp @@ -0,0 +1,139 @@ +#include "seekable_buffer.h" +#include <c10/util/Logging.h> +#include <chrono> +#include "memory_buffer.h" + +namespace ffmpeg { + +int SeekableBuffer::init( + DecoderInCallback&& in, + uint64_t timeoutMs, + size_t maxSeekableBytes, + ImageType* type) { + shutdown(); + isSeekable_ = in(nullptr, 0, 0, 0) == 0; + if (isSeekable_) { // seekable + if (type) { + if (!readBytes(in, 8, timeoutMs)) { + return -1; + } + setImageType(type); + end_ = 0; + eof_ = false; + std::vector<uint8_t>().swap(buffer_); + // reset callback + if (in(nullptr, 0, SEEK_SET, timeoutMs)) { + return -1; + } + } + inCallback_ = std::forward<DecoderInCallback>(in); + return 1; + } + + if (!readBytes(in, maxSeekableBytes + (type ? 
8 : 0), timeoutMs)) { + return -1; + } + + if (type) { + setImageType(type); + } + + if (eof_) { + end_ = 0; + eof_ = false; + // reuse MemoryBuffer functionality + inCallback_ = MemoryBuffer::getCallback(buffer_.data(), buffer_.size()); + isSeekable_ = true; + return 1; + } + inCallback_ = std::forward<DecoderInCallback>(in); + return 0; +} + +bool SeekableBuffer::readBytes( + DecoderInCallback& in, + size_t maxBytes, + uint64_t timeoutMs) { + // Resize to th minimum 4K page or less + buffer_.resize(std::min(maxBytes, size_t(4 * 1024UL))); + end_ = 0; + eof_ = false; + + auto end = + std::chrono::steady_clock::now() + std::chrono::milliseconds(timeoutMs); + auto watcher = [end]() -> bool { + return std::chrono::steady_clock::now() <= end; + }; + + bool hasTime = true; + while (!eof_ && end_ < maxBytes && (hasTime = watcher())) { + // lets read all bytes into available buffer + auto res = in(buffer_.data() + end_, buffer_.size() - end_, 0, timeoutMs); + if (res > 0) { + end_ += res; + if (end_ == buffer_.size()) { + buffer_.resize(std::min(size_t(end_ * 4UL), maxBytes)); + } + } else if (res == 0) { + eof_ = true; + } else { + // error + return false; + } + } + + buffer_.resize(end_); + + return hasTime; +} + +void SeekableBuffer::setImageType(ImageType* type) { + if (buffer_.size() > 2 && buffer_[0] == 0xFF && buffer_[1] == 0xD8 && + buffer_[2] == 0xFF) { + *type = ImageType::JPEG; + } else if ( + buffer_.size() > 3 && buffer_[1] == 'P' && buffer_[2] == 'N' && + buffer_[3] == 'G') { + *type = ImageType::PNG; + } else if ( + buffer_.size() > 1 && + ((buffer_[0] == 0x49 && buffer_[1] == 0x49) || + (buffer_[0] == 0x4D && buffer_[1] == 0x4D))) { + *type = ImageType::TIFF; + } else { + *type = ImageType::UNKNOWN; + } +} + +int SeekableBuffer::read(uint8_t* buf, int size, uint64_t timeoutMs) { + if (isSeekable_) { + return inCallback_(buf, size, 0, timeoutMs); + } + if (pos_ < end_) { + // read cached bytes for non-seekable callback + auto available = std::min(int(end_ - pos_), size); + memcpy(buf, buffer_.data() + pos_, available); + pos_ += available; + return available; + } else if (!eof_) { + // normal sequential read (see defs.h file), i.e. 
@buf != null + auto res = inCallback_(buf, size, 0, timeoutMs); // read through + eof_ = res == 0; + return res; + } else { + return 0; + } +} + +int64_t SeekableBuffer::seek(int64_t offset, int whence, uint64_t timeoutMs) { + return inCallback_(nullptr, offset, whence, timeoutMs); +} + +void SeekableBuffer::shutdown() { + pos_ = end_ = 0; + eof_ = false; + std::vector<uint8_t>().swap(buffer_); + inCallback_ = nullptr; +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.h new file mode 100644 index 0000000000000000000000000000000000000000..9d5729f53067b7eb1260b918d4119cb00ad553d7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/seekable_buffer.h @@ -0,0 +1,45 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * Class uses internal buffer to store initial size bytes as a seekable cache + * from Media provider and let ffmpeg to seek and read bytes from cache + * and beyond - reading bytes directly from Media provider + */ +enum class ImageType { + UNKNOWN = 0, + JPEG = 1, + PNG = 2, + TIFF = 3, +}; + +class SeekableBuffer { + public: + // @type is optional, not nullptr only is image detection required + // \returns 1 is buffer seekable, 0 - if not seekable, < 0 on error + int init( + DecoderInCallback&& in, + uint64_t timeoutMs, + size_t maxSeekableBytes, + ImageType* type); + int read(uint8_t* buf, int size, uint64_t timeoutMs); + int64_t seek(int64_t offset, int whence, uint64_t timeoutMs); + void shutdown(); + + private: + bool readBytes(DecoderInCallback& in, size_t maxBytes, uint64_t timeoutMs); + void setImageType(ImageType* type); + + private: + DecoderInCallback inCallback_; + std::vector<uint8_t> buffer_; // resized at init time + long pos_{0}; // current position (SEEK_CUR iff pos_ < end_) + long end_{0}; // current buffer size + bool eof_{0}; // indicates the EOF + bool isSeekable_{false}; // is callback seekable +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..37dd5805d5a08e79526e6ac1bb7b0fb67e839c6c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.cpp @@ -0,0 +1,258 @@ +#include "stream.h" +#include <c10/util/Logging.h> +#include "util.h" + +namespace ffmpeg { +const AVRational timeBaseQ = AVRational{1, AV_TIME_BASE}; + +Stream::Stream( + AVFormatContext* inputCtx, + MediaFormat format, + bool convertPtsToWallTime, + int64_t loggingUuid) + : inputCtx_(inputCtx), + format_(format), + convertPtsToWallTime_(convertPtsToWallTime), + loggingUuid_(loggingUuid) {} + +Stream::~Stream() { + if (frame_) { + av_free(frame_); + } + if (codecCtx_) { + avcodec_free_context(&codecCtx_); + } +} + +AVCodec* Stream::findCodec(AVCodecParameters* params) { + return avcodec_find_decoder(params->codec_id); +} + +int Stream::openCodec(std::vector<DecoderMetadata>* metadata) { + AVStream* steam = inputCtx_->streams[format_.stream]; + + AVCodec* codec = findCodec(steam->codecpar); + if (!codec) { + LOG(ERROR) << "LoggingUuid #" << loggingUuid_ + << ", avcodec_find_decoder failed for codec_id: " + << int(steam->codecpar->codec_id); + return AVERROR(EINVAL); + } + + if (!(codecCtx_ = avcodec_alloc_context3(codec))) { + 
LOG(ERROR) << "LoggingUuid #" << loggingUuid_ + << ", avcodec_alloc_context3 failed"; + return AVERROR(ENOMEM); + } + + int ret; + // Copy codec parameters from input stream to output codec context + if ((ret = avcodec_parameters_to_context(codecCtx_, steam->codecpar)) < 0) { + LOG(ERROR) << "LoggingUuid #" << loggingUuid_ + << ", avcodec_parameters_to_context failed"; + return ret; + } + + // after avcodec_open2, value of codecCtx_->time_base is NOT meaningful + if ((ret = avcodec_open2(codecCtx_, codec, nullptr)) < 0) { + LOG(ERROR) << "LoggingUuid #" << loggingUuid_ + << ", avcodec_open2 failed: " << Util::generateErrorDesc(ret); + avcodec_free_context(&codecCtx_); + codecCtx_ = nullptr; + return ret; + } + + frame_ = av_frame_alloc(); + + switch (format_.type) { + case TYPE_VIDEO: + fps_ = av_q2d(av_guess_frame_rate(inputCtx_, steam, nullptr)); + break; + case TYPE_AUDIO: + fps_ = codecCtx_->sample_rate; + break; + default: + fps_ = 30.0; + } + + if ((ret = initFormat())) { + LOG(ERROR) << "initFormat failed, type: " << format_.type; + } + + if (metadata) { + DecoderMetadata header; + header.format = format_; + header.fps = fps_; + header.num = steam->time_base.num; + header.den = steam->time_base.den; + header.duration = + av_rescale_q(steam->duration, steam->time_base, timeBaseQ); + metadata->push_back(header); + } + + return ret; +} + +int Stream::analyzePacket(const AVPacket* packet, bool* gotFrame) { + int consumed = 0; + int result = avcodec_send_packet(codecCtx_, packet); + if (result == AVERROR(EAGAIN)) { + *gotFrame = false; // no bytes get consumed, fetch frame + } else if (result == AVERROR_EOF) { + *gotFrame = false; // more than one flush packet + if (packet) { + // got packet after flush, this is an error + return result; + } + } else if (result < 0) { + LOG(ERROR) << "avcodec_send_packet failed, err: " + << Util::generateErrorDesc(result); + return result; // error + } else { + consumed = packet ? 
packet->size : 0; // all bytes get consumed + } + + result = avcodec_receive_frame(codecCtx_, frame_); + + if (result >= 0) { + *gotFrame = true; // frame is available + } else if (result == AVERROR(EAGAIN)) { + *gotFrame = false; // no frames at this time, needs more packets + if (!consumed) { + // precaution, if no packages got consumed and no frames are available + return result; + } + } else if (result == AVERROR_EOF) { + *gotFrame = false; // the last frame has been flushed + // precaution, if no more frames are available assume we consume all bytes + consumed = 0; + } else { // error + LOG(ERROR) << "avcodec_receive_frame failed, err: " + << Util::generateErrorDesc(result); + return result; + } + return consumed; +} + +int Stream::decodePacket( + const AVPacket* packet, + DecoderOutputMessage* out, + bool headerOnly, + bool* hasMsg) { + int consumed; + bool gotFrame = false; + *hasMsg = false; + if ((consumed = analyzePacket(packet, &gotFrame)) >= 0 && + (packet == nullptr || gotFrame)) { + int result; + if ((result = getMessage(out, !gotFrame, headerOnly)) < 0) { + return result; // report error + } + *hasMsg = result > 0; + } + return consumed; +} + +int Stream::flush(DecoderOutputMessage* out, bool headerOnly) { + bool hasMsg = false; + int result = decodePacket(nullptr, out, headerOnly, &hasMsg); + if (result < 0) { + avcodec_flush_buffers(codecCtx_); + return result; + } + if (!hasMsg) { + avcodec_flush_buffers(codecCtx_); + return 0; + } + return 1; +} + +int Stream::getMessage(DecoderOutputMessage* out, bool flush, bool headerOnly) { + if (flush) { + // only flush of audio frames makes sense + if (format_.type == TYPE_AUDIO) { + int processed = 0; + size_t total = 0; + // grab all audio bytes by chunks + do { + if ((processed = copyFrameBytes(out->payload.get(), flush)) < 0) { + return processed; + } + total += processed; + } while (processed); + + if (total) { + // set header if message bytes are available + setHeader(&out->header, flush); + return 1; + } + } + return 0; + } else { + if (format_.type == TYPE_AUDIO) { + int processed = 0; + if ((processed = copyFrameBytes(out->payload.get(), flush)) < 0) { + return processed; + } + if (processed) { + // set header if message bytes are available + setHeader(&out->header, flush); + return 1; + } + return 0; + } else { + // set header + setHeader(&out->header, flush); + + if (headerOnly) { + // Only header is requisted + return 1; + } + + return copyFrameBytes(out->payload.get(), flush); + } + } +} + +void Stream::setHeader(DecoderHeader* header, bool flush) { + header->seqno = numGenerator_++; + + setFramePts(header, flush); + + if (convertPtsToWallTime_) { + keeper_.adjust(header->pts); + } + + header->format = format_; + header->keyFrame = 0; + header->fps = std::numeric_limits<double>::quiet_NaN(); +} + +void Stream::setFramePts(DecoderHeader* header, bool flush) { + if (flush) { + header->pts = nextPts_; // already in us + } else { + header->pts = frame_->best_effort_timestamp; + if (header->pts == AV_NOPTS_VALUE) { + header->pts = nextPts_; + } else { + header->pts = av_rescale_q( + header->pts, + inputCtx_->streams[format_.stream]->time_base, + timeBaseQ); + } + + switch (format_.type) { + case TYPE_AUDIO: + nextPts_ = header->pts + frame_->nb_samples * AV_TIME_BASE / fps_; + break; + case TYPE_VIDEO: + nextPts_ = header->pts + AV_TIME_BASE / fps_; + break; + default: + nextPts_ = header->pts; + } + } +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.h 
b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.h new file mode 100644 index 0000000000000000000000000000000000000000..97dfa8b57610d2c5bb45eea5cb1ea74236ea3bef --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/stream.h @@ -0,0 +1,74 @@ +#pragma once + +#include <atomic> +#include "defs.h" +#include "time_keeper.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode one media stream (audio or video). + */ + +class Stream { + public: + Stream( + AVFormatContext* inputCtx, + MediaFormat format, + bool convertPtsToWallTime, + int64_t loggingUuid); + virtual ~Stream(); + + // returns 0 - on success or negative error + int openCodec(std::vector<DecoderMetadata>* metadata); + // returns 1 - if packet got consumed, 0 - if it's not, and < 0 on error + int decodePacket( + const AVPacket* packet, + DecoderOutputMessage* out, + bool headerOnly, + bool* hasMsg); + // returns stream index + int getIndex() const { + return format_.stream; + } + // returns 1 - if message got a payload, 0 - if it's not, and < 0 on error + int flush(DecoderOutputMessage* out, bool headerOnly); + // return media format + MediaFormat getMediaFormat() const { + return format_; + } + + protected: + virtual int initFormat() = 0; + // returns number processed bytes from packet, or negative error + virtual int analyzePacket(const AVPacket* packet, bool* gotFrame); + // returns number processed bytes from packet, or negative error + virtual int copyFrameBytes(ByteStorage* out, bool flush) = 0; + // sets output format + virtual void setHeader(DecoderHeader* header, bool flush); + // set frame pts + virtual void setFramePts(DecoderHeader* header, bool flush); + // finds codec + virtual AVCodec* findCodec(AVCodecParameters* params); + + private: + // returns 1 - if message got a payload, 0 - if it's not, and < 0 on error + int getMessage(DecoderOutputMessage* out, bool flush, bool headerOnly); + + protected: + AVFormatContext* const inputCtx_; + MediaFormat format_; + const bool convertPtsToWallTime_; + int64_t loggingUuid_; + + AVCodecContext* codecCtx_{nullptr}; + AVFrame* frame_{nullptr}; + + std::atomic<size_t> numGenerator_{0}; + TimeKeeper keeper_; + // estimated next frame pts for flushing the last frame + int64_t nextPts_{0}; + double fps_{30.}; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d0df24d3e358a42d2f4e26b2d3bcd2b563d6a53a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.cpp @@ -0,0 +1,46 @@ +#include "subtitle_sampler.h" +#include <c10/util/Logging.h> +#include "util.h" + +namespace ffmpeg { + +SubtitleSampler::~SubtitleSampler() { + cleanUp(); +} + +void SubtitleSampler::shutdown() { + cleanUp(); +} + +bool SubtitleSampler::init(const SamplerParameters& params) { + cleanUp(); + // set formats + params_ = params; + return true; +} + +int SubtitleSampler::sample(AVSubtitle* sub, ByteStorage* out) { + if (!sub || !out) { + return 0; // flush + } + + out->ensure(Util::size(*sub)); + + return Util::serialize(*sub, out); +} + +int SubtitleSampler::sample(const ByteStorage* in, ByteStorage* out) { + if (in && out) { + // Get a writable copy + if (size_t len = in->length()) { + out->ensure(len); + memcpy(out->writableTail(), in->data(), len); + } 
+ return out->length(); + } + return 0; +} + +void SubtitleSampler::cleanUp() {} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.h new file mode 100644 index 0000000000000000000000000000000000000000..4aee811ed5635fab2f64ad967691f3eb14b96561 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_sampler.h @@ -0,0 +1,32 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * Class transcode audio frames from one format into another + */ + +class SubtitleSampler : public MediaSampler { + public: + SubtitleSampler() = default; + ~SubtitleSampler() override; + + bool init(const SamplerParameters& params) override; + int sample(const ByteStorage* in, ByteStorage* out) override; + void shutdown() override; + + // returns number processed/scaling bytes + int sample(AVSubtitle* sub, ByteStorage* out); + + // helper serialization/deserialization methods + static void serialize(const AVSubtitle& sub, ByteStorage* out); + static bool deserialize(const ByteStorage& buf, AVSubtitle* sub); + + private: + // close resources + void cleanUp(); +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0d3fc9f12c12c6c191de8aeac4549427f9e06daa --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.cpp @@ -0,0 +1,83 @@ +#include "subtitle_stream.h" +#include <c10/util/Logging.h> +#include <limits> +#include "util.h" + +namespace ffmpeg { +const AVRational timeBaseQ = AVRational{1, AV_TIME_BASE}; + +SubtitleStream::SubtitleStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const SubtitleFormat& format) + : Stream( + inputCtx, + MediaFormat::makeMediaFormat(format, index), + convertPtsToWallTime, + 0) { + memset(&sub_, 0, sizeof(sub_)); +} + +void SubtitleStream::releaseSubtitle() { + if (sub_.release) { + avsubtitle_free(&sub_); + memset(&sub_, 0, sizeof(sub_)); + } +} + +SubtitleStream::~SubtitleStream() { + releaseSubtitle(); + sampler_.shutdown(); +} + +int SubtitleStream::initFormat() { + if (!codecCtx_->subtitle_header) { + LOG(ERROR) << "No subtitle header found"; + } else { + VLOG(1) << "Subtitle header found!"; + } + return 0; +} + +int SubtitleStream::analyzePacket(const AVPacket* packet, bool* gotFrame) { + // clean-up + releaseSubtitle(); + // check flush packet + AVPacket avPacket; + av_init_packet(&avPacket); + avPacket.data = nullptr; + avPacket.size = 0; + auto pkt = packet ? *packet : avPacket; + int gotFramePtr = 0; + int result = avcodec_decode_subtitle2(codecCtx_, &sub_, &gotFramePtr, &pkt); + + if (result < 0) { + LOG(ERROR) << "avcodec_decode_subtitle2 failed, err: " + << Util::generateErrorDesc(result); + return result; + } else if (result == 0) { + result = pkt.size; // discard the rest of the package + } + + sub_.release = gotFramePtr; + *gotFrame = gotFramePtr > 0; + + // set proper pts in us + if (gotFramePtr) { + sub_.pts = av_rescale_q( + pkt.pts, inputCtx_->streams[format_.stream]->time_base, timeBaseQ); + } + + return result; +} + +int SubtitleStream::copyFrameBytes(ByteStorage* out, bool flush) { + return sampler_.sample(flush ? 
nullptr : &sub_, out); +} + +void SubtitleStream::setFramePts(DecoderHeader* header, bool) { + header->pts = sub_.pts; // already in us +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.h new file mode 100644 index 0000000000000000000000000000000000000000..6c366e11f501db39cbdb71cb05b97d3128480f4b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/subtitle_stream.h @@ -0,0 +1,38 @@ +#pragma once + +#include "stream.h" +#include "subtitle_sampler.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode one subtitle stream. + */ +struct AVSubtitleKeeper : AVSubtitle { + int64_t release{0}; +}; + +class SubtitleStream : public Stream { + public: + SubtitleStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const SubtitleFormat& format); + ~SubtitleStream() override; + + protected: + void setFramePts(DecoderHeader* header, bool flush) override; + + private: + int initFormat() override; + int analyzePacket(const AVPacket* packet, bool* gotFrame) override; + int copyFrameBytes(ByteStorage* out, bool flush) override; + void releaseSubtitle(); + + private: + SubtitleSampler sampler_; + AVSubtitleKeeper sub_; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.cpp new file mode 100644 index 0000000000000000000000000000000000000000..374b40838ea13e7bc25b98878a0e23576f764fc5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.cpp @@ -0,0 +1,97 @@ +#include "sync_decoder.h" +#include <c10/util/Logging.h> + +namespace ffmpeg { + +SyncDecoder::AVByteStorage::AVByteStorage(size_t n) { + ensure(n); +} + +SyncDecoder::AVByteStorage::~AVByteStorage() { + av_free(buffer_); +} + +void SyncDecoder::AVByteStorage::ensure(size_t n) { + if (tail() < n) { + capacity_ = offset_ + length_ + n; + buffer_ = static_cast<uint8_t*>(av_realloc(buffer_, capacity_)); + } +} + +uint8_t* SyncDecoder::AVByteStorage::writableTail() { + CHECK_LE(offset_ + length_, capacity_); + return buffer_ + offset_ + length_; +} + +void SyncDecoder::AVByteStorage::append(size_t n) { + CHECK_LE(n, tail()); + length_ += n; +} + +void SyncDecoder::AVByteStorage::trim(size_t n) { + CHECK_LE(n, length_); + offset_ += n; + length_ -= n; +} + +const uint8_t* SyncDecoder::AVByteStorage::data() const { + return buffer_ + offset_; +} + +size_t SyncDecoder::AVByteStorage::length() const { + return length_; +} + +size_t SyncDecoder::AVByteStorage::tail() const { + CHECK_LE(offset_ + length_, capacity_); + return capacity_ - offset_ - length_; +} + +void SyncDecoder::AVByteStorage::clear() { + offset_ = 0; + length_ = 0; +} + +std::unique_ptr<ByteStorage> SyncDecoder::createByteStorage(size_t n) { + return std::make_unique<AVByteStorage>(n); +} + +void SyncDecoder::onInit() { + eof_ = false; + queue_.clear(); +} + +int SyncDecoder::decode(DecoderOutputMessage* out, uint64_t timeoutMs) { + if (eof_ && queue_.empty()) { + return ENODATA; + } + + if (queue_.empty()) { + int result = getFrame(timeoutMs); + // assign EOF + eof_ = result == ENODATA; + // check unrecoverable error, any error but ENODATA + if (result && result != ENODATA) { + return result; + } + + // still empty + if (queue_.empty()) { + if (eof_) { + 
return ENODATA; + } else { + LOG(INFO) << "Queue is empty"; + return ETIMEDOUT; + } + } + } + + *out = std::move(queue_.front()); + queue_.pop_front(); + return 0; +} + +void SyncDecoder::push(DecoderOutputMessage&& buffer) { + queue_.push_back(std::move(buffer)); +} +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.h new file mode 100644 index 0000000000000000000000000000000000000000..b7cf7b625ac8b95ae5e33f47bee2645e33d212ad --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder.h @@ -0,0 +1,48 @@ +#pragma once + +#include <list> +#include "decoder.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode media streams. + * Media bytes can be explicitly provided through read-callback + * or fetched internally by FFMPEG library + */ +class SyncDecoder : public Decoder { + public: + // Allocation of memory must be done with a proper alignment. + class AVByteStorage : public ByteStorage { + public: + explicit AVByteStorage(size_t n); + ~AVByteStorage() override; + void ensure(size_t n) override; + uint8_t* writableTail() override; + void append(size_t n) override; + void trim(size_t n) override; + const uint8_t* data() const override; + size_t length() const override; + size_t tail() const override; + void clear() override; + + private: + size_t offset_{0}; + size_t length_{0}; + size_t capacity_{0}; + uint8_t* buffer_{nullptr}; + }; + + public: + int decode(DecoderOutputMessage* out, uint64_t timeoutMs) override; + + private: + void push(DecoderOutputMessage&& buffer) override; + void onInit() override; + std::unique_ptr<ByteStorage> createByteStorage(size_t n) override; + + private: + std::list<DecoderOutputMessage> queue_; + bool eof_{false}; +}; +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder_test.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6109b12685e4f0dd1f0e92c7878420e1197bb14c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/sync_decoder_test.cpp @@ -0,0 +1,412 @@ +#include <c10/util/Logging.h> +#include <dirent.h> +#include <gtest/gtest.h> +#include "memory_buffer.h" +#include "sync_decoder.h" +#include "util.h" + +using namespace ffmpeg; + +namespace { +struct VideoFileStats { + std::string name; + size_t durationPts{0}; + int num{0}; + int den{0}; + int fps{0}; +}; + +void gotAllTestFiles( + const std::string& folder, + std::vector<VideoFileStats>* stats) { + DIR* d = opendir(folder.c_str()); + CHECK(d); + struct dirent* dir; + while ((dir = readdir(d))) { + if (dir->d_type != DT_DIR && 0 != strcmp(dir->d_name, "README")) { + VideoFileStats item; + item.name = folder + '/' + dir->d_name; + LOG(INFO) << "Found video file: " << item.name; + stats->push_back(std::move(item)); + } + } + closedir(d); +} + +void gotFilesStats(std::vector<VideoFileStats>& stats) { + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.formats = {MediaFormat(0)}; + params.headerOnly = true; + params.preventStaleness = false; + size_t avgProvUs = 0; + const size_t rounds = 100; + for (auto& item : stats) { + LOG(INFO) << "Decoding video file in memory: " << item.name; + FILE* f = 
fopen(item.name.c_str(), "rb"); + CHECK(f != nullptr); + fseek(f, 0, SEEK_END); + std::vector<uint8_t> buffer(ftell(f)); + rewind(f); + CHECK_EQ(buffer.size(), fread(buffer.data(), 1, buffer.size(), f)); + fclose(f); + + for (size_t i = 0; i < rounds; ++i) { + SyncDecoder decoder; + std::vector<DecoderMetadata> metadata; + const auto now = std::chrono::steady_clock::now(); + CHECK(decoder.init( + params, + MemoryBuffer::getCallback(buffer.data(), buffer.size()), + &metadata)); + const auto then = std::chrono::steady_clock::now(); + decoder.shutdown(); + avgProvUs += + std::chrono::duration_cast<std::chrono::microseconds>(then - now) + .count(); + CHECK_EQ(metadata.size(), 1); + item.num = metadata[0].num; + item.den = metadata[0].den; + item.fps = metadata[0].fps; + item.durationPts = + av_rescale_q(metadata[0].duration, AV_TIME_BASE_Q, {1, item.fps}); + } + } + LOG(INFO) << "Probing (us) " << avgProvUs / stats.size() / rounds; +} + +size_t measurePerformanceUs( + const std::vector<VideoFileStats>& stats, + size_t rounds, + size_t num, + size_t stride) { + size_t avgClipDecodingUs = 0; + std::srand(time(nullptr)); + for (const auto& item : stats) { + FILE* f = fopen(item.name.c_str(), "rb"); + CHECK(f != nullptr); + fseek(f, 0, SEEK_END); + std::vector<uint8_t> buffer(ftell(f)); + rewind(f); + CHECK_EQ(buffer.size(), fread(buffer.data(), 1, buffer.size(), f)); + fclose(f); + + for (size_t i = 0; i < rounds; ++i) { + // randomy select clip + size_t rOffset = std::rand(); + size_t fOffset = rOffset % item.durationPts; + size_t clipFrames = num + (num - 1) * stride; + if (fOffset + clipFrames > item.durationPts) { + fOffset = item.durationPts - clipFrames; + } + + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.preventStaleness = false; + + for (size_t n = 0; n < num; ++n) { + std::list<DecoderOutputMessage> msgs; + + params.startOffset = + av_rescale_q(fOffset, {1, item.fps}, AV_TIME_BASE_Q); + params.endOffset = params.startOffset + 100; + + auto now = std::chrono::steady_clock::now(); + SyncDecoder decoder; + CHECK(decoder.init( + params, + MemoryBuffer::getCallback(buffer.data(), buffer.size()), + nullptr)); + DecoderOutputMessage out; + while (0 == decoder.decode(&out, params.timeoutMs)) { + msgs.push_back(std::move(out)); + } + + decoder.shutdown(); + + const auto then = std::chrono::steady_clock::now(); + + fOffset += 1 + stride; + + avgClipDecodingUs += + std::chrono::duration_cast<std::chrono::microseconds>(then - now) + .count(); + } + } + } + + return avgClipDecodingUs / rounds / num / stats.size(); +} + +void runDecoder(SyncDecoder& decoder) { + DecoderOutputMessage out; + size_t audioFrames = 0, videoFrames = 0, totalBytes = 0; + while (0 == decoder.decode(&out, 10000)) { + if (out.header.format.type == TYPE_AUDIO) { + ++audioFrames; + } else if (out.header.format.type == TYPE_VIDEO) { + ++videoFrames; + } else if (out.header.format.type == TYPE_SUBTITLE && out.payload) { + // deserialize + LOG(INFO) << "Deserializing subtitle"; + AVSubtitle sub; + memset(&sub, 0, sizeof(sub)); + EXPECT_TRUE(Util::deserialize(*out.payload, &sub)); + LOG(INFO) << "Found subtitles" + << ", num rects: " << sub.num_rects; + for (int i = 0; i < sub.num_rects; ++i) { + std::string text = "picture"; + if (sub.rects[i]->type == SUBTITLE_TEXT) { + text = sub.rects[i]->text; + } else if (sub.rects[i]->type == SUBTITLE_ASS) { + text = sub.rects[i]->ass; + } + + LOG(INFO) << "Rect num: " << i << ", type:" << sub.rects[i]->type + 
<< ", text: " << text; + } + + avsubtitle_free(&sub); + } + if (out.payload) { + totalBytes += out.payload->length(); + } + } + LOG(INFO) << "Decoded audio frames: " << audioFrames + << ", video frames: " << videoFrames + << ", total bytes: " << totalBytes; +} +} // namespace + +TEST(SyncDecoder, TestSyncDecoderPerformance) { + // Measure the average time of decoding per clip + // 1. list of the videos in testing directory + // 2. for each video got number of frames with timestamps + // 3. randomly select frame offset + // 4. adjust offset for number frames and strides, + // if it's out out upper boundary + // 5. repeat multiple times, measuring and accumulating decoding time + // per clip. + /* + 1) 4 x 2 + 2) 8 x 8 + 3) 16 x 8 + 4) 32 x 4 + */ + const std::string kFolder = "pytorch/vision/test/assets/videos"; + std::vector<VideoFileStats> stats; + gotAllTestFiles(kFolder, &stats); + gotFilesStats(stats); + + const size_t kRounds = 10; + + auto new4x2 = measurePerformanceUs(stats, kRounds, 4, 2); + auto new8x8 = measurePerformanceUs(stats, kRounds, 8, 8); + auto new16x8 = measurePerformanceUs(stats, kRounds, 16, 8); + auto new32x4 = measurePerformanceUs(stats, kRounds, 32, 4); + LOG(INFO) << "Clip decoding (us)" + << ", new(4x2): " << new4x2 << ", new(8x8): " << new8x8 + << ", new(16x8): " << new16x8 << ", new(32x4): " << new32x4; +} + +TEST(SyncDecoder, Test) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + params.uri = "pytorch/vision/test/assets/videos/R6llTwEh07w.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestSubtitles) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + params.uri = "vue/synergy/data/robotsub.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestHeadersOnly) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.headerOnly = true; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + + params.uri = "pytorch/vision/test/assets/videos/R6llTwEh07w.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); + + params.uri = "pytorch/vision/test/assets/videos/SOX5yA1l24A.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); + + params.uri = "pytorch/vision/test/assets/videos/WUzgd7C1pWA.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestHeadersOnlyDownSampling) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.headerOnly = true; + MediaFormat format; + format.type = TYPE_AUDIO; + format.format.audio.samples = 8000; + params.formats.insert(format); + + format.type = TYPE_VIDEO; + format.format.video.width = 224; + format.format.video.height = 224; + params.formats.insert(format); + + params.uri = "pytorch/vision/test/assets/videos/R6llTwEh07w.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); + + params.uri = 
"pytorch/vision/test/assets/videos/SOX5yA1l24A.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); + + params.uri = "pytorch/vision/test/assets/videos/WUzgd7C1pWA.mp4"; + CHECK(decoder.init(params, nullptr, nullptr)); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestInitOnlyNoShutdown) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.seekAccuracy = 100000; + params.headerOnly = false; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + params.uri = "pytorch/vision/test/assets/videos/R6llTwEh07w.mp4"; + std::vector<DecoderMetadata> metadata; + CHECK(decoder.init(params, nullptr, &metadata)); +} + +TEST(SyncDecoder, TestMemoryBuffer) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.endOffset = 9000000; + params.seekAccuracy = 10000; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + + FILE* f = fopen( + "pytorch/vision/test/assets/videos/RATRACE_wave_f_nm_np1_fr_goo_37.avi", + "rb"); + CHECK(f != nullptr); + fseek(f, 0, SEEK_END); + std::vector<uint8_t> buffer(ftell(f)); + rewind(f); + CHECK_EQ(buffer.size(), fread(buffer.data(), 1, buffer.size(), f)); + fclose(f); + CHECK(decoder.init( + params, + MemoryBuffer::getCallback(buffer.data(), buffer.size()), + nullptr)); + LOG(INFO) << "Decoding from memory bytes: " << buffer.size(); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestMemoryBufferNoSeekableWithFullRead) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.endOffset = 9000000; + params.seekAccuracy = 10000; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + + FILE* f = fopen("pytorch/vision/test/assets/videos/R6llTwEh07w.mp4", "rb"); + CHECK(f != nullptr); + fseek(f, 0, SEEK_END); + std::vector<uint8_t> buffer(ftell(f)); + rewind(f); + CHECK_EQ(buffer.size(), fread(buffer.data(), 1, buffer.size(), f)); + fclose(f); + + params.maxSeekableBytes = buffer.size() + 1; + MemoryBuffer object(buffer.data(), buffer.size()); + CHECK(decoder.init( + params, + [object](uint8_t* out, int size, int whence, uint64_t timeoutMs) mutable + -> int { + if (out) { // see defs.h file + // read mode + return object.read(out, size); + } + // seek mode + if (!timeoutMs) { + // seek capabilty, yes - no + return -1; + } + return object.seek(size, whence); + }, + nullptr)); + runDecoder(decoder); + decoder.shutdown(); +} + +TEST(SyncDecoder, TestMemoryBufferNoSeekableWithPartialRead) { + SyncDecoder decoder; + DecoderParameters params; + params.timeoutMs = 10000; + params.startOffset = 1000000; + params.endOffset = 9000000; + params.seekAccuracy = 10000; + params.formats = {MediaFormat(), MediaFormat(0), MediaFormat('0')}; + + FILE* f = fopen("pytorch/vision/test/assets/videos/R6llTwEh07w.mp4", "rb"); + CHECK(f != nullptr); + fseek(f, 0, SEEK_END); + std::vector<uint8_t> buffer(ftell(f)); + rewind(f); + CHECK_EQ(buffer.size(), fread(buffer.data(), 1, buffer.size(), f)); + fclose(f); + + params.maxSeekableBytes = buffer.size() / 2; + MemoryBuffer object(buffer.data(), buffer.size()); + CHECK(!decoder.init( + params, + [object](uint8_t* out, int size, int whence, uint64_t timeoutMs) mutable + -> int { + if (out) { // see defs.h file + // read mode + return object.read(out, size); + } + // seek mode + if (!timeoutMs) { + // seek capabilty, yes - 
no + return -1; + } + return object.seek(size, whence); + }, + nullptr)); +} diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.cpp new file mode 100644 index 0000000000000000000000000000000000000000..845c76cddc81dcf5d1f97a7118f7c95324d48da4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.cpp @@ -0,0 +1,35 @@ +#include "time_keeper.h" +#include "defs.h" + +namespace ffmpeg { + +namespace { +const long kMaxTimeBaseDiference = 10; +} + +long TimeKeeper::adjust(long& decoderTimestamp) { + const long now = std::chrono::duration_cast<std::chrono::microseconds>( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + + if (startTime_ == 0) { + startTime_ = now; + } + if (streamTimestamp_ == 0) { + streamTimestamp_ = decoderTimestamp; + } + + const auto runOut = startTime_ + decoderTimestamp - streamTimestamp_; + + if (std::labs((now - runOut) / AV_TIME_BASE) > kMaxTimeBaseDiference) { + streamTimestamp_ = startTime_ - now + decoderTimestamp; + } + + const auto sleepAdvised = runOut - now; + + decoderTimestamp += startTime_ - streamTimestamp_; + + return sleepAdvised > 0 ? sleepAdvised : 0; +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.h new file mode 100644 index 0000000000000000000000000000000000000000..e4d4718c705d98e9c07bbc884324060ac38aebfc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/time_keeper.h @@ -0,0 +1,25 @@ +#pragma once + +#include <stdlib.h> +#include <chrono> + +namespace ffmpeg { + +/** + * Class keeps the track of the decoded timestamps (us) for media streams. 
+ */ + +class TimeKeeper { + public: + TimeKeeper() = default; + + // adjust provided @timestamp to the corrected value + // return advised sleep time before next frame processing in (us) + long adjust(long& decoderTimestamp); + + private: + long startTime_{0}; + long streamTimestamp_{0}; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.cpp new file mode 100644 index 0000000000000000000000000000000000000000..658876ff600bc6f5972e4f6534bdfeee3360bc29 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.cpp @@ -0,0 +1,402 @@ +#include "util.h" +#include <c10/util/Logging.h> + +namespace ffmpeg { + +namespace Serializer { + +// fixed size types +template <typename T> +inline size_t getSize(const T& x) { + return sizeof(x); +} + +template <typename T> +inline bool serializeItem( + uint8_t* dest, + size_t len, + size_t& pos, + const T& src) { + VLOG(6) << "Generic serializeItem"; + const auto required = sizeof(src); + if (len < pos + required) { + return false; + } + memcpy(dest + pos, &src, required); + pos += required; + return true; +} + +template <typename T> +inline bool deserializeItem( + const uint8_t* src, + size_t len, + size_t& pos, + T& dest) { + const auto required = sizeof(dest); + if (len < pos + required) { + return false; + } + memcpy(&dest, src + pos, required); + pos += required; + return true; +} + +// AVSubtitleRect specialization +inline size_t getSize(const AVSubtitleRect& x) { + auto rectBytes = [](const AVSubtitleRect& y) -> size_t { + size_t s = 0; + switch (y.type) { + case SUBTITLE_BITMAP: + for (int i = 0; i < y.nb_colors; ++i) { + s += sizeof(y.linesize[i]); + s += y.linesize[i]; + } + break; + case SUBTITLE_TEXT: + s += sizeof(size_t); + s += strlen(y.text); + break; + case SUBTITLE_ASS: + s += sizeof(size_t); + s += strlen(y.ass); + break; + default: + break; + } + return s; + }; + return getSize(x.x) + getSize(x.y) + getSize(x.w) + getSize(x.h) + + getSize(x.nb_colors) + getSize(x.type) + getSize(x.flags) + rectBytes(x); +} + +// AVSubtitle specialization +inline size_t getSize(const AVSubtitle& x) { + auto rectBytes = [](const AVSubtitle& y) -> size_t { + size_t s = getSize(y.num_rects); + for (unsigned i = 0; i < y.num_rects; ++i) { + s += getSize(*y.rects[i]); + } + return s; + }; + return getSize(x.format) + getSize(x.start_display_time) + + getSize(x.end_display_time) + getSize(x.pts) + rectBytes(x); +} + +inline bool serializeItem( + uint8_t* dest, + size_t len, + size_t& pos, + const AVSubtitleRect& src) { + auto rectSerialize = + [](uint8_t* d, size_t l, size_t& p, const AVSubtitleRect& x) -> size_t { + switch (x.type) { + case SUBTITLE_BITMAP: + for (int i = 0; i < x.nb_colors; ++i) { + if (!serializeItem(d, l, p, x.linesize[i])) { + return false; + } + if (p + x.linesize[i] > l) { + return false; + } + memcpy(d + p, x.data[i], x.linesize[i]); + p += x.linesize[i]; + } + return true; + case SUBTITLE_TEXT: { + const size_t s = strlen(x.text); + if (!serializeItem(d, l, p, s)) { + return false; + } + if (p + s > l) { + return false; + } + memcpy(d + p, x.text, s); + p += s; + return true; + } + case SUBTITLE_ASS: { + const size_t s = strlen(x.ass); + if (!serializeItem(d, l, p, s)) { + return false; + } + if (p + s > l) { + return false; + } + memcpy(d + p, x.ass, s); + p += s; + return true; + } + default: + return true; + } + }; + return serializeItem(dest, len, pos, src.x) 
&& + serializeItem(dest, len, pos, src.y) && + serializeItem(dest, len, pos, src.w) && + serializeItem(dest, len, pos, src.h) && + serializeItem(dest, len, pos, src.nb_colors) && + serializeItem(dest, len, pos, src.type) && + serializeItem(dest, len, pos, src.flags) && + rectSerialize(dest, len, pos, src); +} + +inline bool serializeItem( + uint8_t* dest, + size_t len, + size_t& pos, + const AVSubtitle& src) { + auto rectSerialize = + [](uint8_t* d, size_t l, size_t& p, const AVSubtitle& x) -> bool { + bool res = serializeItem(d, l, p, x.num_rects); + for (unsigned i = 0; res && i < x.num_rects; ++i) { + res = serializeItem(d, l, p, *(x.rects[i])); + } + return res; + }; + VLOG(6) << "AVSubtitle serializeItem"; + return serializeItem(dest, len, pos, src.format) && + serializeItem(dest, len, pos, src.start_display_time) && + serializeItem(dest, len, pos, src.end_display_time) && + serializeItem(dest, len, pos, src.pts) && + rectSerialize(dest, len, pos, src); +} + +inline bool deserializeItem( + const uint8_t* src, + size_t len, + size_t& pos, + AVSubtitleRect& dest) { + auto rectDeserialize = + [](const uint8_t* y, size_t l, size_t& p, AVSubtitleRect& x) -> bool { + switch (x.type) { + case SUBTITLE_BITMAP: + for (int i = 0; i < x.nb_colors; ++i) { + if (!deserializeItem(y, l, p, x.linesize[i])) { + return false; + } + if (p + x.linesize[i] > l) { + return false; + } + x.data[i] = (uint8_t*)av_malloc(x.linesize[i]); + memcpy(x.data[i], y + p, x.linesize[i]); + p += x.linesize[i]; + } + return true; + case SUBTITLE_TEXT: { + size_t s = 0; + if (!deserializeItem(y, l, p, s)) { + return false; + } + if (p + s > l) { + return false; + } + x.text = (char*)av_malloc(s + 1); + memcpy(x.text, y + p, s); + x.text[s] = 0; + p += s; + return true; + } + case SUBTITLE_ASS: { + size_t s = 0; + if (!deserializeItem(y, l, p, s)) { + return false; + } + if (p + s > l) { + return false; + } + x.ass = (char*)av_malloc(s + 1); + memcpy(x.ass, y + p, s); + x.ass[s] = 0; + p += s; + return true; + } + default: + return true; + } + }; + + return deserializeItem(src, len, pos, dest.x) && + deserializeItem(src, len, pos, dest.y) && + deserializeItem(src, len, pos, dest.w) && + deserializeItem(src, len, pos, dest.h) && + deserializeItem(src, len, pos, dest.nb_colors) && + deserializeItem(src, len, pos, dest.type) && + deserializeItem(src, len, pos, dest.flags) && + rectDeserialize(src, len, pos, dest); +} + +inline bool deserializeItem( + const uint8_t* src, + size_t len, + size_t& pos, + AVSubtitle& dest) { + auto rectDeserialize = + [](const uint8_t* y, size_t l, size_t& p, AVSubtitle& x) -> bool { + bool res = deserializeItem(y, l, p, x.num_rects); + if (res && x.num_rects) { + x.rects = + (AVSubtitleRect**)av_malloc(x.num_rects * sizeof(AVSubtitleRect*)); + } + for (unsigned i = 0; res && i < x.num_rects; ++i) { + x.rects[i] = (AVSubtitleRect*)av_malloc(sizeof(AVSubtitleRect)); + memset(x.rects[i], 0, sizeof(AVSubtitleRect)); + res = deserializeItem(y, l, p, *x.rects[i]); + } + return res; + }; + return deserializeItem(src, len, pos, dest.format) && + deserializeItem(src, len, pos, dest.start_display_time) && + deserializeItem(src, len, pos, dest.end_display_time) && + deserializeItem(src, len, pos, dest.pts) && + rectDeserialize(src, len, pos, dest); +} +} // namespace Serializer + +namespace Util { +std::string generateErrorDesc(int errorCode) { + std::array<char, 1024> buffer; + if (av_strerror(errorCode, buffer.data(), buffer.size()) < 0) { + return std::string("Unknown error code: ") + 
std::to_string(errorCode); + } + buffer.back() = 0; + return std::string(buffer.data()); +} + +size_t serialize(const AVSubtitle& sub, ByteStorage* out) { + const auto len = size(sub); + CHECK_LE(len, out->tail()); + size_t pos = 0; + if (!Serializer::serializeItem(out->writableTail(), len, pos, sub)) { + return 0; + } + out->append(len); + return len; +} + +bool deserialize(const ByteStorage& buf, AVSubtitle* sub) { + size_t pos = 0; + return Serializer::deserializeItem(buf.data(), buf.length(), pos, *sub); +} + +size_t size(const AVSubtitle& sub) { + return Serializer::getSize(sub); +} + +bool validateVideoFormat(const VideoFormat& f) { + // clang-format off + /* + Valid parameters values for decoder + ____________________________________________________________________________________ + | W | H | minDimension | maxDimension | cropImage | algorithm | + |__________________________________________________________________________________| + | 0 | 0 | 0 | 0 | N/A | original | + |__________________________________________________________________________________| + | >0 | 0 | N/A | N/A | N/A | scale keeping W | + |__________________________________________________________________________________| + | 0 | >0 | N/A | N/A | N/A | scale keeping H | + |__________________________________________________________________________________| + | >0 | >0 | N/A | N/A | 0 | stretch/scale | + |__________________________________________________________________________________| + | >0 | >0 | N/A | N/A | >0 | scale/crop | + |__________________________________________________________________________________| + | 0 | 0 | >0 | 0 | N/A |scale to min dimension | + |__________________________________________________________________________________| + | 0 | 0 | 0 | >0 | N/A |scale to max dimension | + |__________________________________________________________________________________| + | 0 | 0 | >0 | >0 | N/A |stretch to min/max dimension| + |_____|_____|______________|______________|___________|____________________________| + + */ + // clang-format on + return (f.width == 0 && // #1, #6, #7 and #8 + f.height == 0 && f.cropImage == 0) || + (f.width != 0 && // #4 and #5 + f.height != 0 && f.minDimension == 0 && f.maxDimension == 0) || + (((f.width != 0 && // #2 + f.height == 0) || + (f.width == 0 && // #3 + f.height != 0)) && + f.minDimension == 0 && f.maxDimension == 0 && f.cropImage == 0); +} + +void setFormatDimensions( + size_t& destW, + size_t& destH, + size_t userW, + size_t userH, + size_t srcW, + size_t srcH, + size_t minDimension, + size_t maxDimension, + size_t cropImage) { + // rounding rules + // int -> double -> round up + // if fraction is >= 0.5 or round down if fraction is < 0.5 + // int result = double(value) + 0.5 + // here we rounding double to int according to the above rule + + // #1, #6, #7 and #8 + if (userW == 0 && userH == 0) { + if (minDimension > 0 && maxDimension == 0) { // #6 + if (srcW > srcH) { + // landscape + destH = minDimension; + destW = round(double(srcW * minDimension) / srcH); + } else { + // portrait + destW = minDimension; + destH = round(double(srcH * minDimension) / srcW); + } + } else if (minDimension == 0 && maxDimension > 0) { // #7 + if (srcW > srcH) { + // landscape + destW = maxDimension; + destH = round(double(srcH * maxDimension) / srcW); + } else { + // portrait + destH = maxDimension; + destW = round(double(srcW * maxDimension) / srcH); + } + } else if (minDimension > 0 && maxDimension > 0) { // #8 + if (srcW > srcH) { + // landscape + destW = maxDimension; + destH = 
minDimension; + } else { + // portrait + destW = minDimension; + destH = maxDimension; + } + } else { // #1 + destW = srcW; + destH = srcH; + } + } else if (userW != 0 && userH == 0) { // #2 + destW = userW; + destH = round(double(srcH * userW) / srcW); + } else if (userW == 0 && userH != 0) { // #3 + destW = round(double(srcW * userH) / srcH); + destH = userH; + } else { // userW != 0 && userH != 0 + if (cropImage == 0) { // #4 + destW = userW; + destH = userH; + } else { // #5 + double userSlope = double(userH) / userW; + double srcSlope = double(srcH) / srcW; + if (srcSlope < userSlope) { + destW = round(double(srcW * userH) / srcH); + destH = userH; + } else { + destW = userW; + destH = round(double(srcH * userW) / srcW); + } + } + } + // prevent zeros + destW = std::max(destW, size_t(1UL)); + destH = std::max(destH, size_t(1UL)); +} +} // namespace Util +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.h new file mode 100644 index 0000000000000000000000000000000000000000..01b550e5bbc5eacb49b0e03c4f306ffcce2772d7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util.h @@ -0,0 +1,28 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * FFMPEG library utility functions. + */ + +namespace Util { +std::string generateErrorDesc(int errorCode); +size_t serialize(const AVSubtitle& sub, ByteStorage* out); +bool deserialize(const ByteStorage& buf, AVSubtitle* sub); +size_t size(const AVSubtitle& sub); +void setFormatDimensions( + size_t& destW, + size_t& destH, + size_t userW, + size_t userH, + size_t srcW, + size_t srcH, + size_t minDimension, + size_t maxDimension, + size_t cropImage); +bool validateVideoFormat(const VideoFormat& format); +} // namespace Util +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util_test.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util_test.cpp new file mode 100644 index 0000000000000000000000000000000000000000..78de08b7139b2bdade1187f1f4dc8bf33d0190cf --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/util_test.cpp @@ -0,0 +1,35 @@ +#include <c10/util/Logging.h> +#include <dirent.h> +#include <gtest/gtest.h> +#include "util.h" + +TEST(Util, TestSetFormatDimensions) { + // clang-format off + const size_t test_cases[][9] = { + // (userW, userH, srcW, srcH, minDimension, maxDimension, cropImage, destW, destH) + {0, 0, 172, 128, 0, 0, 0, 172, 128}, // #1 + {86, 0, 172, 128, 0, 0, 0, 86, 64}, // #2 + {64, 0, 128, 172, 0, 0, 0, 64, 86}, // #2 + {0, 32, 172, 128, 0, 0, 0, 43, 32}, // #3 + {32, 0, 128, 172, 0, 0, 0, 32, 43}, // #3 + {60, 50, 172, 128, 0, 0, 0, 60, 50}, // #4 + {50, 60, 128, 172, 0, 0, 0, 50, 60}, // #4 + {86, 40, 172, 128, 0, 0, 1, 86, 64}, // #5 + {86, 92, 172, 128, 0, 0, 1, 124, 92}, // #5 + {0, 0, 172, 128, 256, 0, 0, 344, 256}, // #6 + {0, 0, 128, 172, 256, 0, 0, 256, 344}, // #6 + {0, 0, 128, 172, 0, 344, 0, 256, 344}, // #7 + {0, 0, 172, 128, 0, 344, 0, 344, 256}, // #7 + {0, 0, 172, 128, 100, 344, 0, 344, 100},// #8 + {0, 0, 128, 172, 100, 344, 0, 100, 344} // #8 + }; + // clang-format onn + + for (const auto& tc : test_cases) { + size_t destW = 0; + size_t destH = 0; + ffmpeg::Util::setFormatDimensions(destW, destH, tc[0], tc[1], tc[2], tc[3], tc[4], tc[5], tc[6]); + CHECK(destW == tc[7]); + CHECK(destH == tc[8]); + } +} diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5b9726b7c6c4e783cebb5ff41c433b665b0d822a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.cpp @@ -0,0 +1,261 @@ +#include "video_sampler.h" +#include <c10/util/Logging.h> +#include "util.h" + +// www.ffmpeg.org/doxygen/0.5/swscale-example_8c-source.html + +namespace ffmpeg { + +namespace { +int preparePlanes( + const VideoFormat& fmt, + const uint8_t* buffer, + uint8_t** planes, + int* lineSize) { + int result; + + if ((result = av_image_fill_arrays( + planes, + lineSize, + buffer, + (AVPixelFormat)fmt.format, + fmt.width, + fmt.height, + 1)) < 0) { + LOG(ERROR) << "av_image_fill_arrays failed, err: " + << Util::generateErrorDesc(result); + } + return result; +} + +int transformImage( + SwsContext* context, + const uint8_t* const srcSlice[], + int srcStride[], + VideoFormat inFormat, + VideoFormat outFormat, + uint8_t* out, + uint8_t* planes[], + int lines[]) { + int result; + if ((result = preparePlanes(outFormat, out, planes, lines)) < 0) { + return result; + } + + if ((result = sws_scale( + context, srcSlice, srcStride, 0, inFormat.height, planes, lines)) < + 0) { + LOG(ERROR) << "sws_scale failed, err: " << Util::generateErrorDesc(result); + return result; + } + return 0; +} +} // namespace + +VideoSampler::VideoSampler(int swsFlags, int64_t loggingUuid) + : swsFlags_(swsFlags), loggingUuid_(loggingUuid) {} + +VideoSampler::~VideoSampler() { + cleanUp(); +} + +void VideoSampler::shutdown() { + cleanUp(); +} + +bool VideoSampler::init(const SamplerParameters& params) { + cleanUp(); + + if (params.out.video.cropImage != 0) { + if (!Util::validateVideoFormat(params.out.video)) { + LOG(ERROR) << "Invalid video format" + << ", width: " << params.out.video.width + << ", height: " << params.out.video.height + << ", format: " << params.out.video.format + << ", minDimension: " << params.out.video.minDimension + << ", crop: " << params.out.video.cropImage; + + return false; + } + + scaleFormat_.format = params.out.video.format; + Util::setFormatDimensions( + scaleFormat_.width, + scaleFormat_.height, + params.out.video.width, + params.out.video.height, + params.in.video.width, + params.in.video.height, + 0, + 0, + 1); + + if (!(scaleFormat_ == params_.out.video)) { // crop required + cropContext_ = sws_getContext( + params.out.video.width, + params.out.video.height, + (AVPixelFormat)params.out.video.format, + params.out.video.width, + params.out.video.height, + (AVPixelFormat)params.out.video.format, + swsFlags_, + nullptr, + nullptr, + nullptr); + + if (!cropContext_) { + LOG(ERROR) << "sws_getContext failed for crop context"; + return false; + } + + const auto scaleImageSize = av_image_get_buffer_size( + (AVPixelFormat)scaleFormat_.format, + scaleFormat_.width, + scaleFormat_.height, + 1); + scaleBuffer_.resize(scaleImageSize); + } + } else { + scaleFormat_ = params.out.video; + } + + VLOG(1) << "Input format #" << loggingUuid_ << ", width " + << params.in.video.width << ", height " << params.in.video.height + << ", format " << params.in.video.format << ", minDimension " + << params.in.video.minDimension << ", cropImage " + << params.in.video.cropImage; + VLOG(1) << "Scale format #" << loggingUuid_ << ", width " + << scaleFormat_.width << ", height " << scaleFormat_.height + << ", format " << 
scaleFormat_.format << ", minDimension " + << scaleFormat_.minDimension << ", cropImage " + << scaleFormat_.cropImage; + VLOG(1) << "Crop format #" << loggingUuid_ << ", width " + << params.out.video.width << ", height " << params.out.video.height + << ", format " << params.out.video.format << ", minDimension " + << params.out.video.minDimension << ", cropImage " + << params.out.video.cropImage; + + scaleContext_ = sws_getContext( + params.in.video.width, + params.in.video.height, + (AVPixelFormat)params.in.video.format, + scaleFormat_.width, + scaleFormat_.height, + (AVPixelFormat)scaleFormat_.format, + swsFlags_, + nullptr, + nullptr, + nullptr); + + // set output format + params_ = params; + + return scaleContext_ != nullptr; +} + +int VideoSampler::sample( + const uint8_t* const srcSlice[], + int srcStride[], + ByteStorage* out) { + int result; + // scaled and cropped image + int outImageSize = av_image_get_buffer_size( + (AVPixelFormat)params_.out.video.format, + params_.out.video.width, + params_.out.video.height, + 1); + + out->ensure(outImageSize); + + uint8_t* scalePlanes[4] = {nullptr}; + int scaleLines[4] = {0}; + // perform scale first + if ((result = transformImage( + scaleContext_, + srcSlice, + srcStride, + params_.in.video, + scaleFormat_, + // for crop use internal buffer + cropContext_ ? scaleBuffer_.data() : out->writableTail(), + scalePlanes, + scaleLines))) { + return result; + } + + // is crop required? + if (cropContext_) { + uint8_t* cropPlanes[4] = {nullptr}; + int cropLines[4] = {0}; + + if (params_.out.video.height < scaleFormat_.height) { + // Destination image is wider of source image: cut top and bottom + for (size_t i = 0; i < 4 && scalePlanes[i] != nullptr; ++i) { + scalePlanes[i] += scaleLines[i] * + (scaleFormat_.height - params_.out.video.height) / 2; + } + } else { + // Source image is wider of destination image: cut sides + for (size_t i = 0; i < 4 && scalePlanes[i] != nullptr; ++i) { + scalePlanes[i] += scaleLines[i] * + (scaleFormat_.width - params_.out.video.width) / 2 / + scaleFormat_.width; + } + } + + // crop image + if ((result = transformImage( + cropContext_, + scalePlanes, + scaleLines, + params_.out.video, + params_.out.video, + out->writableTail(), + cropPlanes, + cropLines))) { + return result; + } + } + + out->append(outImageSize); + return outImageSize; +} + +int VideoSampler::sample(AVFrame* frame, ByteStorage* out) { + if (!frame) { + return 0; // no flush for videos + } + + return sample(frame->data, frame->linesize, out); +} + +int VideoSampler::sample(const ByteStorage* in, ByteStorage* out) { + if (!in) { + return 0; // no flush for videos + } + + int result; + uint8_t* inPlanes[4] = {nullptr}; + int inLineSize[4] = {0}; + + if ((result = preparePlanes( + params_.in.video, in->data(), inPlanes, inLineSize)) < 0) { + return result; + } + + return sample(inPlanes, inLineSize, out); +} + +void VideoSampler::cleanUp() { + if (scaleContext_) { + sws_freeContext(scaleContext_); + scaleContext_ = nullptr; + } + if (cropContext_) { + sws_freeContext(cropContext_); + cropContext_ = nullptr; + scaleBuffer_.clear(); + } +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.h new file mode 100644 index 0000000000000000000000000000000000000000..47247f2c0c588adcd60a80eb5c6cda4d79478dcf --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_sampler.h @@ 
-0,0 +1,44 @@ +#pragma once + +#include "defs.h" + +namespace ffmpeg { + +/** + * Class transcode video frames from one format into another + */ + +class VideoSampler : public MediaSampler { + public: + VideoSampler(int swsFlags = SWS_AREA, int64_t loggingUuid = 0); + + ~VideoSampler() override; + + // MediaSampler overrides + bool init(const SamplerParameters& params) override; + int sample(const ByteStorage* in, ByteStorage* out) override; + void shutdown() override; + + // returns number processed/scaling bytes + int sample(AVFrame* frame, ByteStorage* out); + int getImageBytes() const; + + private: + // close resources + void cleanUp(); + // helper functions for rescaling, cropping, etc. + int sample( + const uint8_t* const srcSlice[], + int srcStride[], + ByteStorage* out); + + private: + VideoFormat scaleFormat_; + SwsContext* scaleContext_{nullptr}; + SwsContext* cropContext_{nullptr}; + int swsFlags_{SWS_AREA}; + std::vector<uint8_t> scaleBuffer_; + int64_t loggingUuid_{0}; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a9e20434fe04297c4a970f40d603e7f41c139e2a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.cpp @@ -0,0 +1,126 @@ +#include "video_stream.h" +#include <c10/util/Logging.h> +#include "util.h" + +namespace ffmpeg { + +namespace { +bool operator==(const VideoFormat& x, const AVFrame& y) { + return x.width == y.width && x.height == y.height && x.format == y.format; +} + +bool operator==(const VideoFormat& x, const AVCodecContext& y) { + return x.width == y.width && x.height == y.height && x.format == y.pix_fmt; +} + +VideoFormat& toVideoFormat(VideoFormat& x, const AVFrame& y) { + x.width = y.width; + x.height = y.height; + x.format = y.format; + return x; +} + +VideoFormat& toVideoFormat(VideoFormat& x, const AVCodecContext& y) { + x.width = y.width; + x.height = y.height; + x.format = y.pix_fmt; + return x; +} +} // namespace + +VideoStream::VideoStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const VideoFormat& format, + int64_t loggingUuid) + : Stream( + inputCtx, + MediaFormat::makeMediaFormat(format, index), + convertPtsToWallTime, + loggingUuid) {} + +VideoStream::~VideoStream() { + if (sampler_) { + sampler_->shutdown(); + sampler_.reset(); + } +} + +int VideoStream::initFormat() { + // set output format + if (!Util::validateVideoFormat(format_.format.video)) { + LOG(ERROR) << "Invalid video format" + << ", width: " << format_.format.video.width + << ", height: " << format_.format.video.height + << ", format: " << format_.format.video.format + << ", minDimension: " << format_.format.video.minDimension + << ", crop: " << format_.format.video.cropImage; + return -1; + } + + // keep aspect ratio + Util::setFormatDimensions( + format_.format.video.width, + format_.format.video.height, + format_.format.video.width, + format_.format.video.height, + codecCtx_->width, + codecCtx_->height, + format_.format.video.minDimension, + format_.format.video.maxDimension, + 0); + + if (format_.format.video.format == AV_PIX_FMT_NONE) { + format_.format.video.format = codecCtx_->pix_fmt; + } + return format_.format.video.width != 0 && format_.format.video.height != 0 && + format_.format.video.format != AV_PIX_FMT_NONE + ? 
0 + : -1; +} + +int VideoStream::copyFrameBytes(ByteStorage* out, bool flush) { + if (!sampler_) { + sampler_ = std::make_unique<VideoSampler>(SWS_AREA, loggingUuid_); + } + + // check if input format gets changed + if (flush ? !(sampler_->getInputFormat().video == *codecCtx_) + : !(sampler_->getInputFormat().video == *frame_)) { + // - reinit sampler + SamplerParameters params; + params.type = format_.type; + params.out = format_.format; + params.in = FormatUnion(0); + flush ? toVideoFormat(params.in.video, *codecCtx_) + : toVideoFormat(params.in.video, *frame_); + if (!sampler_->init(params)) { + return -1; + } + + VLOG(1) << "Set input video sampler format" + << ", width: " << params.in.video.width + << ", height: " << params.in.video.height + << ", format: " << params.in.video.format + << " : output video sampler format" + << ", width: " << format_.format.video.width + << ", height: " << format_.format.video.height + << ", format: " << format_.format.video.format + << ", minDimension: " << format_.format.video.minDimension + << ", crop: " << format_.format.video.cropImage; + } + + return sampler_->sample(flush ? nullptr : frame_, out); +} + +void VideoStream::setHeader(DecoderHeader* header, bool flush) { + Stream::setHeader(header, flush); + if (!flush) { // no frames for video flush + header->keyFrame = frame_->key_frame; + header->fps = av_q2d(av_guess_frame_rate( + inputCtx_, inputCtx_->streams[format_.stream], nullptr)); + } +} + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.h new file mode 100644 index 0000000000000000000000000000000000000000..e6a8bf02b65a690c442ad4e878722110e0c18a8f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/decoder/video_stream.h @@ -0,0 +1,31 @@ +#pragma once + +#include "stream.h" +#include "video_sampler.h" + +namespace ffmpeg { + +/** + * Class uses FFMPEG library to decode one video stream. + */ + +class VideoStream : public Stream { + public: + VideoStream( + AVFormatContext* inputCtx, + int index, + bool convertPtsToWallTime, + const VideoFormat& format, + int64_t loggingUuid); + ~VideoStream() override; + + private: + int initFormat() override; + int copyFrameBytes(ByteStorage* out, bool flush) override; + void setHeader(DecoderHeader* header, bool flush) override; + + private: + std::unique_ptr<VideoSampler> sampler_; +}; + +} // namespace ffmpeg diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4c993106b45d5e2136abe9d439e6e864b190d4a7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.cpp @@ -0,0 +1,26 @@ +#include "common_jpeg.h" + +namespace vision { +namespace image { +namespace detail { + +#if JPEG_FOUND +void torch_jpeg_error_exit(j_common_ptr cinfo) { + /* cinfo->err really points to a torch_jpeg_error_mgr struct, so coerce + * pointer */ + torch_jpeg_error_ptr myerr = (torch_jpeg_error_ptr)cinfo->err; + + /* Always display the message. */ + /* We could postpone this until after returning, if we chose. 
*/ + // (*cinfo->err->output_message)(cinfo); + /* Create the message */ + (*(cinfo->err->format_message))(cinfo, myerr->jpegLastErrorMsg); + + /* Return control to the setjmp point */ + longjmp(myerr->setjmp_buffer, 1); +} +#endif + +} // namespace detail +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.h new file mode 100644 index 0000000000000000000000000000000000000000..7f7f9f0ccf16f8962caf6e0226175aa07c75b362 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_jpeg.h @@ -0,0 +1,27 @@ +#pragma once + +#if JPEG_FOUND +#include <stdio.h> + +#include <jpeglib.h> +#include <setjmp.h> + +namespace vision { +namespace image { +namespace detail { + +static const JOCTET EOI_BUFFER[1] = {JPEG_EOI}; +struct torch_jpeg_error_mgr { + struct jpeg_error_mgr pub; /* "public" fields */ + char jpegLastErrorMsg[JMSG_LENGTH_MAX]; /* error messages */ + jmp_buf setjmp_buffer; /* for return to caller */ +}; + +using torch_jpeg_error_ptr = struct torch_jpeg_error_mgr*; +void torch_jpeg_error_exit(j_common_ptr cinfo); + +} // namespace detail +} // namespace image +} // namespace vision + +#endif diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_png.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_png.h new file mode 100644 index 0000000000000000000000000000000000000000..68400d48e05661158d0da2d8618f7bd89fac5cf1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/common_png.h @@ -0,0 +1,6 @@ +#pragma once + +#if PNG_FOUND +#include <png.h> +#include <setjmp.h> +#endif diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1cc05dc76cadcb563a777c444139799186d1977e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.cpp @@ -0,0 +1,35 @@ +#include "decode_image.h" + +#include "decode_jpeg.h" +#include "decode_png.h" + +namespace vision { +namespace image { + +torch::Tensor decode_image(const torch::Tensor& data, ImageReadMode mode) { + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); + // Check that the input tensor is 1-dimensional + TORCH_CHECK( + data.dim() == 1 && data.numel() > 0, + "Expected a non empty 1-dimensional tensor"); + + auto datap = data.data_ptr<uint8_t>(); + + const uint8_t jpeg_signature[3] = {255, 216, 255}; // == "\xFF\xD8\xFF" + const uint8_t png_signature[4] = {137, 80, 78, 71}; // == "\211PNG" + + if (memcmp(jpeg_signature, datap, 3) == 0) { + return decode_jpeg(data, mode); + } else if (memcmp(png_signature, datap, 4) == 0) { + return decode_png(data, mode); + } else { + TORCH_CHECK( + false, + "Unsupported image file. 
Only jpeg and png ", + "are currently supported."); + } +} + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.h new file mode 100644 index 0000000000000000000000000000000000000000..853d6d91afa67f4ed3db95ba934a17aef4116c7d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_image.h @@ -0,0 +1,14 @@ +#pragma once + +#include <torch/types.h> +#include "../image_read_mode.h" + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor decode_image( + const torch::Tensor& data, + ImageReadMode mode = IMAGE_READ_MODE_UNCHANGED); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c6e971c3b12d61c387e434929a7d665ba10d626a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.cpp @@ -0,0 +1,157 @@ +#include "decode_jpeg.h" +#include "common_jpeg.h" + +namespace vision { +namespace image { + +#if !JPEG_FOUND +torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { + TORCH_CHECK( + false, "decode_jpeg: torchvision not compiled with libjpeg support"); +} +#else + +using namespace detail; + +namespace { + +struct torch_jpeg_mgr { + struct jpeg_source_mgr pub; + const JOCTET* data; + size_t len; +}; + +static void torch_jpeg_init_source(j_decompress_ptr cinfo) {} + +static boolean torch_jpeg_fill_input_buffer(j_decompress_ptr cinfo) { + // No more data. Probably an incomplete image; Raise exception. + torch_jpeg_error_ptr myerr = (torch_jpeg_error_ptr)cinfo->err; + strcpy(myerr->jpegLastErrorMsg, "Image is incomplete or truncated"); + longjmp(myerr->setjmp_buffer, 1); +} + +static void torch_jpeg_skip_input_data(j_decompress_ptr cinfo, long num_bytes) { + torch_jpeg_mgr* src = (torch_jpeg_mgr*)cinfo->src; + if (src->pub.bytes_in_buffer < (size_t)num_bytes) { + // Skipping over all of remaining data; output EOI. + src->pub.next_input_byte = EOI_BUFFER; + src->pub.bytes_in_buffer = 1; + } else { + // Skipping over only some of the remaining data. 
+ src->pub.next_input_byte += num_bytes; + src->pub.bytes_in_buffer -= num_bytes; + } +} + +static void torch_jpeg_term_source(j_decompress_ptr cinfo) {} + +static void torch_jpeg_set_source_mgr( + j_decompress_ptr cinfo, + const unsigned char* data, + size_t len) { + torch_jpeg_mgr* src; + if (cinfo->src == 0) { // if this is first time; allocate memory + cinfo->src = (struct jpeg_source_mgr*)(*cinfo->mem->alloc_small)( + (j_common_ptr)cinfo, JPOOL_PERMANENT, sizeof(torch_jpeg_mgr)); + } + src = (torch_jpeg_mgr*)cinfo->src; + src->pub.init_source = torch_jpeg_init_source; + src->pub.fill_input_buffer = torch_jpeg_fill_input_buffer; + src->pub.skip_input_data = torch_jpeg_skip_input_data; + src->pub.resync_to_restart = jpeg_resync_to_restart; // default + src->pub.term_source = torch_jpeg_term_source; + // fill the buffers + src->data = (const JOCTET*)data; + src->len = len; + src->pub.bytes_in_buffer = len; + src->pub.next_input_byte = src->data; +} + +} // namespace + +torch::Tensor decode_jpeg(const torch::Tensor& data, ImageReadMode mode) { + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); + // Check that the input tensor is 1-dimensional + TORCH_CHECK( + data.dim() == 1 && data.numel() > 0, + "Expected a non empty 1-dimensional tensor"); + + struct jpeg_decompress_struct cinfo; + struct torch_jpeg_error_mgr jerr; + + auto datap = data.data_ptr<uint8_t>(); + // Setup decompression structure + cinfo.err = jpeg_std_error(&jerr.pub); + jerr.pub.error_exit = torch_jpeg_error_exit; + /* Establish the setjmp return context for my_error_exit to use. */ + if (setjmp(jerr.setjmp_buffer)) { + /* If we get here, the JPEG code has signaled an error. + * We need to clean up the JPEG object. + */ + jpeg_destroy_decompress(&cinfo); + TORCH_CHECK(false, jerr.jpegLastErrorMsg); + } + + jpeg_create_decompress(&cinfo); + torch_jpeg_set_source_mgr(&cinfo, datap, data.numel()); + + // read info from header. + jpeg_read_header(&cinfo, TRUE); + + int channels = cinfo.num_components; + + if (mode != IMAGE_READ_MODE_UNCHANGED) { + switch (mode) { + case IMAGE_READ_MODE_GRAY: + if (cinfo.jpeg_color_space != JCS_GRAYSCALE) { + cinfo.out_color_space = JCS_GRAYSCALE; + channels = 1; + } + break; + case IMAGE_READ_MODE_RGB: + if (cinfo.jpeg_color_space != JCS_RGB) { + cinfo.out_color_space = JCS_RGB; + channels = 3; + } + break; + /* + * Libjpeg does not support converting from CMYK to grayscale etc. There + * is a way to do this but it involves converting it manually to RGB: + * https://github.com/tensorflow/tensorflow/blob/86871065265b04e0db8ca360c046421efb2bdeb4/tensorflow/core/lib/jpeg/jpeg_mem.cc#L284-L313 + */ + default: + jpeg_destroy_decompress(&cinfo); + TORCH_CHECK(false, "The provided mode is not supported for JPEG files"); + } + + jpeg_calc_output_dimensions(&cinfo); + } + + jpeg_start_decompress(&cinfo); + + int height = cinfo.output_height; + int width = cinfo.output_width; + + int stride = width * channels; + auto tensor = + torch::empty({int64_t(height), int64_t(width), channels}, torch::kU8); + auto ptr = tensor.data_ptr<uint8_t>(); + while (cinfo.output_scanline < cinfo.output_height) { + /* jpeg_read_scanlines expects an array of pointers to scanlines. + * Here the array is only one element long, but you could ask for + * more than one scanline at a time if that's more convenient. 
+ */ + jpeg_read_scanlines(&cinfo, &ptr, 1); + ptr += stride; + } + + jpeg_finish_decompress(&cinfo); + jpeg_destroy_decompress(&cinfo); + return tensor.permute({2, 0, 1}); +} + +#endif + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.h new file mode 100644 index 0000000000000000000000000000000000000000..97ed3d51a54e625989d695c42ccf78f1e2e79d9f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_jpeg.h @@ -0,0 +1,14 @@ +#pragma once + +#include <torch/types.h> +#include "../image_read_mode.h" + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor decode_jpeg( + const torch::Tensor& data, + ImageReadMode mode = IMAGE_READ_MODE_UNCHANGED); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.cpp new file mode 100644 index 0000000000000000000000000000000000000000..5ee33635a1c1892f0a201273cf53a72d1b8f479f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.cpp @@ -0,0 +1,169 @@ +#include "decode_png.h" +#include "common_png.h" + +namespace vision { +namespace image { + +#if !PNG_FOUND +torch::Tensor decode_png(const torch::Tensor& data, ImageReadMode mode) { + TORCH_CHECK( + false, "decode_png: torchvision not compiled with libPNG support"); +} +#else + +torch::Tensor decode_png(const torch::Tensor& data, ImageReadMode mode) { + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); + // Check that the input tensor is 1-dimensional + TORCH_CHECK( + data.dim() == 1 && data.numel() > 0, + "Expected a non empty 1-dimensional tensor"); + + auto png_ptr = + png_create_read_struct(PNG_LIBPNG_VER_STRING, nullptr, nullptr, nullptr); + TORCH_CHECK(png_ptr, "libpng read structure allocation failed!") + auto info_ptr = png_create_info_struct(png_ptr); + if (!info_ptr) { + png_destroy_read_struct(&png_ptr, nullptr, nullptr); + // Seems redundant with the if statement. done here to avoid leaking memory. 
+ TORCH_CHECK(info_ptr, "libpng info structure allocation failed!") + } + + auto datap = data.accessor<unsigned char, 1>().data(); + + if (setjmp(png_jmpbuf(png_ptr)) != 0) { + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); + TORCH_CHECK(false, "Internal error."); + } + auto is_png = !png_sig_cmp(datap, 0, 8); + TORCH_CHECK(is_png, "Content is not png!") + + struct Reader { + png_const_bytep ptr; + } reader; + reader.ptr = png_const_bytep(datap) + 8; + + auto read_callback = + [](png_structp png_ptr, png_bytep output, png_size_t bytes) { + auto reader = static_cast<Reader*>(png_get_io_ptr(png_ptr)); + std::copy(reader->ptr, reader->ptr + bytes, output); + reader->ptr += bytes; + }; + png_set_sig_bytes(png_ptr, 8); + png_set_read_fn(png_ptr, &reader, read_callback); + png_read_info(png_ptr, info_ptr); + + png_uint_32 width, height; + int bit_depth, color_type; + auto retval = png_get_IHDR( + png_ptr, + info_ptr, + &width, + &height, + &bit_depth, + &color_type, + nullptr, + nullptr, + nullptr); + + if (retval != 1) { + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); + TORCH_CHECK(retval == 1, "Could read image metadata from content.") + } + + int channels = png_get_channels(png_ptr, info_ptr); + + if (mode != IMAGE_READ_MODE_UNCHANGED) { + // TODO: consider supporting PNG_INFO_tRNS + bool is_palette = (color_type & PNG_COLOR_MASK_PALETTE) != 0; + bool has_color = (color_type & PNG_COLOR_MASK_COLOR) != 0; + bool has_alpha = (color_type & PNG_COLOR_MASK_ALPHA) != 0; + + switch (mode) { + case IMAGE_READ_MODE_GRAY: + if (color_type != PNG_COLOR_TYPE_GRAY) { + if (is_palette) { + png_set_palette_to_rgb(png_ptr); + has_alpha = true; + } + + if (has_alpha) { + png_set_strip_alpha(png_ptr); + } + + if (has_color) { + png_set_rgb_to_gray(png_ptr, 1, 0.2989, 0.587); + } + channels = 1; + } + break; + case IMAGE_READ_MODE_GRAY_ALPHA: + if (color_type != PNG_COLOR_TYPE_GRAY_ALPHA) { + if (is_palette) { + png_set_palette_to_rgb(png_ptr); + has_alpha = true; + } + + if (!has_alpha) { + png_set_add_alpha(png_ptr, (1 << bit_depth) - 1, PNG_FILLER_AFTER); + } + + if (has_color) { + png_set_rgb_to_gray(png_ptr, 1, 0.2989, 0.587); + } + channels = 2; + } + break; + case IMAGE_READ_MODE_RGB: + if (color_type != PNG_COLOR_TYPE_RGB) { + if (is_palette) { + png_set_palette_to_rgb(png_ptr); + has_alpha = true; + } else if (!has_color) { + png_set_gray_to_rgb(png_ptr); + } + + if (has_alpha) { + png_set_strip_alpha(png_ptr); + } + channels = 3; + } + break; + case IMAGE_READ_MODE_RGB_ALPHA: + if (color_type != PNG_COLOR_TYPE_RGB_ALPHA) { + if (is_palette) { + png_set_palette_to_rgb(png_ptr); + has_alpha = true; + } else if (!has_color) { + png_set_gray_to_rgb(png_ptr); + } + + if (!has_alpha) { + png_set_add_alpha(png_ptr, (1 << bit_depth) - 1, PNG_FILLER_AFTER); + } + channels = 4; + } + break; + default: + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); + TORCH_CHECK(false, "The provided mode is not supported for PNG files"); + } + + png_read_update_info(png_ptr, info_ptr); + } + + auto tensor = + torch::empty({int64_t(height), int64_t(width), channels}, torch::kU8); + auto ptr = tensor.accessor<uint8_t, 3>().data(); + auto bytes = png_get_rowbytes(png_ptr, info_ptr); + for (png_uint_32 i = 0; i < height; ++i) { + png_read_row(png_ptr, ptr, nullptr); + ptr += bytes; + } + png_destroy_read_struct(&png_ptr, &info_ptr, nullptr); + return tensor.permute({2, 0, 1}); +} +#endif + +} // namespace image +} // namespace vision diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.h new file mode 100644 index 0000000000000000000000000000000000000000..471bf77d935b5a21a8f2ca41e44428f94024baa9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/decode_png.h @@ -0,0 +1,14 @@ +#pragma once + +#include <torch/types.h> +#include "../image_read_mode.h" + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor decode_png( + const torch::Tensor& data, + ImageReadMode mode = IMAGE_READ_MODE_UNCHANGED); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c84ad37005d861cc3c177558293a391e0e1218ea --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.cpp @@ -0,0 +1,109 @@ +#include "encode_jpeg.h" + +#include "common_jpeg.h" + +namespace vision { +namespace image { + +#if !JPEG_FOUND + +torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { + TORCH_CHECK( + false, "encode_jpeg: torchvision not compiled with libjpeg support"); +} + +#else + +using namespace detail; + +torch::Tensor encode_jpeg(const torch::Tensor& data, int64_t quality) { + // Define compression structures and error handling + struct jpeg_compress_struct cinfo; + struct torch_jpeg_error_mgr jerr; + + // Define buffer to write JPEG information to and its size + unsigned long jpegSize = 0; + uint8_t* jpegBuf = NULL; + + cinfo.err = jpeg_std_error(&jerr.pub); + jerr.pub.error_exit = torch_jpeg_error_exit; + + /* Establish the setjmp return context for my_error_exit to use. */ + if (setjmp(jerr.setjmp_buffer)) { + /* If we get here, the JPEG code has signaled an error. + * We need to clean up the JPEG object and the buffer. + */ + jpeg_destroy_compress(&cinfo); + if (jpegBuf != NULL) { + free(jpegBuf); + } + + TORCH_CHECK(false, (const char*)jerr.jpegLastErrorMsg); + } + + // Check that the input tensor is on CPU + TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); + + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Input tensor dtype should be uint8"); + + // Check that the input tensor is 3-dimensional + TORCH_CHECK(data.dim() == 3, "Input data should be a 3-dimensional tensor"); + + // Get image info + int channels = data.size(0); + int height = data.size(1); + int width = data.size(2); + auto input = data.permute({1, 2, 0}).contiguous(); + + TORCH_CHECK( + channels == 1 || channels == 3, + "The number of channels should be 1 or 3, got: ", + channels); + + // Initialize JPEG structure + jpeg_create_compress(&cinfo); + + // Set output image information + cinfo.image_width = width; + cinfo.image_height = height; + cinfo.input_components = channels; + cinfo.in_color_space = channels == 1 ? 
JCS_GRAYSCALE : JCS_RGB; + + jpeg_set_defaults(&cinfo); + jpeg_set_quality(&cinfo, quality, TRUE); + + // Save JPEG output to a buffer + jpeg_mem_dest(&cinfo, &jpegBuf, &jpegSize); + + // Start JPEG compression + jpeg_start_compress(&cinfo, TRUE); + + auto stride = width * channels; + auto ptr = input.data_ptr<uint8_t>(); + + // Encode JPEG file + while (cinfo.next_scanline < cinfo.image_height) { + jpeg_write_scanlines(&cinfo, &ptr, 1); + ptr += stride; + } + + jpeg_finish_compress(&cinfo); + jpeg_destroy_compress(&cinfo); + + torch::TensorOptions options = torch::TensorOptions{torch::kU8}; + auto outTensor = torch::empty({(long)jpegSize}, options); + + // Copy memory from jpeg buffer, since torch cannot get ownership of it via + // `from_blob` + auto outPtr = outTensor.data_ptr<uint8_t>(); + std::memcpy(outPtr, jpegBuf, sizeof(uint8_t) * outTensor.numel()); + + free(jpegBuf); + + return outTensor; +} +#endif + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.h new file mode 100644 index 0000000000000000000000000000000000000000..25084e154d674bbfb42d841de92255a8e16a63d4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_jpeg.h @@ -0,0 +1,13 @@ +#pragma once + +#include <torch/types.h> + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor encode_jpeg( + const torch::Tensor& data, + int64_t quality); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d28bad958909f2f7eaa1c164cf326d49c511283a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.cpp @@ -0,0 +1,179 @@ +#include "encode_jpeg.h" + +#include "common_png.h" + +namespace vision { +namespace image { + +#if !PNG_FOUND + +torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { + TORCH_CHECK( + false, "encode_png: torchvision not compiled with libpng support"); +} + +#else + +namespace { + +struct torch_mem_encode { + char* buffer; + size_t size; +}; + +struct torch_png_error_mgr { + const char* pngLastErrorMsg; /* error messages */ + jmp_buf setjmp_buffer; /* for return to caller */ +}; + +using torch_png_error_mgr_ptr = torch_png_error_mgr*; + +void torch_png_error(png_structp png_ptr, png_const_charp error_msg) { + /* png_ptr->err really points to a torch_png_error_mgr struct, so coerce + * pointer */ + auto error_ptr = (torch_png_error_mgr_ptr)png_get_error_ptr(png_ptr); + /* Replace the error message on the error structure */ + error_ptr->pngLastErrorMsg = error_msg; + /* Return control to the setjmp point */ + longjmp(error_ptr->setjmp_buffer, 1); +} + +void torch_png_write_data( + png_structp png_ptr, + png_bytep data, + png_size_t length) { + struct torch_mem_encode* p = + (struct torch_mem_encode*)png_get_io_ptr(png_ptr); + size_t nsize = p->size + length; + + /* allocate or grow buffer */ + if (p->buffer) + p->buffer = (char*)realloc(p->buffer, nsize); + else + p->buffer = (char*)malloc(nsize); + + if (!p->buffer) + png_error(png_ptr, "Write Error"); + + /* copy new bytes to end of buffer */ + memcpy(p->buffer + p->size, data, length); + p->size += length; +} + +} // namespace 
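+// The helpers above route libpng errors through setjmp/longjmp and grow a heap +// buffer from the custom write callback; encode_png below drives them to turn a +// CHW uint8 CPU tensor (1 or 3 channels, compression level 0-9) into a 1-D uint8 +// tensor holding the PNG-encoded bytes.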
+ +torch::Tensor encode_png(const torch::Tensor& data, int64_t compression_level) { + // Define compression structures and error handling + png_structp png_write; + png_infop info_ptr; + struct torch_png_error_mgr err_ptr; + + // Define output buffer + struct torch_mem_encode buf_info; + buf_info.buffer = NULL; + buf_info.size = 0; + + /* Establish the setjmp return context for my_error_exit to use. */ + if (setjmp(err_ptr.setjmp_buffer)) { + /* If we get here, the PNG code has signaled an error. + * We need to clean up the PNG object and the buffer. + */ + if (info_ptr != NULL) { + png_destroy_info_struct(png_write, &info_ptr); + } + + if (png_write != NULL) { + png_destroy_write_struct(&png_write, NULL); + } + + if (buf_info.buffer != NULL) { + free(buf_info.buffer); + } + + TORCH_CHECK(false, err_ptr.pngLastErrorMsg); + } + + // Check that the compression level is between 0 and 9 + TORCH_CHECK( + compression_level >= 0 && compression_level <= 9, + "Compression level should be between 0 and 9"); + + // Check that the input tensor is on CPU + TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); + + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Input tensor dtype should be uint8"); + + // Check that the input tensor is 3-dimensional + TORCH_CHECK(data.dim() == 3, "Input data should be a 3-dimensional tensor"); + + // Get image info + int channels = data.size(0); + int height = data.size(1); + int width = data.size(2); + auto input = data.permute({1, 2, 0}).contiguous(); + + TORCH_CHECK( + channels == 1 || channels == 3, + "The number of channels should be 1 or 3, got: ", + channels); + + // Initialize PNG structures + png_write = png_create_write_struct( + PNG_LIBPNG_VER_STRING, &err_ptr, torch_png_error, NULL); + + info_ptr = png_create_info_struct(png_write); + + // Define custom buffer output + png_set_write_fn(png_write, &buf_info, torch_png_write_data, NULL); + + // Set output image information + auto color_type = channels == 1 ? 
PNG_COLOR_TYPE_GRAY : PNG_COLOR_TYPE_RGB; + png_set_IHDR( + png_write, + info_ptr, + width, + height, + 8, + color_type, + PNG_INTERLACE_NONE, + PNG_COMPRESSION_TYPE_DEFAULT, + PNG_FILTER_TYPE_DEFAULT); + + // Set image compression level + png_set_compression_level(png_write, compression_level); + + // Write file header + png_write_info(png_write, info_ptr); + + auto stride = width * channels; + auto ptr = input.data_ptr<uint8_t>(); + + // Encode PNG file + for (int y = 0; y < height; ++y) { + png_write_row(png_write, ptr); + ptr += stride; + } + + // Write EOF + png_write_end(png_write, info_ptr); + + // Destroy structures + png_destroy_write_struct(&png_write, &info_ptr); + + torch::TensorOptions options = torch::TensorOptions{torch::kU8}; + auto outTensor = torch::empty({(long)buf_info.size}, options); + + // Copy memory from png buffer, since torch cannot get ownership of it via + // `from_blob` + auto outPtr = outTensor.data_ptr<uint8_t>(); + std::memcpy(outPtr, buf_info.buffer, sizeof(uint8_t) * outTensor.numel()); + free(buf_info.buffer); + + return outTensor; +} + +#endif + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.h new file mode 100644 index 0000000000000000000000000000000000000000..86a67c8706e0b4cce3d3f7291f98fe1672f4c7ae --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/encode_png.h @@ -0,0 +1,13 @@ +#pragma once + +#include <torch/types.h> + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor encode_png( + const torch::Tensor& data, + int64_t compression_level); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.cpp new file mode 100644 index 0000000000000000000000000000000000000000..a0bb7df72d57915f5c03fbf8be1653ad3404b14c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.cpp @@ -0,0 +1,104 @@ +#include "read_write_file.h" + +#include <sys/stat.h> + +#ifdef _WIN32 +#define WIN32_LEAN_AND_MEAN +#include <Windows.h> +#endif + +namespace vision { +namespace image { + +#ifdef _WIN32 +namespace { +std::wstring utf8_decode(const std::string& str) { + if (str.empty()) { + return std::wstring(); + } + int size_needed = MultiByteToWideChar( + CP_UTF8, 0, str.c_str(), static_cast<int>(str.size()), NULL, 0); + TORCH_CHECK(size_needed > 0, "Error converting the content to Unicode"); + std::wstring wstrTo(size_needed, 0); + MultiByteToWideChar( + CP_UTF8, + 0, + str.c_str(), + static_cast<int>(str.size()), + &wstrTo[0], + size_needed); + return wstrTo; +} +} // namespace +#endif + +torch::Tensor read_file(const std::string& filename) { +#ifdef _WIN32 + // According to + // https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/stat-functions?view=vs-2019, + // we should use struct __stat64 and _wstat64 for 64-bit file size on Windows. 
+ struct __stat64 stat_buf; + auto fileW = utf8_decode(filename); + int rc = _wstat64(fileW.c_str(), &stat_buf); +#else + struct stat stat_buf; + int rc = stat(filename.c_str(), &stat_buf); +#endif + // errno is a variable defined in errno.h + TORCH_CHECK( + rc == 0, "[Errno ", errno, "] ", strerror(errno), ": '", filename, "'"); + + int64_t size = stat_buf.st_size; + + TORCH_CHECK(size > 0, "Expected a non empty file"); + +#ifdef _WIN32 + // TODO: Once torch::from_file handles UTF-8 paths correctly, we should move + // back to use the following implementation since it uses file mapping. + // auto data = + // torch::from_file(filename, /*shared=*/false, /*size=*/size, + // torch::kU8).clone() + FILE* infile = _wfopen(fileW.c_str(), L"rb"); + + TORCH_CHECK(infile != nullptr, "Error opening input file"); + + auto data = torch::empty({size}, torch::kU8); + auto dataBytes = data.data_ptr<uint8_t>(); + + fread(dataBytes, sizeof(uint8_t), size, infile); + fclose(infile); +#else + auto data = + torch::from_file(filename, /*shared=*/false, /*size=*/size, torch::kU8); +#endif + + return data; +} + +void write_file(const std::string& filename, torch::Tensor& data) { + // Check that the input tensor is on CPU + TORCH_CHECK(data.device() == torch::kCPU, "Input tensor should be on CPU"); + + // Check that the input tensor dtype is uint8 + TORCH_CHECK(data.dtype() == torch::kU8, "Input tensor dtype should be uint8"); + + // Check that the input tensor is 1-dimensional + TORCH_CHECK(data.dim() == 1, "Input data should be a 1-dimensional tensor"); + + auto fileBytes = data.data_ptr<uint8_t>(); + auto fileCStr = filename.c_str(); +#ifdef _WIN32 + auto fileW = utf8_decode(filename); + FILE* outfile = _wfopen(fileW.c_str(), L"wb"); +#else + FILE* outfile = fopen(fileCStr, "wb"); +#endif + + TORCH_CHECK(outfile != nullptr, "Error opening output file"); + + fwrite(fileBytes, sizeof(uint8_t), data.numel(), outfile); + fclose(outfile); +} + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.h new file mode 100644 index 0000000000000000000000000000000000000000..a5a712dd8e24173b5fec9ed0d5347140277f828d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cpu/read_write_file.h @@ -0,0 +1,13 @@ +#pragma once + +#include <torch/types.h> + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor read_file(const std::string& filename); + +C10_EXPORT void write_file(const std::string& filename, torch::Tensor& data); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp new file mode 100644 index 0000000000000000000000000000000000000000..68f63ced427df927a1a789d68dbba0e58b9edf52 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.cpp @@ -0,0 +1,185 @@ +#include "decode_jpeg_cuda.h" + +#include <ATen/ATen.h> + +#if NVJPEG_FOUND +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <nvjpeg.h> +#endif + +#include <string> + +namespace vision { +namespace image { + +#if !NVJPEG_FOUND + +torch::Tensor decode_jpeg_cuda( + const torch::Tensor& data, + ImageReadMode mode, + torch::Device device) { + TORCH_CHECK( + false, "decode_jpeg_cuda: 
torchvision not compiled with nvJPEG support"); +} + +#else + +namespace { +static nvjpegHandle_t nvjpeg_handle = nullptr; +} + +torch::Tensor decode_jpeg_cuda( + const torch::Tensor& data, + ImageReadMode mode, + torch::Device device) { + TORCH_CHECK(data.dtype() == torch::kU8, "Expected a torch.uint8 tensor"); + + TORCH_CHECK( + !data.is_cuda(), + "The input tensor must be on CPU when decoding with nvjpeg") + + TORCH_CHECK( + data.dim() == 1 && data.numel() > 0, + "Expected a non empty 1-dimensional tensor"); + + TORCH_CHECK(device.is_cuda(), "Expected a cuda device") + + at::cuda::CUDAGuard device_guard(device); + + // Create global nvJPEG handle + std::once_flag nvjpeg_handle_creation_flag; + std::call_once(nvjpeg_handle_creation_flag, []() { + if (nvjpeg_handle == nullptr) { + nvjpegStatus_t create_status = nvjpegCreateSimple(&nvjpeg_handle); + + if (create_status != NVJPEG_STATUS_SUCCESS) { + // Reset handle so that one can still call the function again in the + // same process if there was a failure + free(nvjpeg_handle); + nvjpeg_handle = nullptr; + } + TORCH_CHECK( + create_status == NVJPEG_STATUS_SUCCESS, + "nvjpegCreateSimple failed: ", + create_status); + } + }); + + // Create the jpeg state + nvjpegJpegState_t jpeg_state; + nvjpegStatus_t state_status = + nvjpegJpegStateCreate(nvjpeg_handle, &jpeg_state); + + TORCH_CHECK( + state_status == NVJPEG_STATUS_SUCCESS, + "nvjpegJpegStateCreate failed: ", + state_status); + + auto datap = data.data_ptr<uint8_t>(); + + // Get the image information + int num_channels; + nvjpegChromaSubsampling_t subsampling; + int widths[NVJPEG_MAX_COMPONENT]; + int heights[NVJPEG_MAX_COMPONENT]; + nvjpegStatus_t info_status = nvjpegGetImageInfo( + nvjpeg_handle, + datap, + data.numel(), + &num_channels, + &subsampling, + widths, + heights); + + if (info_status != NVJPEG_STATUS_SUCCESS) { + nvjpegJpegStateDestroy(jpeg_state); + TORCH_CHECK(false, "nvjpegGetImageInfo failed: ", info_status); + } + + if (subsampling == NVJPEG_CSS_UNKNOWN) { + nvjpegJpegStateDestroy(jpeg_state); + TORCH_CHECK(false, "Unknown NVJPEG chroma subsampling"); + } + + int width = widths[0]; + int height = heights[0]; + + nvjpegOutputFormat_t ouput_format; + int num_channels_output; + + switch (mode) { + case IMAGE_READ_MODE_UNCHANGED: + num_channels_output = num_channels; + // For some reason, setting output_format to NVJPEG_OUTPUT_UNCHANGED will + // not properly decode RGB images (it's fine for grayscale), so we set + // output_format manually here + if (num_channels == 1) { + ouput_format = NVJPEG_OUTPUT_Y; + } else if (num_channels == 3) { + ouput_format = NVJPEG_OUTPUT_RGB; + } else { + nvjpegJpegStateDestroy(jpeg_state); + TORCH_CHECK( + false, + "When mode is UNCHANGED, only 1 or 3 input channels are allowed."); + } + break; + case IMAGE_READ_MODE_GRAY: + ouput_format = NVJPEG_OUTPUT_Y; + num_channels_output = 1; + break; + case IMAGE_READ_MODE_RGB: + ouput_format = NVJPEG_OUTPUT_RGB; + num_channels_output = 3; + break; + default: + nvjpegJpegStateDestroy(jpeg_state); + TORCH_CHECK( + false, "The provided mode is not supported for JPEG decoding on GPU"); + } + + auto out_tensor = torch::empty( + {int64_t(num_channels_output), int64_t(height), int64_t(width)}, + torch::dtype(torch::kU8).device(device)); + + // nvjpegImage_t is a struct with + // - an array of pointers to each channel + // - the pitch for each channel + // which must be filled in manually + nvjpegImage_t out_image; + + for (int c = 0; c < num_channels_output; c++) { + out_image.channel[c] = 
out_tensor[c].data_ptr<uint8_t>(); + out_image.pitch[c] = width; + } + for (int c = num_channels_output; c < NVJPEG_MAX_COMPONENT; c++) { + out_image.channel[c] = nullptr; + out_image.pitch[c] = 0; + } + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(device.index()); + + nvjpegStatus_t decode_status = nvjpegDecode( + nvjpeg_handle, + jpeg_state, + datap, + data.numel(), + ouput_format, + &out_image, + stream); + + nvjpegJpegStateDestroy(jpeg_state); + + TORCH_CHECK( + decode_status == NVJPEG_STATUS_SUCCESS, + "nvjpegDecode failed: ", + decode_status); + + return out_tensor; +} + +#endif // NVJPEG_FOUND + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..496b355e9b743692ebacf3f440420e3da41d72a4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/cuda/decode_jpeg_cuda.h @@ -0,0 +1,15 @@ +#pragma once + +#include <torch/types.h> +#include "../image_read_mode.h" + +namespace vision { +namespace image { + +C10_EXPORT torch::Tensor decode_jpeg_cuda( + const torch::Tensor& data, + ImageReadMode mode, + torch::Device device); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.cpp new file mode 100644 index 0000000000000000000000000000000000000000..37d64013cb26dd022c9e8a3e6da6eed4b0531986 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.cpp @@ -0,0 +1,28 @@ +#include "image.h" + +#include <Python.h> + +// If we are in a Windows environment, we need to define +// initialization functions for the _custom_ops extension +#ifdef _WIN32 +PyMODINIT_FUNC PyInit_image(void) { + // No need to do anything. 
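+ // Importing the extension module is enough on its own; the operators are + // registered below through torch::RegisterOperators.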
+ return NULL; +} +#endif + +namespace vision { +namespace image { + +static auto registry = torch::RegisterOperators() + .op("image::decode_png", &decode_png) + .op("image::encode_png", &encode_png) + .op("image::decode_jpeg", &decode_jpeg) + .op("image::encode_jpeg", &encode_jpeg) + .op("image::read_file", &read_file) + .op("image::write_file", &write_file) + .op("image::decode_image", &decode_image) + .op("image::decode_jpeg_cuda", &decode_jpeg_cuda); + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.h new file mode 100644 index 0000000000000000000000000000000000000000..05bac44c77d0329c96a3d3bbb3fe442d2288bf86 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image.h @@ -0,0 +1,9 @@ +#pragma once + +#include "cpu/decode_image.h" +#include "cpu/decode_jpeg.h" +#include "cpu/decode_png.h" +#include "cpu/encode_jpeg.h" +#include "cpu/encode_png.h" +#include "cpu/read_write_file.h" +#include "cuda/decode_jpeg_cuda.h" diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image_read_mode.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image_read_mode.h new file mode 100644 index 0000000000000000000000000000000000000000..84425265c3486b3af1ed41253ec75018eb4eb773 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/image/image_read_mode.h @@ -0,0 +1,17 @@ +#pragma once + +#include <stdint.h> + +namespace vision { +namespace image { + +/* Should be kept in-sync with Python ImageReadMode enum */ +using ImageReadMode = int64_t; +const ImageReadMode IMAGE_READ_MODE_UNCHANGED = 0; +const ImageReadMode IMAGE_READ_MODE_GRAY = 1; +const ImageReadMode IMAGE_READ_MODE_GRAY_ALPHA = 2; +const ImageReadMode IMAGE_READ_MODE_RGB = 3; +const ImageReadMode IMAGE_READ_MODE_RGB_ALPHA = 4; + +} // namespace image +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d7d28a517702b624407a3295d230c6984924a77f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.cpp @@ -0,0 +1,342 @@ +#include "video.h" + +#include <regex> + +namespace vision { +namespace video { + +namespace { + +const size_t decoderTimeoutMs = 600000; +const AVPixelFormat defaultVideoPixelFormat = AV_PIX_FMT_RGB24; + +// returns number of written bytes +template <typename T> +size_t fillTensorList(DecoderOutputMessage& msgs, torch::Tensor& frame) { + const auto& msg = msgs; + T* frameData = frame.numel() > 0 ? 
frame.data_ptr<T>() : nullptr; + if (frameData) { + auto sizeInBytes = msg.payload->length(); + memcpy(frameData, msg.payload->data(), sizeInBytes); + } + return sizeof(T); +} + +size_t fillVideoTensor(DecoderOutputMessage& msgs, torch::Tensor& videoFrame) { + return fillTensorList<uint8_t>(msgs, videoFrame); +} + +size_t fillAudioTensor(DecoderOutputMessage& msgs, torch::Tensor& audioFrame) { + return fillTensorList<float>(msgs, audioFrame); +} + +std::array<std::pair<std::string, ffmpeg::MediaType>, 4>::const_iterator +_parse_type(const std::string& stream_string) { + static const std::array<std::pair<std::string, MediaType>, 4> types = {{ + {"video", TYPE_VIDEO}, + {"audio", TYPE_AUDIO}, + {"subtitle", TYPE_SUBTITLE}, + {"cc", TYPE_CC}, + }}; + auto device = std::find_if( + types.begin(), + types.end(), + [stream_string](const std::pair<std::string, MediaType>& p) { + return p.first == stream_string; + }); + if (device != types.end()) { + return device; + } + TORCH_CHECK( + false, "Expected one of [audio, video, subtitle, cc] ", stream_string); +} + +std::string parse_type_to_string(const std::string& stream_string) { + auto device = _parse_type(stream_string); + return device->first; +} + +MediaType parse_type_to_mt(const std::string& stream_string) { + auto device = _parse_type(stream_string); + return device->second; +} + +std::tuple<std::string, long> _parseStream(const std::string& streamString) { + TORCH_CHECK(!streamString.empty(), "Stream string must not be empty"); + static const std::regex regex("([a-zA-Z_]+)(?::([1-9]\\d*|0))?"); + std::smatch match; + + TORCH_CHECK( + std::regex_match(streamString, match, regex), + "Invalid stream string: '", + streamString, + "'"); + + std::string type_ = "video"; + type_ = parse_type_to_string(match[1].str()); + long index_ = -1; + if (match[2].matched) { + try { + index_ = c10::stoi(match[2].str()); + } catch (const std::exception&) { + TORCH_CHECK( + false, + "Could not parse device index '", + match[2].str(), + "' in device string '", + streamString, + "'"); + } + } + return std::make_tuple(type_, index_); +} + +} // namespace + +void Video::_getDecoderParams( + double videoStartS, + int64_t getPtsOnly, + std::string stream, + long stream_id = -1, + bool all_streams = false, + double seekFrameMarginUs = 10) { + int64_t videoStartUs = int64_t(videoStartS * 1e6); + + params.timeoutMs = decoderTimeoutMs; + params.startOffset = videoStartUs; + params.seekAccuracy = seekFrameMarginUs; + params.headerOnly = false; + + params.preventStaleness = false; // not sure what this is about + + if (all_streams == true) { + MediaFormat format; + format.stream = -2; + format.type = TYPE_AUDIO; + params.formats.insert(format); + + format.type = TYPE_VIDEO; + format.stream = -2; + format.format.video.width = 0; + format.format.video.height = 0; + format.format.video.cropImage = 0; + format.format.video.format = defaultVideoPixelFormat; + params.formats.insert(format); + + format.type = TYPE_SUBTITLE; + format.stream = -2; + params.formats.insert(format); + + format.type = TYPE_CC; + format.stream = -2; + params.formats.insert(format); + } else { + // parse stream type + MediaType stream_type = parse_type_to_mt(stream); + + // TODO: reset params.formats + std::set<MediaFormat> formats; + params.formats = formats; + // Define new format + MediaFormat format; + format.type = stream_type; + format.stream = stream_id; + if (stream_type == TYPE_VIDEO) { + format.format.video.width = 0; + format.format.video.height = 0; + format.format.video.cropImage = 0; + 
format.format.video.format = defaultVideoPixelFormat; + } + params.formats.insert(format); + } + +} // _get decoder params + +Video::Video(std::string videoPath, std::string stream) { + // parse stream information + current_stream = _parseStream(stream); + // note that in the initial call we want to get all streams + Video::_getDecoderParams( + 0, // video start + 0, // headerOnly + std::get<0>(current_stream), // stream info - remove that + long(-1), // stream_id parsed from info above change to -2 + true // read all streams + ); + + std::string logMessage, logType; + + // TODO: add read from memory option + params.uri = videoPath; + logType = "file"; + logMessage = videoPath; + + // locals + std::vector<double> audioFPS, videoFPS; + std::vector<double> audioDuration, videoDuration, ccDuration, subsDuration; + std::vector<double> audioTB, videoTB, ccTB, subsTB; + c10::Dict<std::string, std::vector<double>> audioMetadata; + c10::Dict<std::string, std::vector<double>> videoMetadata; + c10::Dict<std::string, std::vector<double>> ccMetadata; + c10::Dict<std::string, std::vector<double>> subsMetadata; + + // calback and metadata defined in struct + succeeded = decoder.init(params, std::move(callback), &metadata); + if (succeeded) { + for (const auto& header : metadata) { + double fps = double(header.fps); + double duration = double(header.duration) * 1e-6; // * timeBase; + + if (header.format.type == TYPE_VIDEO) { + videoFPS.push_back(fps); + videoDuration.push_back(duration); + } else if (header.format.type == TYPE_AUDIO) { + audioFPS.push_back(fps); + audioDuration.push_back(duration); + } else if (header.format.type == TYPE_CC) { + ccDuration.push_back(duration); + } else if (header.format.type == TYPE_SUBTITLE) { + subsDuration.push_back(duration); + }; + } + } + // audio + audioMetadata.insert("duration", audioDuration); + audioMetadata.insert("framerate", audioFPS); + // video + videoMetadata.insert("duration", videoDuration); + videoMetadata.insert("fps", videoFPS); + // subs + subsMetadata.insert("duration", subsDuration); + // cc + ccMetadata.insert("duration", ccDuration); + // put all to a data + streamsMetadata.insert("video", videoMetadata); + streamsMetadata.insert("audio", audioMetadata); + streamsMetadata.insert("subtitles", subsMetadata); + streamsMetadata.insert("cc", ccMetadata); + + succeeded = Video::setCurrentStream(stream); + LOG(INFO) << "\nDecoder inited with: " << succeeded << "\n"; + if (std::get<1>(current_stream) != -1) { + LOG(INFO) + << "Stream index set to " << std::get<1>(current_stream) + << ". If you encounter trouble, consider switching it to automatic stream discovery. 
\n"; + } +} // video + +bool Video::setCurrentStream(std::string stream = "video") { + if ((!stream.empty()) && (_parseStream(stream) != current_stream)) { + current_stream = _parseStream(stream); + } + + double ts = 0; + if (seekTS > 0) { + ts = seekTS; + } + + _getDecoderParams( + ts, // video start + 0, // headerOnly + std::get<0>(current_stream), // stream + long(std::get<1>( + current_stream)), // stream_id parsed from info above change to -2 + false // read all streams + ); + + // calback and metadata defined in Video.h + return (decoder.init(params, std::move(callback), &metadata)); +} + +std::tuple<std::string, int64_t> Video::getCurrentStream() const { + return current_stream; +} + +c10::Dict<std::string, c10::Dict<std::string, std::vector<double>>> Video:: + getStreamMetadata() const { + return streamsMetadata; +} + +void Video::Seek(double ts) { + // initialize the class variables used for seeking and retrurn + _getDecoderParams( + ts, // video start + 0, // headerOnly + std::get<0>(current_stream), // stream + long(std::get<1>( + current_stream)), // stream_id parsed from info above change to -2 + false // read all streams + ); + + // calback and metadata defined in Video.h + succeeded = decoder.init(params, std::move(callback), &metadata); + LOG(INFO) << "Decoder init at seek " << succeeded << "\n"; +} + +std::tuple<torch::Tensor, double> Video::Next() { + // if failing to decode simply return a null tensor (note, should we + // raise an exeption?) + double frame_pts_s; + torch::Tensor outFrame = torch::zeros({0}, torch::kByte); + + // decode single frame + DecoderOutputMessage out; + int64_t res = decoder.decode(&out, decoderTimeoutMs); + // if successfull + if (res == 0) { + frame_pts_s = double(double(out.header.pts) * 1e-6); + + auto header = out.header; + const auto& format = header.format; + + // initialize the output variables based on type + + if (format.type == TYPE_VIDEO) { + // note: this can potentially be optimized + // by having the global tensor that we fill at decode time + // (would avoid allocations) + int outHeight = format.format.video.height; + int outWidth = format.format.video.width; + int numChannels = 3; + outFrame = torch::zeros({outHeight, outWidth, numChannels}, torch::kByte); + fillVideoTensor(out, outFrame); + outFrame = outFrame.permute({2, 0, 1}); + + } else if (format.type == TYPE_AUDIO) { + int outAudioChannels = format.format.audio.channels; + int bytesPerSample = av_get_bytes_per_sample( + static_cast<AVSampleFormat>(format.format.audio.format)); + int frameSizeTotal = out.payload->length(); + + CHECK_EQ(frameSizeTotal % (outAudioChannels * bytesPerSample), 0); + int numAudioSamples = + frameSizeTotal / (outAudioChannels * bytesPerSample); + + outFrame = + torch::zeros({numAudioSamples, outAudioChannels}, torch::kFloat); + + fillAudioTensor(out, outFrame); + } + // currently not supporting other formats (will do soon) + + out.payload.reset(); + } else if (res == ENODATA) { + LOG(INFO) << "Decoder ran out of frames (ENODATA)\n"; + } else { + LOG(ERROR) << "Decoder failed with ERROR_CODE " << res; + } + + return std::make_tuple(outFrame, frame_pts_s); +} + +static auto registerVideo = + torch::class_<Video>("torchvision", "Video") + .def(torch::init<std::string, std::string>()) + .def("get_current_stream", &Video::getCurrentStream) + .def("set_current_stream", &Video::setCurrentStream) + .def("get_metadata", &Video::getStreamMetadata) + .def("seek", &Video::Seek) + .def("next", &Video::Next); + +} // namespace video +} // namespace vision diff 
--git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.h new file mode 100644 index 0000000000000000000000000000000000000000..7da5fbbf29492a020c0bf0fc4c974301f7e3f8b7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video/video.h @@ -0,0 +1,56 @@ +#pragma once + +#include <torch/types.h> + +#include "../decoder/defs.h" +#include "../decoder/memory_buffer.h" +#include "../decoder/sync_decoder.h" + +using namespace ffmpeg; + +namespace vision { +namespace video { + +struct Video : torch::CustomClassHolder { + std::tuple<std::string, long> current_stream; // stream type, id + // global video metadata + c10::Dict<std::string, c10::Dict<std::string, std::vector<double>>> + streamsMetadata; + + public: + Video(std::string videoPath, std::string stream); + std::tuple<std::string, int64_t> getCurrentStream() const; + c10::Dict<std::string, c10::Dict<std::string, std::vector<double>>> + getStreamMetadata() const; + void Seek(double ts); + bool setCurrentStream(std::string stream); + std::tuple<torch::Tensor, double> Next(); + + private: + bool succeeded = false; // decoder init flag + // seekTS and doSeek act as a flag - if it's not set, next function simply + // retruns the next frame. If it's set, we look at the global seek + // time in comination with any_frame settings + double seekTS = -1; + + void _getDecoderParams( + double videoStartS, + int64_t getPtsOnly, + std::string stream, + long stream_id, + bool all_streams, + double seekFrameMarginUs); // this needs to be improved + + std::map<std::string, std::vector<double>> streamTimeBase; // not used + + DecoderInCallback callback = nullptr; + std::vector<DecoderMetadata> metadata; + + protected: + SyncDecoder decoder; + DecoderParameters params; + +}; // struct Video + +} // namespace video +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.cpp new file mode 100644 index 0000000000000000000000000000000000000000..51b0750b431b165891fc294db82cc2d9f689ee4a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.cpp @@ -0,0 +1,672 @@ +#include "video_reader.h" + +#include <Python.h> + +#include "../decoder/memory_buffer.h" +#include "../decoder/sync_decoder.h" + +// If we are in a Windows environment, we need to define +// initialization functions for the _custom_ops extension +#ifdef _WIN32 +PyMODINIT_FUNC PyInit_video_reader(void) { + // No need to do anything. + return NULL; +} +#endif + +using namespace ffmpeg; + +namespace vision { +namespace video_reader { + +namespace { + +const AVPixelFormat defaultVideoPixelFormat = AV_PIX_FMT_RGB24; +const AVSampleFormat defaultAudioSampleFormat = AV_SAMPLE_FMT_FLT; +const AVRational timeBaseQ = AVRational{1, AV_TIME_BASE}; +const size_t decoderTimeoutMs = 600000; +// A jitter can be added to the end of the range to avoid conversion/rounding +// error, small value 100us won't be enough to select the next frame, but enough +// to compensate rounding error due to the multiple conversions. 
+const size_t timeBaseJitterUs = 100; + +DecoderParameters getDecoderParams( + int64_t videoStartUs, + int64_t videoEndUs, + double seekFrameMarginUs, + int64_t getPtsOnly, + int64_t readVideoStream, + int videoWidth, + int videoHeight, + int videoMinDimension, + int videoMaxDimension, + int64_t readAudioStream, + int audioSamples, + int audioChannels) { + DecoderParameters params; + params.headerOnly = getPtsOnly != 0; + params.seekAccuracy = seekFrameMarginUs; + params.startOffset = videoStartUs; + params.endOffset = videoEndUs; + params.timeoutMs = decoderTimeoutMs; + params.preventStaleness = false; + + if (readVideoStream == 1) { + MediaFormat videoFormat(0); + videoFormat.type = TYPE_VIDEO; + videoFormat.format.video.format = defaultVideoPixelFormat; + videoFormat.format.video.width = videoWidth; + videoFormat.format.video.height = videoHeight; + videoFormat.format.video.minDimension = videoMinDimension; + videoFormat.format.video.maxDimension = videoMaxDimension; + params.formats.insert(videoFormat); + } + + if (readAudioStream == 1) { + MediaFormat audioFormat; + audioFormat.type = TYPE_AUDIO; + audioFormat.format.audio.format = defaultAudioSampleFormat; + audioFormat.format.audio.samples = audioSamples; + audioFormat.format.audio.channels = audioChannels; + params.formats.insert(audioFormat); + } + + return params; +} + +// returns number of written bytes +template <typename T> +size_t fillTensor( + std::vector<DecoderOutputMessage>& msgs, + torch::Tensor& frame, + torch::Tensor& framePts, + int64_t num, + int64_t den) { + if (msgs.empty()) { + return 0; + } + T* frameData = frame.numel() > 0 ? frame.data_ptr<T>() : nullptr; + int64_t* framePtsData = framePts.data_ptr<int64_t>(); + CHECK_EQ(framePts.size(0), (int64_t)msgs.size()); + size_t avgElementsInFrame = frame.numel() / msgs.size(); + + size_t offset = 0; + for (size_t i = 0; i < msgs.size(); ++i) { + const auto& msg = msgs[i]; + // convert pts into original time_base + AVRational avr = AVRational{(int)num, (int)den}; + framePtsData[i] = av_rescale_q(msg.header.pts, timeBaseQ, avr); + VLOG(2) << "PTS type: " << sizeof(T) << ", us: " << msg.header.pts + << ", original: " << framePtsData[i]; + + if (frameData) { + auto sizeInBytes = msg.payload->length(); + memcpy(frameData + offset, msg.payload->data(), sizeInBytes); + if (sizeof(T) == sizeof(uint8_t)) { + // Video - move by allocated frame size + offset += avgElementsInFrame / sizeof(T); + } else { + // Audio - move by number of samples + offset += sizeInBytes / sizeof(T); + } + } + } + return offset * sizeof(T); +} + +size_t fillVideoTensor( + std::vector<DecoderOutputMessage>& msgs, + torch::Tensor& videoFrame, + torch::Tensor& videoFramePts, + int64_t num, + int64_t den) { + return fillTensor<uint8_t>(msgs, videoFrame, videoFramePts, num, den); +} + +size_t fillAudioTensor( + std::vector<DecoderOutputMessage>& msgs, + torch::Tensor& audioFrame, + torch::Tensor& audioFramePts, + int64_t num, + int64_t den) { + return fillTensor<float>(msgs, audioFrame, audioFramePts, num, den); +} + +void offsetsToUs( + double& seekFrameMargin, + int64_t readVideoStream, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen, + int64_t& videoStartUs, + int64_t& videoEndUs) { + seekFrameMargin *= AV_TIME_BASE; + videoStartUs = 0; + videoEndUs = -1; + + if (readVideoStream) { + AVRational vr = 
AVRational{(int)videoTimeBaseNum, (int)videoTimeBaseDen}; + if (videoStartPts > 0) { + videoStartUs = av_rescale_q(videoStartPts, vr, timeBaseQ); + } + if (videoEndPts > 0) { + // Add jitter to the end of the range to avoid conversion/rounding error. + // Small value 100us won't be enough to select the next frame, but enough + // to compensate rounding error due to the multiple conversions. + videoEndUs = timeBaseJitterUs + av_rescale_q(videoEndPts, vr, timeBaseQ); + } + } else if (readAudioStream) { + AVRational ar = AVRational{(int)audioTimeBaseNum, (int)audioTimeBaseDen}; + if (audioStartPts > 0) { + videoStartUs = av_rescale_q(audioStartPts, ar, timeBaseQ); + } + if (audioEndPts > 0) { + // Add jitter to the end of the range to avoid conversion/rounding error. + // Small value 100us won't be enough to select the next frame, but enough + // to compensate rounding error due to the multiple conversions. + videoEndUs = timeBaseJitterUs + av_rescale_q(audioEndPts, ar, timeBaseQ); + } + } +} + +torch::List<torch::Tensor> readVideo( + bool isReadFile, + const torch::Tensor& input_video, + std::string videoPath, + double seekFrameMargin, + int64_t getPtsOnly, + int64_t readVideoStream, + int64_t width, + int64_t height, + int64_t minDimension, + int64_t maxDimension, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioSamples, + int64_t audioChannels, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen) { + int64_t videoStartUs, videoEndUs; + + offsetsToUs( + seekFrameMargin, + readVideoStream, + videoStartPts, + videoEndPts, + videoTimeBaseNum, + videoTimeBaseDen, + readAudioStream, + audioStartPts, + audioEndPts, + audioTimeBaseNum, + audioTimeBaseDen, + videoStartUs, + videoEndUs); + + DecoderParameters params = getDecoderParams( + videoStartUs, // videoStartPts + videoEndUs, // videoEndPts + seekFrameMargin, // seekFrameMargin + getPtsOnly, // getPtsOnly + readVideoStream, // readVideoStream + width, // width + height, // height + minDimension, // minDimension + maxDimension, // maxDimension + readAudioStream, // readAudioStream + audioSamples, // audioSamples + audioChannels // audioChannels + ); + + SyncDecoder decoder; + std::vector<DecoderOutputMessage> audioMessages, videoMessages; + DecoderInCallback callback = nullptr; + std::string logMessage, logType; + if (isReadFile) { + params.uri = videoPath; + logType = "file"; + logMessage = videoPath; + } else { + callback = MemoryBuffer::getCallback( + input_video.data_ptr<uint8_t>(), input_video.size(0)); + logType = "memory"; + logMessage = std::to_string(input_video.size(0)); + } + + VLOG(1) << "Video decoding from " << logType << " [" << logMessage + << "] has started"; + + const auto now = std::chrono::system_clock::now(); + + bool succeeded; + DecoderMetadata audioMetadata, videoMetadata; + std::vector<DecoderMetadata> metadata; + if ((succeeded = decoder.init(params, std::move(callback), &metadata))) { + for (const auto& header : metadata) { + if (header.format.type == TYPE_VIDEO) { + videoMetadata = header; + } else if (header.format.type == TYPE_AUDIO) { + audioMetadata = header; + } + } + int res; + DecoderOutputMessage msg; + while (0 == (res = decoder.decode(&msg, decoderTimeoutMs))) { + if (msg.header.format.type == TYPE_VIDEO) { + videoMessages.push_back(std::move(msg)); + } + if (msg.header.format.type == TYPE_AUDIO) { + audioMessages.push_back(std::move(msg)); + } + 
msg.payload.reset(); + } + } else { + LOG(ERROR) << "Decoder initialization has failed"; + } + const auto then = std::chrono::system_clock::now(); + VLOG(1) << "Video decoding from " << logType << " [" << logMessage + << "] has finished, " + << std::chrono::duration_cast<std::chrono::microseconds>(then - now) + .count() + << " us"; + + decoder.shutdown(); + + // video section + torch::Tensor videoFrame = torch::zeros({0}, torch::kByte); + torch::Tensor videoFramePts = torch::zeros({0}, torch::kLong); + torch::Tensor videoTimeBase = torch::zeros({0}, torch::kInt); + torch::Tensor videoFps = torch::zeros({0}, torch::kFloat); + torch::Tensor videoDuration = torch::zeros({0}, torch::kLong); + + if (succeeded && readVideoStream == 1) { + if (!videoMessages.empty()) { + const auto& header = videoMetadata; + const auto& format = header.format.format.video; + int numVideoFrames = videoMessages.size(); + int outHeight = format.height; + int outWidth = format.width; + int numChannels = 3; // decoder guarantees the default AV_PIX_FMT_RGB24 + + size_t expectedWrittenBytes = 0; + if (getPtsOnly == 0) { + videoFrame = torch::zeros( + {numVideoFrames, outHeight, outWidth, numChannels}, torch::kByte); + expectedWrittenBytes = + (size_t)numVideoFrames * outHeight * outWidth * numChannels; + } + + videoFramePts = torch::zeros({numVideoFrames}, torch::kLong); + + VLOG(2) << "video duration: " << header.duration + << ", fps: " << header.fps << ", num: " << header.num + << ", den: " << header.den << ", num frames: " << numVideoFrames; + + auto numberWrittenBytes = fillVideoTensor( + videoMessages, videoFrame, videoFramePts, header.num, header.den); + + CHECK_EQ(numberWrittenBytes, expectedWrittenBytes); + + videoTimeBase = torch::zeros({2}, torch::kInt); + int* videoTimeBaseData = videoTimeBase.data_ptr<int>(); + videoTimeBaseData[0] = header.num; + videoTimeBaseData[1] = header.den; + + videoFps = torch::zeros({1}, torch::kFloat); + float* videoFpsData = videoFps.data_ptr<float>(); + videoFpsData[0] = header.fps; + + videoDuration = torch::zeros({1}, torch::kLong); + int64_t* videoDurationData = videoDuration.data_ptr<int64_t>(); + AVRational vr = AVRational{(int)header.num, (int)header.den}; + videoDurationData[0] = av_rescale_q(header.duration, timeBaseQ, vr); + VLOG(1) << "Video decoding from " << logType << " [" << logMessage + << "] filled video tensors"; + } else { + VLOG(1) << "Miss video stream"; + } + } + + // audio section + torch::Tensor audioFrame = torch::zeros({0}, torch::kFloat); + torch::Tensor audioFramePts = torch::zeros({0}, torch::kLong); + torch::Tensor audioTimeBase = torch::zeros({0}, torch::kInt); + torch::Tensor audioSampleRate = torch::zeros({0}, torch::kInt); + torch::Tensor audioDuration = torch::zeros({0}, torch::kLong); + if (succeeded && readAudioStream == 1) { + if (!audioMessages.empty()) { + const auto& header = audioMetadata; + const auto& format = header.format.format.audio; + + int64_t outAudioChannels = format.channels; + int bytesPerSample = + av_get_bytes_per_sample(static_cast<AVSampleFormat>(format.format)); + + int numAudioFrames = audioMessages.size(); + int64_t numAudioSamples = 0; + if (getPtsOnly == 0) { + int64_t frameSizeTotal = 0; + for (auto const& audioMessage : audioMessages) { + frameSizeTotal += audioMessage.payload->length(); + } + + CHECK_EQ(frameSizeTotal % (outAudioChannels * bytesPerSample), 0); + numAudioSamples = frameSizeTotal / (outAudioChannels * bytesPerSample); + + audioFrame = + torch::zeros({numAudioSamples, outAudioChannels}, 
torch::kFloat); + } + audioFramePts = torch::zeros({numAudioFrames}, torch::kLong); + + VLOG(2) << "audio duration: " << header.duration + << ", channels: " << format.channels + << ", sample rate: " << format.samples << ", num: " << header.num + << ", den: " << header.den; + + auto numberWrittenBytes = fillAudioTensor( + audioMessages, audioFrame, audioFramePts, header.num, header.den); + CHECK_EQ( + numberWrittenBytes, + numAudioSamples * outAudioChannels * sizeof(float)); + + audioTimeBase = torch::zeros({2}, torch::kInt); + int* audioTimeBaseData = audioTimeBase.data_ptr<int>(); + audioTimeBaseData[0] = header.num; + audioTimeBaseData[1] = header.den; + + audioSampleRate = torch::zeros({1}, torch::kInt); + int* audioSampleRateData = audioSampleRate.data_ptr<int>(); + audioSampleRateData[0] = format.samples; + + audioDuration = torch::zeros({1}, torch::kLong); + int64_t* audioDurationData = audioDuration.data_ptr<int64_t>(); + AVRational ar = AVRational{(int)header.num, (int)header.den}; + audioDurationData[0] = av_rescale_q(header.duration, timeBaseQ, ar); + VLOG(1) << "Video decoding from " << logType << " [" << logMessage + << "] filled audio tensors"; + } else { + VLOG(1) << "Miss audio stream"; + } + } + + torch::List<torch::Tensor> result; + result.push_back(std::move(videoFrame)); + result.push_back(std::move(videoFramePts)); + result.push_back(std::move(videoTimeBase)); + result.push_back(std::move(videoFps)); + result.push_back(std::move(videoDuration)); + result.push_back(std::move(audioFrame)); + result.push_back(std::move(audioFramePts)); + result.push_back(std::move(audioTimeBase)); + result.push_back(std::move(audioSampleRate)); + result.push_back(std::move(audioDuration)); + + VLOG(1) << "Video decoding from " << logType << " [" << logMessage + << "] about to return"; + + return result; +} + +torch::List<torch::Tensor> probeVideo( + bool isReadFile, + const torch::Tensor& input_video, + std::string videoPath) { + DecoderParameters params = getDecoderParams( + 0, // videoStartUs + -1, // videoEndUs + 0, // seekFrameMargin + 1, // getPtsOnly + 1, // readVideoStream + 0, // width + 0, // height + 0, // minDimension + 0, // maxDimension + 1, // readAudioStream + 0, // audioSamples + 0 // audioChannels + ); + + SyncDecoder decoder; + DecoderInCallback callback = nullptr; + std::string logMessage, logType; + if (isReadFile) { + params.uri = videoPath; + logType = "file"; + logMessage = videoPath; + } else { + callback = MemoryBuffer::getCallback( + input_video.data_ptr<uint8_t>(), input_video.size(0)); + logType = "memory"; + logMessage = std::to_string(input_video.size(0)); + } + + VLOG(1) << "Video probing from " << logType << " [" << logMessage + << "] has started"; + + const auto now = std::chrono::system_clock::now(); + + bool succeeded; + bool gotAudio = false, gotVideo = false; + DecoderMetadata audioMetadata, videoMetadata; + std::vector<DecoderMetadata> metadata; + if ((succeeded = decoder.init(params, std::move(callback), &metadata))) { + for (const auto& header : metadata) { + if (header.format.type == TYPE_VIDEO) { + gotVideo = true; + videoMetadata = header; + } else if (header.format.type == TYPE_AUDIO) { + gotAudio = true; + audioMetadata = header; + } + } + const auto then = std::chrono::system_clock::now(); + VLOG(1) << "Video probing from " << logType << " [" << logMessage + << "] has finished, " + << std::chrono::duration_cast<std::chrono::microseconds>(then - now) + .count() + << " us"; + } else { + LOG(ERROR) << "Decoder initialization has failed"; + } + + 
decoder.shutdown(); + + // video section + torch::Tensor videoTimeBase = torch::zeros({0}, torch::kInt); + torch::Tensor videoFps = torch::zeros({0}, torch::kFloat); + torch::Tensor videoDuration = torch::zeros({0}, torch::kLong); + + if (succeeded && gotVideo) { + videoTimeBase = torch::zeros({2}, torch::kInt); + int* videoTimeBaseData = videoTimeBase.data_ptr<int>(); + const auto& header = videoMetadata; + + videoTimeBaseData[0] = header.num; + videoTimeBaseData[1] = header.den; + + videoFps = torch::zeros({1}, torch::kFloat); + float* videoFpsData = videoFps.data_ptr<float>(); + videoFpsData[0] = header.fps; + + videoDuration = torch::zeros({1}, torch::kLong); + int64_t* videoDurationData = videoDuration.data_ptr<int64_t>(); + AVRational avr = AVRational{(int)header.num, (int)header.den}; + videoDurationData[0] = av_rescale_q(header.duration, timeBaseQ, avr); + + VLOG(2) << "Prob fps: " << header.fps << ", duration: " << header.duration + << ", num: " << header.num << ", den: " << header.den; + + VLOG(1) << "Video probing from " << logType << " [" << logMessage + << "] filled video tensors"; + } else { + LOG(ERROR) << "Miss video stream"; + } + + // audio section + torch::Tensor audioTimeBase = torch::zeros({0}, torch::kInt); + torch::Tensor audioSampleRate = torch::zeros({0}, torch::kInt); + torch::Tensor audioDuration = torch::zeros({0}, torch::kLong); + + if (succeeded && gotAudio) { + audioTimeBase = torch::zeros({2}, torch::kInt); + int* audioTimeBaseData = audioTimeBase.data_ptr<int>(); + const auto& header = audioMetadata; + const auto& media = header.format; + const auto& format = media.format.audio; + + audioTimeBaseData[0] = header.num; + audioTimeBaseData[1] = header.den; + + audioSampleRate = torch::zeros({1}, torch::kInt); + int* audioSampleRateData = audioSampleRate.data_ptr<int>(); + audioSampleRateData[0] = format.samples; + + audioDuration = torch::zeros({1}, torch::kLong); + int64_t* audioDurationData = audioDuration.data_ptr<int64_t>(); + AVRational avr = AVRational{(int)header.num, (int)header.den}; + audioDurationData[0] = av_rescale_q(header.duration, timeBaseQ, avr); + + VLOG(2) << "Prob sample rate: " << format.samples + << ", duration: " << header.duration << ", num: " << header.num + << ", den: " << header.den; + + VLOG(1) << "Video probing from " << logType << " [" << logMessage + << "] filled audio tensors"; + } else { + VLOG(1) << "Miss audio stream"; + } + + torch::List<torch::Tensor> result; + result.push_back(std::move(videoTimeBase)); + result.push_back(std::move(videoFps)); + result.push_back(std::move(videoDuration)); + result.push_back(std::move(audioTimeBase)); + result.push_back(std::move(audioSampleRate)); + result.push_back(std::move(audioDuration)); + + VLOG(1) << "Video probing from " << logType << " [" << logMessage + << "] is about to return"; + + return result; +} + +} // namespace + +torch::List<torch::Tensor> read_video_from_memory( + torch::Tensor input_video, + double seekFrameMargin, + int64_t getPtsOnly, + int64_t readVideoStream, + int64_t width, + int64_t height, + int64_t minDimension, + int64_t maxDimension, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioSamples, + int64_t audioChannels, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen) { + return readVideo( + false, + input_video, + "", // videoPath + seekFrameMargin, + getPtsOnly, + readVideoStream, + width, + height, + 
minDimension, + maxDimension, + videoStartPts, + videoEndPts, + videoTimeBaseNum, + videoTimeBaseDen, + readAudioStream, + audioSamples, + audioChannels, + audioStartPts, + audioEndPts, + audioTimeBaseNum, + audioTimeBaseDen); +} + +torch::List<torch::Tensor> read_video_from_file( + std::string videoPath, + double seekFrameMargin, + int64_t getPtsOnly, + int64_t readVideoStream, + int64_t width, + int64_t height, + int64_t minDimension, + int64_t maxDimension, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioSamples, + int64_t audioChannels, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen) { + torch::Tensor dummy_input_video = torch::ones({0}); + return readVideo( + true, + dummy_input_video, + videoPath, + seekFrameMargin, + getPtsOnly, + readVideoStream, + width, + height, + minDimension, + maxDimension, + videoStartPts, + videoEndPts, + videoTimeBaseNum, + videoTimeBaseDen, + readAudioStream, + audioSamples, + audioChannels, + audioStartPts, + audioEndPts, + audioTimeBaseNum, + audioTimeBaseDen); +} + +torch::List<torch::Tensor> probe_video_from_memory(torch::Tensor input_video) { + return probeVideo(false, input_video, ""); +} + +torch::List<torch::Tensor> probe_video_from_file(std::string videoPath) { + torch::Tensor dummy_input_video = torch::ones({0}); + return probeVideo(true, dummy_input_video, videoPath); +} + +TORCH_LIBRARY_FRAGMENT(video_reader, m) { + m.def("read_video_from_memory", read_video_from_memory); + m.def("read_video_from_file", read_video_from_file); + m.def("probe_video_from_memory", probe_video_from_memory); + m.def("probe_video_from_file", probe_video_from_file); +} + +} // namespace video_reader +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.h new file mode 100644 index 0000000000000000000000000000000000000000..48c4c841219521160fadee0660461129c00149c6 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/io/video_reader/video_reader.h @@ -0,0 +1,55 @@ +#pragma once + +#include <torch/types.h> + +namespace vision { +namespace video_reader { + +torch::List<torch::Tensor> read_video_from_memory( + torch::Tensor input_video, + double seekFrameMargin, + int64_t getPtsOnly, + int64_t readVideoStream, + int64_t width, + int64_t height, + int64_t minDimension, + int64_t maxDimension, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioSamples, + int64_t audioChannels, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen); + +torch::List<torch::Tensor> read_video_from_file( + std::string videoPath, + double seekFrameMargin, + int64_t getPtsOnly, + int64_t readVideoStream, + int64_t width, + int64_t height, + int64_t minDimension, + int64_t maxDimension, + int64_t videoStartPts, + int64_t videoEndPts, + int64_t videoTimeBaseNum, + int64_t videoTimeBaseDen, + int64_t readAudioStream, + int64_t audioSamples, + int64_t audioChannels, + int64_t audioStartPts, + int64_t audioEndPts, + int64_t audioTimeBaseNum, + int64_t audioTimeBaseDen); + +torch::List<torch::Tensor> probe_video_from_memory(torch::Tensor input_video); + +torch::List<torch::Tensor> probe_video_from_file(std::string videoPath); + +} // 
namespace video_reader +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/macros.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/macros.h new file mode 100644 index 0000000000000000000000000000000000000000..8a7136fad86f0a1d3b0eda2e1072ef99f251a254 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/macros.h @@ -0,0 +1,22 @@ +#pragma once + +#ifdef _WIN32 +#if defined(torchvision_EXPORTS) +#define VISION_API __declspec(dllexport) +#else +#define VISION_API __declspec(dllimport) +#endif +#else +#define VISION_API +#endif + +#if (defined __cpp_inline_variables) || __cplusplus >= 201703L +#define VISION_INLINE_VARIABLE inline +#else +#ifdef _MSC_VER +#define VISION_INLINE_VARIABLE __declspec(selectany) +#define HINT_MSVC_LINKER_INCLUDE_SYMBOL +#else +#define VISION_INLINE_VARIABLE __attribute__((weak)) +#endif +#endif diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e29674b706a522812f393f3d2fbe9e6d2d03bfb5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.cpp @@ -0,0 +1,47 @@ +#include "alexnet.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { +AlexNetImpl::AlexNetImpl(int64_t num_classes) { + features = torch::nn::Sequential( + torch::nn::Conv2d( + torch::nn::Conv2dOptions(3, 64, 11).stride(4).padding(2)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(modelsimpl::max_pool2d, 3, 2), + torch::nn::Conv2d(torch::nn::Conv2dOptions(64, 192, 5).padding(2)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(modelsimpl::max_pool2d, 3, 2), + torch::nn::Conv2d(torch::nn::Conv2dOptions(192, 384, 3).padding(1)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Conv2d(torch::nn::Conv2dOptions(384, 256, 3).padding(1)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Conv2d(torch::nn::Conv2dOptions(256, 256, 3).padding(1)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(modelsimpl::max_pool2d, 3, 2)); + + classifier = torch::nn::Sequential( + torch::nn::Dropout(), + torch::nn::Linear(256 * 6 * 6, 4096), + torch::nn::Functional(torch::relu), + torch::nn::Dropout(), + torch::nn::Linear(4096, 4096), + torch::nn::Functional(torch::relu), + torch::nn::Linear(4096, num_classes)); + + register_module("features", features); + register_module("classifier", classifier); +} + +torch::Tensor AlexNetImpl::forward(torch::Tensor x) { + x = features->forward(x); + x = torch::adaptive_avg_pool2d(x, {6, 6}); + x = x.view({x.size(0), -1}); + x = classifier->forward(x); + + return x; +} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.h new file mode 100644 index 0000000000000000000000000000000000000000..e584446d205c57e73a0e4cc42c59f86b4f75a8df --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/alexnet.h @@ -0,0 +1,21 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +// AlexNet model architecture from the +// "One weird trick..." <https://arxiv.org/abs/1404.5997> paper. 
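+// A minimal usage sketch, assuming libtorch is linked and this header is
+// included (the 1x3x224x224 input shape is an assumption; the adaptive 6x6
+// pooling in forward() tolerates other sufficiently large spatial sizes):
+//
+//   vision::models::AlexNet net(/*num_classes=*/1000);
+//   net->eval();
+//   torch::NoGradGuard no_grad;
+//   auto input = torch::randn({1, 3, 224, 224});
+//   auto logits = net->forward(input); // -> [1, 1000]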
+struct VISION_API AlexNetImpl : torch::nn::Module { + torch::nn::Sequential features{nullptr}, classifier{nullptr}; + + explicit AlexNetImpl(int64_t num_classes = 1000); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(AlexNet); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..145748b144963990a200009859e9c62b4e707d81 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.cpp @@ -0,0 +1,218 @@ +#include "densenet.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { +using Options = torch::nn::Conv2dOptions; + +struct _DenseLayerImpl : torch::nn::SequentialImpl { + double drop_rate; + + _DenseLayerImpl( + int64_t num_input_features, + int64_t growth_rate, + int64_t bn_size, + double drop_rate) + : drop_rate(drop_rate) { + push_back("norm1", torch::nn::BatchNorm2d(num_input_features)); + push_back("relu1", torch::nn::Functional(modelsimpl::relu_)); + push_back( + "conv1", + torch::nn::Conv2d(Options(num_input_features, bn_size * growth_rate, 1) + .stride(1) + .bias(false))); + push_back("norm2", torch::nn::BatchNorm2d(bn_size * growth_rate)); + push_back("relu2", torch::nn::Functional(modelsimpl::relu_)); + push_back( + "conv2", + torch::nn::Conv2d(Options(bn_size * growth_rate, growth_rate, 3) + .stride(1) + .padding(1) + .bias(false))); + } + + torch::Tensor forward(torch::Tensor x) { + auto new_features = torch::nn::SequentialImpl::forward(x); + if (drop_rate > 0) + new_features = + torch::dropout(new_features, drop_rate, this->is_training()); + return torch::cat({x, new_features}, 1); + } +}; + +TORCH_MODULE(_DenseLayer); + +struct _DenseBlockImpl : torch::nn::SequentialImpl { + _DenseBlockImpl( + int64_t num_layers, + int64_t num_input_features, + int64_t bn_size, + int64_t growth_rate, + double drop_rate) { + for (int64_t i = 0; i < num_layers; ++i) { + auto layer = _DenseLayer( + num_input_features + i * growth_rate, + growth_rate, + bn_size, + drop_rate); + push_back("denselayer" + std::to_string(i + 1), layer); + } + } + + torch::Tensor forward(torch::Tensor x) { + return torch::nn::SequentialImpl::forward(x); + } +}; + +TORCH_MODULE(_DenseBlock); + +struct _TransitionImpl : torch::nn::SequentialImpl { + _TransitionImpl(int64_t num_input_features, int64_t num_output_features) { + push_back("norm", torch::nn::BatchNorm2d(num_input_features)); + push_back("relu ", torch::nn::Functional(modelsimpl::relu_)); + push_back( + "conv", + torch::nn::Conv2d(Options(num_input_features, num_output_features, 1) + .stride(1) + .bias(false))); + push_back("pool", torch::nn::Functional([](const torch::Tensor& input) { + return torch::avg_pool2d(input, 2, 2, 0, false, true); + })); + } + + torch::Tensor forward(torch::Tensor x) { + return torch::nn::SequentialImpl::forward(x); + } +}; + +TORCH_MODULE(_Transition); + +DenseNetImpl::DenseNetImpl( + int64_t num_classes, + int64_t growth_rate, + const std::vector<int64_t>& block_config, + int64_t num_init_features, + int64_t bn_size, + double drop_rate) { + // First convolution + features = torch::nn::Sequential(); + features->push_back( + "conv0", + torch::nn::Conv2d( + Options(3, num_init_features, 7).stride(2).padding(3).bias(false))); + + features->push_back("norm0", torch::nn::BatchNorm2d(num_init_features)); + features->push_back("relu0", 
torch::nn::Functional(modelsimpl::relu_)); + features->push_back( + "pool0", torch::nn::Functional(torch::max_pool2d, 3, 2, 1, 1, false)); + + // Each denseblock + auto num_features = num_init_features; + for (size_t i = 0; i < block_config.size(); ++i) { + auto num_layers = block_config[i]; + _DenseBlock block( + num_layers, num_features, bn_size, growth_rate, drop_rate); + + features->push_back("denseblock" + std::to_string(i + 1), block); + num_features = num_features + num_layers * growth_rate; + + if (i != block_config.size() - 1) { + auto trans = _Transition(num_features, num_features / 2); + features->push_back("transition" + std::to_string(i + 1), trans); + num_features = num_features / 2; + } + } + + // Final batch norm + features->push_back("norm5", torch::nn::BatchNorm2d(num_features)); + // Linear layer + classifier = torch::nn::Linear(num_features, num_classes); + + register_module("features", features); + register_module("classifier", classifier); + + // Official init from torch repo. + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) + torch::nn::init::kaiming_normal_(M->weight); + else if (auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::constant_(M->weight, 1); + torch::nn::init::constant_(M->bias, 0); + } else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) + torch::nn::init::constant_(M->bias, 0); + } +} + +torch::Tensor DenseNetImpl::forward(torch::Tensor x) { + auto features = this->features->forward(x); + auto out = torch::relu_(features); + out = torch::adaptive_avg_pool2d(out, {1, 1}); + + out = out.view({features.size(0), -1}); + out = this->classifier->forward(out); + return out; +} + +DenseNet121Impl::DenseNet121Impl( + int64_t num_classes, + int64_t growth_rate, + const std::vector<int64_t>& block_config, + int64_t num_init_features, + int64_t bn_size, + double drop_rate) + : DenseNetImpl( + num_classes, + growth_rate, + block_config, + num_init_features, + bn_size, + drop_rate) {} + +DenseNet169Impl::DenseNet169Impl( + int64_t num_classes, + int64_t growth_rate, + const std::vector<int64_t>& block_config, + int64_t num_init_features, + int64_t bn_size, + double drop_rate) + : DenseNetImpl( + num_classes, + growth_rate, + block_config, + num_init_features, + bn_size, + drop_rate) {} + +DenseNet201Impl::DenseNet201Impl( + int64_t num_classes, + int64_t growth_rate, + const std::vector<int64_t>& block_config, + int64_t num_init_features, + int64_t bn_size, + double drop_rate) + : DenseNetImpl( + num_classes, + growth_rate, + block_config, + num_init_features, + bn_size, + drop_rate) {} + +DenseNet161Impl::DenseNet161Impl( + int64_t num_classes, + int64_t growth_rate, + const std::vector<int64_t>& block_config, + int64_t num_init_features, + int64_t bn_size, + double drop_rate) + : DenseNetImpl( + num_classes, + growth_rate, + block_config, + num_init_features, + bn_size, + drop_rate) {} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.h new file mode 100644 index 0000000000000000000000000000000000000000..d1d8f6645da8efbe0b84f6e63296311c772d5bdc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/densenet.h @@ -0,0 +1,83 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +// Densenet-BC model 
class, based on +// "Densely Connected Convolutional Networks" +// <https://arxiv.org/pdf/1608.06993.pdf> + +// Args: +// num_classes (int) - number of classification classes +// growth_rate (int) - how many filters to add each layer (`k` in paper) +// block_config (list of 4 ints) - how many layers in each pooling block +// num_init_features (int) - the number of filters to learn in the first +// convolution layer +// bn_size (int) - multiplicative factor for number of bottle neck layers +// (i.e. bn_size * k features in the bottleneck layer) +// drop_rate (float) - dropout rate after each dense layer +struct VISION_API DenseNetImpl : torch::nn::Module { + torch::nn::Sequential features{nullptr}; + torch::nn::Linear classifier{nullptr}; + + explicit DenseNetImpl( + int64_t num_classes = 1000, + int64_t growth_rate = 32, + const std::vector<int64_t>& block_config = {6, 12, 24, 16}, + int64_t num_init_features = 64, + int64_t bn_size = 4, + double drop_rate = 0); + + torch::Tensor forward(torch::Tensor x); +}; + +struct VISION_API DenseNet121Impl : DenseNetImpl { + explicit DenseNet121Impl( + int64_t num_classes = 1000, + int64_t growth_rate = 32, + const std::vector<int64_t>& block_config = {6, 12, 24, 16}, + int64_t num_init_features = 64, + int64_t bn_size = 4, + double drop_rate = 0); +}; + +struct VISION_API DenseNet169Impl : DenseNetImpl { + explicit DenseNet169Impl( + int64_t num_classes = 1000, + int64_t growth_rate = 32, + const std::vector<int64_t>& block_config = {6, 12, 32, 32}, + int64_t num_init_features = 64, + int64_t bn_size = 4, + double drop_rate = 0); +}; + +struct VISION_API DenseNet201Impl : DenseNetImpl { + explicit DenseNet201Impl( + int64_t num_classes = 1000, + int64_t growth_rate = 32, + const std::vector<int64_t>& block_config = {6, 12, 48, 32}, + int64_t num_init_features = 64, + int64_t bn_size = 4, + double drop_rate = 0); +}; + +struct VISION_API DenseNet161Impl : DenseNetImpl { + explicit DenseNet161Impl( + int64_t num_classes = 1000, + int64_t growth_rate = 48, + const std::vector<int64_t>& block_config = {6, 12, 36, 24}, + int64_t num_init_features = 96, + int64_t bn_size = 4, + double drop_rate = 0); +}; + +TORCH_MODULE(DenseNet); +TORCH_MODULE(DenseNet121); +TORCH_MODULE(DenseNet169); +TORCH_MODULE(DenseNet201); +TORCH_MODULE(DenseNet161); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9e381b1628d8d766aa087c5b8f8f3ac27b6580cc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.cpp @@ -0,0 +1,230 @@ +#include "googlenet.h" + +namespace vision { +namespace models { + +using Options = torch::nn::Conv2dOptions; + +namespace _googlenetimpl { +BasicConv2dImpl::BasicConv2dImpl(torch::nn::Conv2dOptions options) { + options.bias(false); + conv = torch::nn::Conv2d(options); + bn = torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(options.out_channels()).eps(0.001)); + + register_module("conv", conv); + register_module("bn", bn); +} + +torch::Tensor BasicConv2dImpl::forward(torch::Tensor x) { + x = conv->forward(x); + x = bn->forward(x); + return x.relu_(); +} + +InceptionImpl::InceptionImpl( + int64_t in_channels, + int64_t ch1x1, + int64_t ch3x3red, + int64_t ch3x3, + int64_t ch5x5red, + int64_t ch5x5, + int64_t pool_proj) { + branch1 = BasicConv2d(Options(in_channels, ch1x1, 1)); 
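+  // branch1 is the plain 1x1 path; the branches assembled below are: branch2,
+  // a 1x1 reduction followed by a 3x3 convolution; branch3, a 1x1 reduction
+  // followed by another 3x3 convolution (note the 3x3 kernel despite the
+  // ch5x5red/ch5x5 parameter names); and branch4, a 3x3 max pool followed by
+  // a 1x1 projection. forward() concatenates all four branch outputs along
+  // the channel dimension.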
+ + branch2->push_back(BasicConv2d(Options(in_channels, ch3x3red, 1))); + branch2->push_back(BasicConv2d(Options(ch3x3red, ch3x3, 3).padding(1))); + + branch3->push_back(BasicConv2d(Options(in_channels, ch5x5red, 1))); + branch3->push_back(BasicConv2d(Options(ch5x5red, ch5x5, 3).padding(1))); + + branch4->push_back( + torch::nn::Functional(torch::max_pool2d, 3, 1, 1, 1, true)); + branch4->push_back(BasicConv2d(Options(in_channels, pool_proj, 1))); + + register_module("branch1", branch1); + register_module("branch2", branch2); + register_module("branch3", branch3); + register_module("branch4", branch4); +} + +torch::Tensor InceptionImpl::forward(torch::Tensor x) { + auto b1 = branch1->forward(x); + auto b2 = branch2->forward(x); + auto b3 = branch3->forward(x); + auto b4 = branch4->forward(x); + + return torch::cat({b1, b2, b3, b4}, 1); +} + +InceptionAuxImpl::InceptionAuxImpl(int64_t in_channels, int64_t num_classes) { + conv = BasicConv2d(Options(in_channels, 128, 1)); + fc1 = torch::nn::Linear(2048, 1024); + fc2 = torch::nn::Linear(1024, num_classes); + + register_module("conv", conv); + register_module("fc1", fc1); + register_module("fc2", fc2); +} + +torch::Tensor InceptionAuxImpl::forward(at::Tensor x) { + // aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 + x = torch::adaptive_avg_pool2d(x, {4, 4}); + // aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 + x = conv->forward(x); + // N x 128 x 4 x 4 + x = x.view({x.size(0), -1}); + // N x 2048 + x = fc1->forward(x).relu_(); + // N x 2048 + x = torch::dropout(x, 0.7, is_training()); + // N x 2048 + x = fc2->forward(x); + // N x 1024 + + return x; +} + +} // namespace _googlenetimpl + +GoogLeNetImpl::GoogLeNetImpl( + int64_t num_classes, + bool aux_logits, + bool transform_input, + bool init_weights) { + this->aux_logits = aux_logits; + this->transform_input = transform_input; + + conv1 = _googlenetimpl::BasicConv2d(Options(3, 64, 7).stride(2).padding(3)); + conv2 = _googlenetimpl::BasicConv2d(Options(64, 64, 1)); + conv3 = _googlenetimpl::BasicConv2d(Options(64, 192, 3).padding(1)); + + inception3a = _googlenetimpl::Inception(192, 64, 96, 128, 16, 32, 32); + inception3b = _googlenetimpl::Inception(256, 128, 128, 192, 32, 96, 64); + + inception4a = _googlenetimpl::Inception(480, 192, 96, 208, 16, 48, 64); + inception4b = _googlenetimpl::Inception(512, 160, 112, 224, 24, 64, 64); + inception4c = _googlenetimpl::Inception(512, 128, 128, 256, 24, 64, 64); + inception4d = _googlenetimpl::Inception(512, 112, 144, 288, 32, 64, 64); + inception4e = _googlenetimpl::Inception(528, 256, 160, 320, 32, 128, 128); + + inception5a = _googlenetimpl::Inception(832, 256, 160, 320, 32, 128, 128); + inception5b = _googlenetimpl::Inception(832, 384, 192, 384, 48, 128, 128); + + if (aux_logits) { + aux1 = _googlenetimpl::InceptionAux(512, num_classes); + aux2 = _googlenetimpl::InceptionAux(528, num_classes); + + register_module("aux1", aux1); + register_module("aux2", aux2); + } + + dropout = torch::nn::Dropout(0.2); + fc = torch::nn::Linear(1024, num_classes); + + register_module("conv1", conv1); + register_module("conv2", conv2); + register_module("conv3", conv3); + + register_module("inception3a", inception3a); + register_module("inception3b", inception3b); + + register_module("inception4a", inception4a); + register_module("inception4b", inception4b); + register_module("inception4c", inception4c); + register_module("inception4d", inception4d); + register_module("inception4e", inception4e); + + register_module("inception5a", inception5a); + 
register_module("inception5b", inception5b); + + register_module("dropout", dropout); + register_module("fc", fc); + + if (init_weights) + _initialize_weights(); +} + +void GoogLeNetImpl::_initialize_weights() { + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) + torch::nn::init::normal_(M->weight); // Note: used instead of truncated + // normal initialization + else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) + torch::nn::init::normal_(M->weight); // Note: used instead of truncated + // normal initialization + else if (auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::ones_(M->weight); + torch::nn::init::zeros_(M->bias); + } + } +} + +GoogLeNetOutput GoogLeNetImpl::forward(torch::Tensor x) { + if (transform_input) { + auto x_ch0 = torch::unsqueeze(x.select(1, 0), 1) * (0.229 / 0.5) + + (0.485 - 0.5) / 0.5; + auto x_ch1 = torch::unsqueeze(x.select(1, 1), 1) * (0.224 / 0.5) + + (0.456 - 0.5) / 0.5; + auto x_ch2 = torch::unsqueeze(x.select(1, 2), 1) * (0.225 / 0.5) + + (0.406 - 0.5) / 0.5; + + x = torch::cat({x_ch0, x_ch1, x_ch2}, 1); + } + + // N x 3 x 224 x 224 + x = conv1->forward(x); + // N x 64 x 112 x 112 + x = torch::max_pool2d(x, 3, 2, 0, 1, true); + // N x 64 x 56 x 56 + x = conv2->forward(x); + // N x 64 x 56 x 56 + x = conv3->forward(x); + // N x 192 x 56 x 56 + x = torch::max_pool2d(x, 3, 2, 0, 1, true); + + // N x 192 x 28 x 28 + x = inception3a->forward(x); + // N x 256 x 28 x 28 + x = inception3b->forward(x); + // N x 480 x 28 x 28 + x = torch::max_pool2d(x, 3, 2, 0, 1, true); + // N x 480 x 14 x 14 + x = inception4a->forward(x); + // N x 512 x 14 x 14 + torch::Tensor aux1; + if (is_training() && aux_logits) + aux1 = this->aux1->forward(x); + + x = inception4b->forward(x); + // N x 512 x 14 x 14 + x = inception4c->forward(x); + // N x 512 x 14 x 14 + x = inception4d->forward(x); + // N x 528 x 14 x 14 + torch::Tensor aux2; + if (is_training() && aux_logits) + aux2 = this->aux2->forward(x); + + x = inception4e(x); + // N x 832 x 14 x 14 + x = torch::max_pool2d(x, 2, 2, 0, 1, true); + // N x 832 x 7 x 7 + x = inception5a(x); + // N x 832 x 7 x 7 + x = inception5b(x); + // N x 1024 x 7 x 7 + + x = torch::adaptive_avg_pool2d(x, {1, 1}); + // N x 1024 x 1 x 1 + x = x.view({x.size(0), -1}); + // N x 1024 + x = dropout->forward(x); + x = fc->forward(x); + // N x 1000(num_classes) + + return {x, aux1, aux2}; +} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.h new file mode 100644 index 0000000000000000000000000000000000000000..cb10a0b90b7e5ae6e916b1a796ba50a1486765eb --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/googlenet.h @@ -0,0 +1,87 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { + +namespace _googlenetimpl { +struct VISION_API BasicConv2dImpl : torch::nn::Module { + torch::nn::Conv2d conv{nullptr}; + torch::nn::BatchNorm2d bn{nullptr}; + + explicit BasicConv2dImpl(torch::nn::Conv2dOptions options); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(BasicConv2d); + +struct VISION_API InceptionImpl : torch::nn::Module { + BasicConv2d branch1{nullptr}; + torch::nn::Sequential branch2, branch3, branch4; + + InceptionImpl( + int64_t in_channels, + int64_t ch1x1, + int64_t ch3x3red, 
+ int64_t ch3x3, + int64_t ch5x5red, + int64_t ch5x5, + int64_t pool_proj); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(Inception); + +struct VISION_API InceptionAuxImpl : torch::nn::Module { + BasicConv2d conv{nullptr}; + torch::nn::Linear fc1{nullptr}, fc2{nullptr}; + + InceptionAuxImpl(int64_t in_channels, int64_t num_classes); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(InceptionAux); + +} // namespace _googlenetimpl + +struct VISION_API GoogLeNetOutput { + torch::Tensor output; + torch::Tensor aux1; + torch::Tensor aux2; +}; + +struct VISION_API GoogLeNetImpl : torch::nn::Module { + bool aux_logits, transform_input; + + _googlenetimpl::BasicConv2d conv1{nullptr}, conv2{nullptr}, conv3{nullptr}; + + _googlenetimpl::Inception inception3a{nullptr}, inception3b{nullptr}, + inception4a{nullptr}, inception4b{nullptr}, inception4c{nullptr}, + inception4d{nullptr}, inception4e{nullptr}, inception5a{nullptr}, + inception5b{nullptr}; + + _googlenetimpl::InceptionAux aux1{nullptr}, aux2{nullptr}; + + torch::nn::Dropout dropout{nullptr}; + torch::nn::Linear fc{nullptr}; + + explicit GoogLeNetImpl( + int64_t num_classes = 1000, + bool aux_logits = true, + bool transform_input = false, + bool init_weights = true); + + void _initialize_weights(); + + GoogLeNetOutput forward(torch::Tensor x); +}; + +TORCH_MODULE(GoogLeNet); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.cpp new file mode 100644 index 0000000000000000000000000000000000000000..002bf7ee2d1f0204f9429be9709a9bcc0c9abcd9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.cpp @@ -0,0 +1,373 @@ +#include "inception.h" + +namespace vision { +namespace models { + +using Options = torch::nn::Conv2dOptions; + +namespace _inceptionimpl { +BasicConv2dImpl::BasicConv2dImpl( + torch::nn::Conv2dOptions options, + double std_dev) { + options.bias(false); + conv = torch::nn::Conv2d(options); + bn = torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(options.out_channels()).eps(0.001)); + + register_module("conv", conv); + register_module("bn", bn); + + torch::nn::init::normal_( + conv->weight, + 0, + std_dev); // Note: used instead of truncated normal initialization + + torch::nn::init::constant_(bn->weight, 1); + torch::nn::init::constant_(bn->bias, 0); +} + +torch::Tensor BasicConv2dImpl::forward(torch::Tensor x) { + x = conv->forward(x); + x = bn->forward(x); + return torch::relu_(x); +} + +InceptionAImpl::InceptionAImpl(int64_t in_channels, int64_t pool_features) + : branch1x1(Options(in_channels, 64, 1)), + branch5x5_1(Options(in_channels, 48, 1)), + branch5x5_2(Options(48, 64, 5).padding(2)), + branch3x3dbl_1(Options(in_channels, 64, 1)), + branch3x3dbl_2(Options(64, 96, 3).padding(1)), + branch3x3dbl_3(Options(96, 96, 3).padding(1)), + branch_pool(Options(in_channels, pool_features, 1)) { + register_module("branch1x1", branch1x1); + register_module("branch5x5_1", branch5x5_1); + register_module("branch5x5_2", branch5x5_2); + register_module("branch3x3dbl_1", branch3x3dbl_1); + register_module("branch3x3dbl_2", branch3x3dbl_2); + register_module("branch3x3dbl_3", branch3x3dbl_3); + register_module("branch_pool", branch_pool); +} + +torch::Tensor InceptionAImpl::forward(const torch::Tensor& x) { + auto branch1x1 = this->branch1x1->forward(x); + + auto branch5x5 = this->branch5x5_1->forward(x); + 
branch5x5 = this->branch5x5_2->forward(branch5x5); + + auto branch3x3dbl = this->branch3x3dbl_1->forward(x); + branch3x3dbl = this->branch3x3dbl_2->forward(branch3x3dbl); + branch3x3dbl = this->branch3x3dbl_3->forward(branch3x3dbl); + + auto branch_pool = torch::avg_pool2d(x, 3, 1, 1); + branch_pool = this->branch_pool->forward(branch_pool); + + return torch::cat({branch1x1, branch5x5, branch3x3dbl, branch_pool}, 1); +} + +InceptionBImpl::InceptionBImpl(int64_t in_channels) + : branch3x3(Options(in_channels, 384, 3).stride(2)), + branch3x3dbl_1(Options(in_channels, 64, 1)), + branch3x3dbl_2(Options(64, 96, 3).padding(1)), + branch3x3dbl_3(Options(96, 96, 3).stride(2)) { + register_module("branch3x3", branch3x3); + register_module("branch3x3dbl_1", branch3x3dbl_1); + register_module("branch3x3dbl_2", branch3x3dbl_2); + register_module("branch3x3dbl_3", branch3x3dbl_3); +} + +torch::Tensor InceptionBImpl::forward(const torch::Tensor& x) { + auto branch3x3 = this->branch3x3->forward(x); + + auto branch3x3dbl = this->branch3x3dbl_1->forward(x); + branch3x3dbl = this->branch3x3dbl_2->forward(branch3x3dbl); + branch3x3dbl = this->branch3x3dbl_3->forward(branch3x3dbl); + + auto branch_pool = torch::max_pool2d(x, 3, 2); + return torch::cat({branch3x3, branch3x3dbl, branch_pool}, 1); +} + +InceptionCImpl::InceptionCImpl(int64_t in_channels, int64_t channels_7x7) { + branch1x1 = BasicConv2d(Options(in_channels, 192, 1)); + + auto c7 = channels_7x7; + branch7x7_1 = BasicConv2d(Options(in_channels, c7, 1)); + branch7x7_2 = BasicConv2d(Options(c7, c7, {1, 7}).padding({0, 3})); + branch7x7_3 = BasicConv2d(Options(c7, 192, {7, 1}).padding({3, 0})); + + branch7x7dbl_1 = BasicConv2d(Options(in_channels, c7, 1)); + branch7x7dbl_2 = BasicConv2d(Options(c7, c7, {7, 1}).padding({3, 0})); + branch7x7dbl_3 = BasicConv2d(Options(c7, c7, {1, 7}).padding({0, 3})); + branch7x7dbl_4 = BasicConv2d(Options(c7, c7, {7, 1}).padding({3, 0})); + branch7x7dbl_5 = BasicConv2d(Options(c7, 192, {1, 7}).padding({0, 3})); + + branch_pool = BasicConv2d(Options(in_channels, 192, 1)); + + register_module("branch1x1", branch1x1); + register_module("branch7x7_1", branch7x7_1); + register_module("branch7x7_2", branch7x7_2); + register_module("branch7x7_3", branch7x7_3); + register_module("branch7x7dbl_1", branch7x7dbl_1); + register_module("branch7x7dbl_2", branch7x7dbl_2); + register_module("branch7x7dbl_3", branch7x7dbl_3); + register_module("branch7x7dbl_4", branch7x7dbl_4); + register_module("branch7x7dbl_5", branch7x7dbl_5); + register_module("branch_pool", branch_pool); +} + +torch::Tensor InceptionCImpl::forward(const torch::Tensor& x) { + auto branch1x1 = this->branch1x1->forward(x); + + auto branch7x7 = this->branch7x7_1->forward(x); + branch7x7 = this->branch7x7_2->forward(branch7x7); + branch7x7 = this->branch7x7_3->forward(branch7x7); + + auto branch7x7dbl = this->branch7x7dbl_1->forward(x); + branch7x7dbl = this->branch7x7dbl_2->forward(branch7x7dbl); + branch7x7dbl = this->branch7x7dbl_3->forward(branch7x7dbl); + branch7x7dbl = this->branch7x7dbl_4->forward(branch7x7dbl); + branch7x7dbl = this->branch7x7dbl_5->forward(branch7x7dbl); + + auto branch_pool = torch::avg_pool2d(x, 3, 1, 1); + branch_pool = this->branch_pool->forward(branch_pool); + + return torch::cat({branch1x1, branch7x7, branch7x7dbl, branch_pool}, 1); +} + +InceptionDImpl::InceptionDImpl(int64_t in_channels) + : branch3x3_1(Options(in_channels, 192, 1)), + branch3x3_2(Options(192, 320, 3).stride(2)), + branch7x7x3_1(Options(in_channels, 192, 1)), + 
branch7x7x3_2(Options(192, 192, {1, 7}).padding({0, 3})), + branch7x7x3_3(Options(192, 192, {7, 1}).padding({3, 0})), + branch7x7x3_4(Options(192, 192, 3).stride(2)) + +{ + register_module("branch3x3_1", branch3x3_1); + register_module("branch3x3_2", branch3x3_2); + register_module("branch7x7x3_1", branch7x7x3_1); + register_module("branch7x7x3_2", branch7x7x3_2); + register_module("branch7x7x3_3", branch7x7x3_3); + register_module("branch7x7x3_4", branch7x7x3_4); +} + +torch::Tensor InceptionDImpl::forward(const torch::Tensor& x) { + auto branch3x3 = this->branch3x3_1->forward(x); + branch3x3 = this->branch3x3_2->forward(branch3x3); + + auto branch7x7x3 = this->branch7x7x3_1->forward(x); + branch7x7x3 = this->branch7x7x3_2->forward(branch7x7x3); + branch7x7x3 = this->branch7x7x3_3->forward(branch7x7x3); + branch7x7x3 = this->branch7x7x3_4->forward(branch7x7x3); + + auto branch_pool = torch::max_pool2d(x, 3, 2); + return torch::cat({branch3x3, branch7x7x3, branch_pool}, 1); +} + +InceptionEImpl::InceptionEImpl(int64_t in_channels) + : branch1x1(Options(in_channels, 320, 1)), + branch3x3_1(Options(in_channels, 384, 1)), + branch3x3_2a(Options(384, 384, {1, 3}).padding({0, 1})), + branch3x3_2b(Options(384, 384, {3, 1}).padding({1, 0})), + branch3x3dbl_1(Options(in_channels, 448, 1)), + branch3x3dbl_2(Options(448, 384, 3).padding(1)), + branch3x3dbl_3a(Options(384, 384, {1, 3}).padding({0, 1})), + branch3x3dbl_3b(Options(384, 384, {3, 1}).padding({1, 0})), + branch_pool(Options(in_channels, 192, 1)) { + register_module("branch1x1", branch1x1); + register_module("branch3x3_1", branch3x3_1); + register_module("branch3x3_2a", branch3x3_2a); + register_module("branch3x3_2b", branch3x3_2b); + register_module("branch3x3dbl_1", branch3x3dbl_1); + register_module("branch3x3dbl_2", branch3x3dbl_2); + register_module("branch3x3dbl_3a", branch3x3dbl_3a); + register_module("branch3x3dbl_3b", branch3x3dbl_3b); + register_module("branch_pool", branch_pool); +} + +torch::Tensor InceptionEImpl::forward(const torch::Tensor& x) { + auto branch1x1 = this->branch1x1->forward(x); + + auto branch3x3 = this->branch3x3_1->forward(x); + branch3x3 = torch::cat( + { + this->branch3x3_2a->forward(branch3x3), + this->branch3x3_2b->forward(branch3x3), + }, + 1); + + auto branch3x3dbl = this->branch3x3dbl_1->forward(x); + branch3x3dbl = this->branch3x3dbl_2->forward(branch3x3dbl); + branch3x3dbl = torch::cat( + {this->branch3x3dbl_3a->forward(branch3x3dbl), + this->branch3x3dbl_3b->forward(branch3x3dbl)}, + 1); + + auto branch_pool = torch::avg_pool2d(x, 3, 1, 1); + branch_pool = this->branch_pool->forward(branch_pool); + + return torch::cat({branch1x1, branch3x3, branch3x3dbl, branch_pool}, 1); +} + +InceptionAuxImpl::InceptionAuxImpl(int64_t in_channels, int64_t num_classes) + : conv0(BasicConv2d(Options(in_channels, 128, 1))), + conv1(BasicConv2d(Options(128, 768, 5), 0.01)), + fc(768, num_classes) { + torch::nn::init::normal_( + fc->weight, + 0, + 0.001); // Note: used instead of truncated normal initialization + + register_module("conv0", conv0); + register_module("conv1", conv1); + register_module("fc", fc); +} + +torch::Tensor InceptionAuxImpl::forward(torch::Tensor x) { + // N x 768 x 17 x 17 + x = torch::avg_pool2d(x, 5, 3); + // N x 768 x 5 x 5 + x = conv0->forward(x); + // N x 128 x 5 x 5 + x = conv1->forward(x); + // N x 768 x 1 x 1 + x = torch::adaptive_avg_pool2d(x, {1, 1}); + // N x 768 x 1 x 1 + x = x.view({x.size(0), -1}); + // N x 768 + x = fc->forward(x); + // N x 1000 (num_classes) + return x; +} + +} 
// namespace _inceptionimpl + +InceptionV3Impl::InceptionV3Impl( + int64_t num_classes, + bool aux_logits, + bool transform_input) + : aux_logits(aux_logits), transform_input(transform_input) { + Conv2d_1a_3x3 = _inceptionimpl::BasicConv2d(Options(3, 32, 3).stride(2)); + Conv2d_2a_3x3 = _inceptionimpl::BasicConv2d(Options(32, 32, 3)); + Conv2d_2b_3x3 = _inceptionimpl::BasicConv2d(Options(32, 64, 3).padding(1)); + Conv2d_3b_1x1 = _inceptionimpl::BasicConv2d(Options(64, 80, 1)); + Conv2d_4a_3x3 = _inceptionimpl::BasicConv2d(Options(80, 192, 3)); + + Mixed_5b = _inceptionimpl::InceptionA(192, 32); + Mixed_5c = _inceptionimpl::InceptionA(256, 64); + Mixed_5d = _inceptionimpl::InceptionA(288, 64); + + Mixed_6a = _inceptionimpl::InceptionB(288); + Mixed_6b = _inceptionimpl::InceptionC(768, 128); + Mixed_6c = _inceptionimpl::InceptionC(768, 160); + Mixed_6d = _inceptionimpl::InceptionC(768, 160); + Mixed_6e = _inceptionimpl::InceptionC(768, 192); + + if (aux_logits) + AuxLogits = _inceptionimpl::InceptionAux(768, num_classes); + + Mixed_7a = _inceptionimpl::InceptionD(768); + Mixed_7b = _inceptionimpl::InceptionE(1280); + Mixed_7c = _inceptionimpl::InceptionE(2048); + + fc = torch::nn::Linear(2048, num_classes); + torch::nn::init::normal_( + fc->weight, + 0, + 0.1); // Note: used instead of truncated normal initialization + + register_module("Conv2d_1a_3x3", Conv2d_1a_3x3); + register_module("Conv2d_2a_3x3", Conv2d_2a_3x3); + register_module("Conv2d_2b_3x3", Conv2d_2b_3x3); + register_module("Conv2d_3b_1x1", Conv2d_3b_1x1); + register_module("Conv2d_4a_3x3", Conv2d_4a_3x3); + register_module("Mixed_5b", Mixed_5b); + register_module("Mixed_5c", Mixed_5c); + register_module("Mixed_5d", Mixed_5d); + register_module("Mixed_6a", Mixed_6a); + register_module("Mixed_6b", Mixed_6b); + register_module("Mixed_6c", Mixed_6c); + register_module("Mixed_6d", Mixed_6d); + register_module("Mixed_6e", Mixed_6e); + + if (!AuxLogits.is_empty()) + register_module("AuxLogits", AuxLogits); + + register_module("Mixed_7a", Mixed_7a); + register_module("Mixed_7b", Mixed_7b); + register_module("Mixed_7c", Mixed_7c); + register_module("fc", fc); +} + +InceptionV3Output InceptionV3Impl::forward(torch::Tensor x) { + if (transform_input) { + auto x_ch0 = torch::unsqueeze(x.select(1, 0), 1) * (0.229 / 0.5) + + (0.485 - 0.5) / 0.5; + auto x_ch1 = torch::unsqueeze(x.select(1, 1), 1) * (0.224 / 0.5) + + (0.456 - 0.5) / 0.5; + auto x_ch2 = torch::unsqueeze(x.select(1, 2), 1) * (0.225 / 0.5) + + (0.406 - 0.5) / 0.5; + + x = torch::cat({x_ch0, x_ch1, x_ch2}, 1); + } + + // N x 3 x 299 x 299 + x = Conv2d_1a_3x3->forward(x); + // N x 32 x 149 x 149 + x = Conv2d_2a_3x3->forward(x); + // N x 32 x 147 x 147 + x = Conv2d_2b_3x3->forward(x); + // N x 64 x 147 x 147 + x = torch::max_pool2d(x, 3, 2); + // N x 64 x 73 x 73 + x = Conv2d_3b_1x1->forward(x); + // N x 80 x 73 x 73 + x = Conv2d_4a_3x3->forward(x); + // N x 192 x 71 x 71 + x = torch::max_pool2d(x, 3, 2); + // N x 192 x 35 x 35 + x = Mixed_5b->forward(x); + // N x 256 x 35 x 35 + x = Mixed_5c->forward(x); + // N x 288 x 35 x 35 + x = Mixed_5d->forward(x); + // N x 288 x 35 x 35 + x = Mixed_6a->forward(x); + // N x 768 x 17 x 17 + x = Mixed_6b->forward(x); + // N x 768 x 17 x 17 + x = Mixed_6c->forward(x); + // N x 768 x 17 x 17 + x = Mixed_6d->forward(x); + // N x 768 x 17 x 17 + x = Mixed_6e->forward(x); + // N x 768 x 17 x 17 + + torch::Tensor aux; + if (is_training() && aux_logits) + aux = AuxLogits->forward(x); + + // N x 768 x 17 x 17 + x = Mixed_7a->forward(x); + // N x 1280 x 
8 x 8 + x = Mixed_7b->forward(x); + // N x 2048 x 8 x 8 + x = Mixed_7c->forward(x); + // N x 2048 x 8 x 8 + x = torch::adaptive_avg_pool2d(x, {1, 1}); + // N x 2048 x 1 x 1 + x = torch::dropout(x, 0.5, is_training()); + // N x 2048 x 1 x 1 + x = x.view({x.size(0), -1}); + // N x 2048 + x = fc->forward(x); + // N x 1000 (num_classes) + + if (is_training() && aux_logits) + return {x, aux}; + return {x, {}}; +} + +// namespace _inceptionimpl +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.h new file mode 100644 index 0000000000000000000000000000000000000000..53ce07a703a71ed52d0c26eb7f58c0567c421053 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/inception.h @@ -0,0 +1,125 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +namespace _inceptionimpl { +struct VISION_API BasicConv2dImpl : torch::nn::Module { + torch::nn::Conv2d conv{nullptr}; + torch::nn::BatchNorm2d bn{nullptr}; + + explicit BasicConv2dImpl( + torch::nn::Conv2dOptions options, + double std_dev = 0.1); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(BasicConv2d); + +struct VISION_API InceptionAImpl : torch::nn::Module { + BasicConv2d branch1x1, branch5x5_1, branch5x5_2, branch3x3dbl_1, + branch3x3dbl_2, branch3x3dbl_3, branch_pool; + + InceptionAImpl(int64_t in_channels, int64_t pool_features); + + torch::Tensor forward(const torch::Tensor& x); +}; + +struct VISION_API InceptionBImpl : torch::nn::Module { + BasicConv2d branch3x3, branch3x3dbl_1, branch3x3dbl_2, branch3x3dbl_3; + + explicit InceptionBImpl(int64_t in_channels); + + torch::Tensor forward(const torch::Tensor& x); +}; + +struct VISION_API InceptionCImpl : torch::nn::Module { + BasicConv2d branch1x1{nullptr}, branch7x7_1{nullptr}, branch7x7_2{nullptr}, + branch7x7_3{nullptr}, branch7x7dbl_1{nullptr}, branch7x7dbl_2{nullptr}, + branch7x7dbl_3{nullptr}, branch7x7dbl_4{nullptr}, branch7x7dbl_5{nullptr}, + branch_pool{nullptr}; + + InceptionCImpl(int64_t in_channels, int64_t channels_7x7); + + torch::Tensor forward(const torch::Tensor& x); +}; + +struct VISION_API InceptionDImpl : torch::nn::Module { + BasicConv2d branch3x3_1, branch3x3_2, branch7x7x3_1, branch7x7x3_2, + branch7x7x3_3, branch7x7x3_4; + + explicit InceptionDImpl(int64_t in_channels); + + torch::Tensor forward(const torch::Tensor& x); +}; + +struct VISION_API InceptionEImpl : torch::nn::Module { + BasicConv2d branch1x1, branch3x3_1, branch3x3_2a, branch3x3_2b, + branch3x3dbl_1, branch3x3dbl_2, branch3x3dbl_3a, branch3x3dbl_3b, + branch_pool; + + explicit InceptionEImpl(int64_t in_channels); + + torch::Tensor forward(const torch::Tensor& x); +}; + +struct VISION_API InceptionAuxImpl : torch::nn::Module { + BasicConv2d conv0; + BasicConv2d conv1; + torch::nn::Linear fc; + + InceptionAuxImpl(int64_t in_channels, int64_t num_classes); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(InceptionA); +TORCH_MODULE(InceptionB); +TORCH_MODULE(InceptionC); +TORCH_MODULE(InceptionD); +TORCH_MODULE(InceptionE); +TORCH_MODULE(InceptionAux); + +} // namespace _inceptionimpl + +struct VISION_API InceptionV3Output { + torch::Tensor output; + torch::Tensor aux; +}; + +// Inception v3 model architecture from +//"Rethinking the Inception Architecture for Computer Vision" +//<http://arxiv.org/abs/1512.00567> +struct VISION_API InceptionV3Impl : 
torch::nn::Module { + bool aux_logits, transform_input; + + _inceptionimpl::BasicConv2d Conv2d_1a_3x3{nullptr}, Conv2d_2a_3x3{nullptr}, + Conv2d_2b_3x3{nullptr}, Conv2d_3b_1x1{nullptr}, Conv2d_4a_3x3{nullptr}; + + _inceptionimpl::InceptionA Mixed_5b{nullptr}, Mixed_5c{nullptr}, + Mixed_5d{nullptr}; + _inceptionimpl::InceptionB Mixed_6a{nullptr}; + _inceptionimpl::InceptionC Mixed_6b{nullptr}, Mixed_6c{nullptr}, + Mixed_6d{nullptr}, Mixed_6e{nullptr}; + _inceptionimpl::InceptionD Mixed_7a{nullptr}; + _inceptionimpl::InceptionE Mixed_7b{nullptr}, Mixed_7c{nullptr}; + + torch::nn::Linear fc{nullptr}; + + _inceptionimpl::InceptionAux AuxLogits{nullptr}; + + explicit InceptionV3Impl( + int64_t num_classes = 1000, + bool aux_logits = true, + bool transform_input = false); + + InceptionV3Output forward(torch::Tensor x); +}; + +TORCH_MODULE(InceptionV3); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8e433c50bd903549fab5305c11129e824d04232d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.cpp @@ -0,0 +1,182 @@ +#include "mnasnet.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { +using Options = torch::nn::Conv2dOptions; + +struct MNASNetInvertedResidualImpl : torch::nn::Module { + bool apply_residual; + torch::nn::Sequential layers; + + MNASNetInvertedResidualImpl( + int64_t input, + int64_t output, + int64_t kernel, + int64_t stride, + double expansion_factor, + double bn_momentum = 0.1) { + TORCH_CHECK(stride == 1 || stride == 2); + TORCH_CHECK(kernel == 3 || kernel == 5); + + auto mid = int64_t(input * expansion_factor); + apply_residual = input == output && stride == 1; + + layers->push_back(torch::nn::Conv2d(Options(input, mid, 1).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(mid).momentum(bn_momentum))); + layers->push_back( + torch::nn::Functional(torch::nn::Functional(modelsimpl::relu_))); + layers->push_back( + torch::nn::Conv2d(torch::nn::Conv2d(Options(mid, mid, kernel) + .padding(kernel / 2) + .stride(stride) + .groups(mid) + .bias(false)))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(mid).momentum(bn_momentum))); + layers->push_back( + torch::nn::Functional(torch::nn::Functional(modelsimpl::relu_))); + layers->push_back(torch::nn::Conv2d(Options(mid, output, 1).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(output).momentum(bn_momentum))); + + register_module("layers", layers); + } + + torch::Tensor forward(torch::Tensor x) { + if (apply_residual) + return layers->forward(x) + x; + return layers->forward(x); + } +}; + +TORCH_MODULE(MNASNetInvertedResidual); + +struct StackSequentailImpl : torch::nn::SequentialImpl { + using SequentialImpl::SequentialImpl; + + torch::Tensor forward(torch::Tensor x) { + return SequentialImpl::forward(x); + } +}; + +TORCH_MODULE(StackSequentail); + +StackSequentail stack( + int64_t input, + int64_t output, + int64_t kernel, + int64_t stride, + double exp_factor, + int64_t repeats, + double bn_momentum) { + TORCH_CHECK(repeats >= 1); + + StackSequentail seq; + seq->push_back(MNASNetInvertedResidual( + input, output, kernel, stride, exp_factor, bn_momentum)); + + for (int64_t i = 1; i < repeats; ++i) + seq->push_back(MNASNetInvertedResidual( + 
output, output, kernel, 1, exp_factor, bn_momentum)); + + return seq; +} + +int64_t round_to_multiple_of( + int64_t val, + int64_t divisor, + double round_up_bias = .9) { + TORCH_CHECK(0.0 < round_up_bias && round_up_bias < 1.0); + auto new_val = std::max(divisor, (val + divisor / 2) / divisor * divisor); + return new_val >= round_up_bias * val ? new_val : new_val + divisor; +} + +std::vector<int64_t> scale_depths(std::vector<int64_t> depths, double alpha) { + std::vector<int64_t> data(depths.size()); + for (size_t i = 0; i < data.size(); ++i) { + data[i] = round_to_multiple_of(int64_t(depths[i] * alpha), 8); + } + + return data; +} + +void MNASNetImpl::_initialize_weights() { + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) + torch::nn::init::kaiming_normal_( + M->weight, 0, torch::kFanOut, torch::kReLU); + else if (auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::ones_(M->weight); + torch::nn::init::zeros_(M->bias); + } else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) { + torch::nn::init::normal_(M->weight, 0, 0.01); + torch::nn::init::zeros_(M->bias); + } + } +} + +#define BN_MOMENTUM 1 - 0.9997 + +MNASNetImpl::MNASNetImpl(double alpha, int64_t num_classes, double dropout) { + auto depths = scale_depths({24, 40, 80, 96, 192, 320}, alpha); + + layers->push_back( + torch::nn::Conv2d(Options(3, 32, 3).padding(1).stride(2).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(32).momentum(BN_MOMENTUM))); + layers->push_back(torch::nn::Functional(modelsimpl::relu_)); + layers->push_back(torch::nn::Conv2d( + Options(32, 32, 3).padding(1).stride(1).groups(32).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(32).momentum(BN_MOMENTUM))); + layers->push_back(torch::nn::Functional(modelsimpl::relu_)); + layers->push_back( + torch::nn::Conv2d(Options(32, 16, 1).padding(0).stride(1).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(16).momentum(BN_MOMENTUM))); + + layers->push_back(stack(16, depths[0], 3, 2, 3, 3, BN_MOMENTUM)); + layers->push_back(stack(depths[0], depths[1], 5, 2, 3, 3, BN_MOMENTUM)); + layers->push_back(stack(depths[1], depths[2], 5, 2, 6, 3, BN_MOMENTUM)); + layers->push_back(stack(depths[2], depths[3], 3, 1, 6, 2, BN_MOMENTUM)); + layers->push_back(stack(depths[3], depths[4], 5, 2, 6, 4, BN_MOMENTUM)); + layers->push_back(stack(depths[4], depths[5], 3, 1, 6, 1, BN_MOMENTUM)); + + layers->push_back(torch::nn::Conv2d( + Options(depths[5], 1280, 1).padding(0).stride(1).bias(false))); + layers->push_back(torch::nn::BatchNorm2d( + torch::nn::BatchNormOptions(1280).momentum(BN_MOMENTUM))); + layers->push_back(torch::nn::Functional(modelsimpl::relu_)); + + classifier = torch::nn::Sequential( + torch::nn::Dropout(dropout), torch::nn::Linear(1280, num_classes)); + + register_module("layers", layers); + register_module("classifier", classifier); + + _initialize_weights(); +} + +torch::Tensor MNASNetImpl::forward(torch::Tensor x) { + x = layers->forward(x); + x = x.mean({2, 3}); + return classifier->forward(x); +} + +MNASNet0_5Impl::MNASNet0_5Impl(int64_t num_classes, double dropout) + : MNASNetImpl(.5, num_classes, dropout) {} + +MNASNet0_75Impl::MNASNet0_75Impl(int64_t num_classes, double dropout) + : MNASNetImpl(.75, num_classes, dropout) {} + +MNASNet1_0Impl::MNASNet1_0Impl(int64_t num_classes, double dropout) + : MNASNetImpl(1, num_classes, 
dropout) {} + +MNASNet1_3Impl::MNASNet1_3Impl(int64_t num_classes, double dropout) + : MNASNetImpl(1.3, num_classes, dropout) {} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.h new file mode 100644 index 0000000000000000000000000000000000000000..f08b5cf4284aa2717066ef0de6c3a1b2aa9fd08b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mnasnet.h @@ -0,0 +1,44 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +struct VISION_API MNASNetImpl : torch::nn::Module { + torch::nn::Sequential layers, classifier; + + void _initialize_weights(); + + explicit MNASNetImpl( + double alpha, + int64_t num_classes = 1000, + double dropout = .2); + + torch::Tensor forward(torch::Tensor x); +}; + +struct MNASNet0_5Impl : MNASNetImpl { + explicit MNASNet0_5Impl(int64_t num_classes = 1000, double dropout = .2); +}; + +struct MNASNet0_75Impl : MNASNetImpl { + explicit MNASNet0_75Impl(int64_t num_classes = 1000, double dropout = .2); +}; + +struct MNASNet1_0Impl : MNASNetImpl { + explicit MNASNet1_0Impl(int64_t num_classes = 1000, double dropout = .2); +}; + +struct MNASNet1_3Impl : MNASNetImpl { + explicit MNASNet1_3Impl(int64_t num_classes = 1000, double dropout = .2); +}; + +TORCH_MODULE(MNASNet); +TORCH_MODULE(MNASNet0_5); +TORCH_MODULE(MNASNet0_75); +TORCH_MODULE(MNASNet1_0); +TORCH_MODULE(MNASNet1_3); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..beeec89653b91c21f3df680f1d796d9202afc961 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.cpp @@ -0,0 +1,159 @@ +#include "mobilenet.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { +using Options = torch::nn::Conv2dOptions; + +int64_t make_divisible( + double value, + int64_t divisor, + c10::optional<int64_t> min_value = {}) { + if (!min_value.has_value()) + min_value = divisor; + auto new_value = std::max( + min_value.value(), (int64_t(value + divisor / 2) / divisor) * divisor); + if (new_value < .9 * value) + new_value += divisor; + return new_value; +} + +struct ConvBNReLUImpl : torch::nn::SequentialImpl { + ConvBNReLUImpl( + int64_t in_planes, + int64_t out_planes, + int64_t kernel_size = 3, + int64_t stride = 1, + int64_t groups = 1) { + auto padding = (kernel_size - 1) / 2; + + push_back(torch::nn::Conv2d(Options(in_planes, out_planes, kernel_size) + .stride(stride) + .padding(padding) + .groups(groups) + .bias(false))); + push_back(torch::nn::BatchNorm2d(out_planes)); + push_back(torch::nn::Functional(modelsimpl::relu6_)); + } + + torch::Tensor forward(torch::Tensor x) { + return torch::nn::SequentialImpl::forward(x); + } +}; + +TORCH_MODULE(ConvBNReLU); + +struct MobileNetInvertedResidualImpl : torch::nn::Module { + int64_t stride; + bool use_res_connect; + torch::nn::Sequential conv; + + MobileNetInvertedResidualImpl( + int64_t input, + int64_t output, + int64_t stride, + double expand_ratio) + : stride(stride), use_res_connect(stride == 1 && input == output) { + auto double_compare = [](double a, double b) { + return double(std::abs(a - b)) < std::numeric_limits<double>::epsilon(); + }; + + 
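+    // Inverted residual as assembled below: an optional 1x1 pointwise
+    // expansion (skipped when expand_ratio == 1), a 3x3 depthwise convolution
+    // (groups == hidden_dim), and a linear 1x1 projection with BatchNorm but
+    // no ReLU6. The skip connection in forward() is applied only when
+    // stride == 1 and the input and output channel counts match.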
TORCH_CHECK(stride == 1 || stride == 2); + auto hidden_dim = int64_t(std::round(input * expand_ratio)); + + if (!double_compare(expand_ratio, 1)) + conv->push_back(ConvBNReLU(input, hidden_dim, 1)); + + conv->push_back(ConvBNReLU(hidden_dim, hidden_dim, 3, stride, hidden_dim)); + conv->push_back(torch::nn::Conv2d( + Options(hidden_dim, output, 1).stride(1).padding(0).bias(false))); + conv->push_back(torch::nn::BatchNorm2d(output)); + + register_module("conv", conv); + } + + torch::Tensor forward(torch::Tensor x) { + if (use_res_connect) + return x + conv->forward(x); + return conv->forward(x); + } +}; + +TORCH_MODULE(MobileNetInvertedResidual); + +MobileNetV2Impl::MobileNetV2Impl( + int64_t num_classes, + double width_mult, + std::vector<std::vector<int64_t>> inverted_residual_settings, + int64_t round_nearest) { + using Block = MobileNetInvertedResidual; + int64_t input_channel = 32; + int64_t last_channel = 1280; + + if (inverted_residual_settings.empty()) + inverted_residual_settings = { + // t, c, n, s + {1, 16, 1, 1}, + {6, 24, 2, 2}, + {6, 32, 3, 2}, + {6, 64, 4, 2}, + {6, 96, 3, 1}, + {6, 160, 3, 2}, + {6, 320, 1, 1}, + }; + + TORCH_CHECK( + inverted_residual_settings[0].size() == 4, + "inverted_residual_settings should contain 4-element vectors"); + + input_channel = make_divisible(input_channel * width_mult, round_nearest); + this->last_channel = + make_divisible(last_channel * std::max(1.0, width_mult), round_nearest); + features->push_back(ConvBNReLU(3, input_channel, 3, 2)); + + for (auto setting : inverted_residual_settings) { + auto output_channel = + make_divisible(setting[1] * width_mult, round_nearest); + + for (int64_t i = 0; i < setting[2]; ++i) { + auto stride = i == 0 ? setting[3] : 1; + features->push_back( + Block(input_channel, output_channel, stride, setting[0])); + input_channel = output_channel; + } + } + + features->push_back(ConvBNReLU(input_channel, this->last_channel, 1)); + + classifier->push_back(torch::nn::Dropout(0.2)); + classifier->push_back(torch::nn::Linear(this->last_channel, num_classes)); + + register_module("features", features); + register_module("classifier", classifier); + + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) { + torch::nn::init::kaiming_normal_(M->weight, 0, torch::kFanOut); + if (M->options.bias()) + torch::nn::init::zeros_(M->bias); + } else if ( + auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::ones_(M->weight); + torch::nn::init::zeros_(M->bias); + } else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) { + torch::nn::init::normal_(M->weight, 0, 0.01); + torch::nn::init::zeros_(M->bias); + } + } +} + +torch::Tensor MobileNetV2Impl::forward(at::Tensor x) { + x = features->forward(x); + x = x.mean({2, 3}); + x = classifier->forward(x); + return x; +} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.h new file mode 100644 index 0000000000000000000000000000000000000000..0d1b8f1d0c9a0fc310e1e4933cdc4473fa29e632 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/mobilenet.h @@ -0,0 +1,23 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +struct VISION_API MobileNetV2Impl : torch::nn::Module { + int64_t last_channel; + torch::nn::Sequential features, 
classifier; + + explicit MobileNetV2Impl( + int64_t num_classes = 1000, + double width_mult = 1.0, + std::vector<std::vector<int64_t>> inverted_residual_settings = {}, + int64_t round_nearest = 8); + + torch::Tensor forward(torch::Tensor x); +}; + +TORCH_MODULE(MobileNetV2); +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/models.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/models.h new file mode 100644 index 0000000000000000000000000000000000000000..8376ed120205dc19d5880ad45f3cc6a2bb938753 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/models.h @@ -0,0 +1,12 @@ +#pragma once + +#include "alexnet.h" +#include "densenet.h" +#include "googlenet.h" +#include "inception.h" +#include "mnasnet.h" +#include "mobilenet.h" +#include "resnet.h" +#include "shufflenetv2.h" +#include "squeezenet.h" +#include "vgg.h" diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/modelsimpl.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/modelsimpl.h new file mode 100644 index 0000000000000000000000000000000000000000..f159d1502a313b7b7a41b254ad45dd24123c77a4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/modelsimpl.h @@ -0,0 +1,39 @@ +#pragma once + +#include <torch/nn.h> + +namespace vision { +namespace models { +namespace modelsimpl { + +// TODO here torch::relu_ and torch::adaptive_avg_pool2d wrapped in +// torch::nn::Fuctional don't work. so keeping these for now + +inline torch::Tensor& relu_(const torch::Tensor& x) { + return x.relu_(); +} + +inline torch::Tensor& relu6_(const torch::Tensor& x) { + return x.clamp_(0, 6); +} + +inline torch::Tensor adaptive_avg_pool2d( + const torch::Tensor& x, + torch::ExpandingArray<2> output_size) { + return torch::adaptive_avg_pool2d(x, output_size); +} + +inline torch::Tensor max_pool2d( + const torch::Tensor& x, + torch::ExpandingArray<2> kernel_size, + torch::ExpandingArray<2> stride) { + return torch::max_pool2d(x, kernel_size, stride); +} + +inline bool double_compare(double a, double b) { + return double(std::abs(a - b)) < std::numeric_limits<double>::epsilon(); +}; + +} // namespace modelsimpl +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e97ba7677456a8534f82492a85ff491e191e1ead --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.cpp @@ -0,0 +1,157 @@ +#include "resnet.h" + +namespace vision { +namespace models { +namespace _resnetimpl { +torch::nn::Conv2d conv3x3( + int64_t in, + int64_t out, + int64_t stride, + int64_t groups) { + torch::nn::Conv2dOptions O(in, out, 3); + O.padding(1).stride(stride).groups(groups).bias(false); + return torch::nn::Conv2d(O); +} + +torch::nn::Conv2d conv1x1(int64_t in, int64_t out, int64_t stride) { + torch::nn::Conv2dOptions O(in, out, 1); + O.stride(stride).bias(false); + return torch::nn::Conv2d(O); +} + +int BasicBlock::expansion = 1; +int Bottleneck::expansion = 4; + +BasicBlock::BasicBlock( + int64_t inplanes, + int64_t planes, + int64_t stride, + const torch::nn::Sequential& downsample, + int64_t groups, + int64_t base_width) + : stride(stride), downsample(downsample) { + TORCH_CHECK( + groups == 1 && base_width == 64, + "BasicBlock only supports 
groups=1 and base_width=64"); + + // Both conv1 and downsample layers downsample the input when stride != 1 + conv1 = conv3x3(inplanes, planes, stride); + conv2 = conv3x3(planes, planes); + + bn1 = torch::nn::BatchNorm2d(planes); + bn2 = torch::nn::BatchNorm2d(planes); + + register_module("conv1", conv1); + register_module("conv2", conv2); + + register_module("bn1", bn1); + register_module("bn2", bn2); + + if (!downsample.is_empty()) + register_module("downsample", this->downsample); +} + +Bottleneck::Bottleneck( + int64_t inplanes, + int64_t planes, + int64_t stride, + const torch::nn::Sequential& downsample, + int64_t groups, + int64_t base_width) + : stride(stride), downsample(downsample) { + auto width = int64_t(planes * (base_width / 64.)) * groups; + + // Both conv2 and downsample layers downsample the input when stride != 1 + conv1 = conv1x1(inplanes, width); + conv2 = conv3x3(width, width, stride, groups); + conv3 = conv1x1(width, planes * expansion); + + bn1 = torch::nn::BatchNorm2d(width); + bn2 = torch::nn::BatchNorm2d(width); + bn3 = torch::nn::BatchNorm2d(planes * expansion); + + register_module("conv1", conv1); + register_module("conv2", conv2); + register_module("conv3", conv3); + + register_module("bn1", bn1); + register_module("bn2", bn2); + register_module("bn3", bn3); + + if (!downsample.is_empty()) + register_module("downsample", this->downsample); +} + +torch::Tensor Bottleneck::forward(torch::Tensor X) { + auto identity = X; + + auto out = conv1->forward(X); + out = bn1->forward(out).relu_(); + + out = conv2->forward(out); + out = bn2->forward(out).relu_(); + + out = conv3->forward(out); + out = bn3->forward(out); + + if (!downsample.is_empty()) + identity = downsample->forward(X); + + out += identity; + return out.relu_(); +} + +torch::Tensor BasicBlock::forward(torch::Tensor x) { + auto identity = x; + + auto out = conv1->forward(x); + out = bn1->forward(out).relu_(); + + out = conv2->forward(out); + out = bn2->forward(out); + + if (!downsample.is_empty()) + identity = downsample->forward(x); + + out += identity; + return out.relu_(); +} +} // namespace _resnetimpl + +ResNet18Impl::ResNet18Impl(int64_t num_classes, bool zero_init_residual) + : ResNetImpl({2, 2, 2, 2}, num_classes, zero_init_residual) {} + +ResNet34Impl::ResNet34Impl(int64_t num_classes, bool zero_init_residual) + : ResNetImpl({3, 4, 6, 3}, num_classes, zero_init_residual) {} + +ResNet50Impl::ResNet50Impl(int64_t num_classes, bool zero_init_residual) + : ResNetImpl({3, 4, 6, 3}, num_classes, zero_init_residual) {} + +ResNet101Impl::ResNet101Impl(int64_t num_classes, bool zero_init_residual) + : ResNetImpl({3, 4, 23, 3}, num_classes, zero_init_residual) {} + +ResNet152Impl::ResNet152Impl(int64_t num_classes, bool zero_init_residual) + : ResNetImpl({3, 8, 36, 3}, num_classes, zero_init_residual) {} + +ResNext50_32x4dImpl::ResNext50_32x4dImpl( + int64_t num_classes, + bool zero_init_residual) + : ResNetImpl({3, 4, 6, 3}, num_classes, zero_init_residual, 32, 4) {} + +ResNext101_32x8dImpl::ResNext101_32x8dImpl( + int64_t num_classes, + bool zero_init_residual) + : ResNetImpl({3, 4, 23, 3}, num_classes, zero_init_residual, 32, 8) {} + +WideResNet50_2Impl::WideResNet50_2Impl( + int64_t num_classes, + bool zero_init_residual) + : ResNetImpl({3, 4, 6, 3}, num_classes, zero_init_residual, 1, 64 * 2) {} + +WideResNet101_2Impl::WideResNet101_2Impl( + int64_t num_classes, + bool zero_init_residual) + : ResNetImpl({3, 4, 23, 3}, num_classes, zero_init_residual, 1, 64 * 2) {} + +} // namespace models +} // 
namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.h new file mode 100644 index 0000000000000000000000000000000000000000..7e41de6e072072b438591bb565b47ade3505ac6c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/resnet.h @@ -0,0 +1,257 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +template <typename Block> +struct ResNetImpl; + +namespace _resnetimpl { +// 3x3 convolution with padding +torch::nn::Conv2d conv3x3( + int64_t in, + int64_t out, + int64_t stride = 1, + int64_t groups = 1); + +// 1x1 convolution +torch::nn::Conv2d conv1x1(int64_t in, int64_t out, int64_t stride = 1); + +struct VISION_API BasicBlock : torch::nn::Module { + template <typename Block> + friend struct vision::models::ResNetImpl; + + int64_t stride; + torch::nn::Sequential downsample; + + torch::nn::Conv2d conv1{nullptr}, conv2{nullptr}; + torch::nn::BatchNorm2d bn1{nullptr}, bn2{nullptr}; + + static int expansion; + + BasicBlock( + int64_t inplanes, + int64_t planes, + int64_t stride = 1, + const torch::nn::Sequential& downsample = nullptr, + int64_t groups = 1, + int64_t base_width = 64); + + torch::Tensor forward(torch::Tensor x); +}; + +struct VISION_API Bottleneck : torch::nn::Module { + template <typename Block> + friend struct vision::models::ResNetImpl; + + int64_t stride; + torch::nn::Sequential downsample; + + torch::nn::Conv2d conv1{nullptr}, conv2{nullptr}, conv3{nullptr}; + torch::nn::BatchNorm2d bn1{nullptr}, bn2{nullptr}, bn3{nullptr}; + + static int expansion; + + Bottleneck( + int64_t inplanes, + int64_t planes, + int64_t stride = 1, + const torch::nn::Sequential& downsample = nullptr, + int64_t groups = 1, + int64_t base_width = 64); + + torch::Tensor forward(torch::Tensor X); +}; +} // namespace _resnetimpl + +template <typename Block> +struct ResNetImpl : torch::nn::Module { + int64_t groups, base_width, inplanes; + torch::nn::Conv2d conv1; + torch::nn::BatchNorm2d bn1; + torch::nn::Sequential layer1, layer2, layer3, layer4; + torch::nn::Linear fc; + + torch::nn::Sequential _make_layer( + int64_t planes, + int64_t blocks, + int64_t stride = 1); + + explicit ResNetImpl( + const std::vector<int>& layers, + int64_t num_classes = 1000, + bool zero_init_residual = false, + int64_t groups = 1, + int64_t width_per_group = 64); + + torch::Tensor forward(torch::Tensor X); +}; + +template <typename Block> +torch::nn::Sequential ResNetImpl<Block>::_make_layer( + int64_t planes, + int64_t blocks, + int64_t stride) { + torch::nn::Sequential downsample = nullptr; + if (stride != 1 || inplanes != planes * Block::expansion) { + downsample = torch::nn::Sequential( + _resnetimpl::conv1x1(inplanes, planes * Block::expansion, stride), + torch::nn::BatchNorm2d(planes * Block::expansion)); + } + + torch::nn::Sequential layers; + layers->push_back( + Block(inplanes, planes, stride, downsample, groups, base_width)); + + inplanes = planes * Block::expansion; + + for (int i = 1; i < blocks; ++i) + layers->push_back(Block(inplanes, planes, 1, nullptr, groups, base_width)); + + return layers; +} + +template <typename Block> +ResNetImpl<Block>::ResNetImpl( + const std::vector<int>& layers, + int64_t num_classes, + bool zero_init_residual, + int64_t groups, + int64_t width_per_group) + : groups(groups), + base_width(width_per_group), + inplanes(64), + conv1( + torch::nn::Conv2dOptions(3, 64, 
7).stride(2).padding(3).bias(false)), + bn1(64), + layer1(_make_layer(64, layers[0])), + layer2(_make_layer(128, layers[1], 2)), + layer3(_make_layer(256, layers[2], 2)), + layer4(_make_layer(512, layers[3], 2)), + fc(512 * Block::expansion, num_classes) { + register_module("conv1", conv1); + register_module("bn1", bn1); + register_module("fc", fc); + + register_module("layer1", layer1); + register_module("layer2", layer2); + register_module("layer3", layer3); + register_module("layer4", layer4); + + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) + torch::nn::init::kaiming_normal_( + M->weight, + /*a=*/0, + torch::kFanOut, + torch::kReLU); + else if (auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::constant_(M->weight, 1); + torch::nn::init::constant_(M->bias, 0); + } + } + + // Zero-initialize the last BN in each residual branch, so that the residual + // branch starts with zeros, and each residual block behaves like an + // identity. This improves the model by 0.2~0.3% according to + // https://arxiv.org/abs/1706.02677 + if (zero_init_residual) + for (auto& module : modules(/*include_self=*/false)) { + if (auto* M = dynamic_cast<_resnetimpl::Bottleneck*>(module.get())) + torch::nn::init::constant_(M->bn3->weight, 0); + else if (auto* M = dynamic_cast<_resnetimpl::BasicBlock*>(module.get())) + torch::nn::init::constant_(M->bn2->weight, 0); + } +} + +template <typename Block> +torch::Tensor ResNetImpl<Block>::forward(torch::Tensor x) { + x = conv1->forward(x); + x = bn1->forward(x).relu_(); + x = torch::max_pool2d(x, 3, 2, 1); + + x = layer1->forward(x); + x = layer2->forward(x); + x = layer3->forward(x); + x = layer4->forward(x); + + x = torch::adaptive_avg_pool2d(x, {1, 1}); + x = x.reshape({x.size(0), -1}); + x = fc->forward(x); + + return x; +} + +struct VISION_API ResNet18Impl : ResNetImpl<_resnetimpl::BasicBlock> { + explicit ResNet18Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNet34Impl : ResNetImpl<_resnetimpl::BasicBlock> { + explicit ResNet34Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNet50Impl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit ResNet50Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNet101Impl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit ResNet101Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNet152Impl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit ResNet152Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNext50_32x4dImpl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit ResNext50_32x4dImpl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API ResNext101_32x8dImpl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit ResNext101_32x8dImpl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API WideResNet50_2Impl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit WideResNet50_2Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +struct VISION_API WideResNet101_2Impl : ResNetImpl<_resnetimpl::Bottleneck> { + explicit WideResNet101_2Impl( + int64_t num_classes = 1000, + bool zero_init_residual = false); +}; + +template <typename Block> +struct VISION_API ResNet : 
torch::nn::ModuleHolder<ResNetImpl<Block>> { + using torch::nn::ModuleHolder<ResNetImpl<Block>>::ModuleHolder; +}; + +TORCH_MODULE(ResNet18); +TORCH_MODULE(ResNet34); +TORCH_MODULE(ResNet50); +TORCH_MODULE(ResNet101); +TORCH_MODULE(ResNet152); +TORCH_MODULE(ResNext50_32x4d); +TORCH_MODULE(ResNext101_32x8d); +TORCH_MODULE(WideResNet50_2); +TORCH_MODULE(WideResNet101_2); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d84c11de42c91cdfbdb9faafe2bef3abf65b834e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.cpp @@ -0,0 +1,178 @@ +#include "shufflenetv2.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { + +using Options = torch::nn::Conv2dOptions; + +torch::Tensor channel_shuffle(torch::Tensor x, int64_t groups) { + auto shape = x.sizes(); + auto batchsize = shape[0]; + auto num_channels = shape[1]; + auto height = shape[2]; + auto width = shape[3]; + + auto channels_per_group = num_channels / groups; + + x = x.view({batchsize, groups, channels_per_group, height, width}); + x = torch::transpose(x, 1, 2).contiguous(); + x = x.view({batchsize, -1, height, width}); + + return x; +} + +torch::nn::Conv2d conv11(int64_t input, int64_t output) { + Options opts(input, output, 1); + opts = opts.stride(1).padding(0).bias(false); + return torch::nn::Conv2d(opts); +} + +torch::nn::Conv2d conv33(int64_t input, int64_t output, int64_t stride) { + Options opts(input, output, 3); + opts = opts.stride(stride).padding(1).bias(false).groups(input); + return torch::nn::Conv2d(opts); +} + +struct ShuffleNetV2InvertedResidualImpl : torch::nn::Module { + int64_t stride; + torch::nn::Sequential branch1{nullptr}, branch2{nullptr}; + + ShuffleNetV2InvertedResidualImpl(int64_t inp, int64_t oup, int64_t stride) + : stride(stride) { + TORCH_CHECK(stride >= 1 && stride <= 3, "illegal stride value"); + + auto branch_features = oup / 2; + TORCH_CHECK(stride != 1 || inp == branch_features << 1); + + if (stride > 1) { + branch1 = torch::nn::Sequential( + conv33(inp, inp, stride), + torch::nn::BatchNorm2d(inp), + conv11(inp, branch_features), + torch::nn::BatchNorm2d(branch_features), + torch::nn::Functional(modelsimpl::relu_)); + } + + branch2 = torch::nn::Sequential( + conv11(stride > 1 ? 
inp : branch_features, branch_features), + torch::nn::BatchNorm2d(branch_features), + torch::nn::Functional(modelsimpl::relu_), + conv33(branch_features, branch_features, stride), + torch::nn::BatchNorm2d(branch_features), + conv11(branch_features, branch_features), + torch::nn::BatchNorm2d(branch_features), + torch::nn::Functional(modelsimpl::relu_)); + + if (!branch1.is_empty()) + register_module("branch1", branch1); + + register_module("branch2", branch2); + } + + torch::Tensor forward(torch::Tensor x) { + torch::Tensor out; + + if (stride == 1) { + auto chunks = x.chunk(2, 1); + out = torch::cat({chunks[0], branch2->forward(chunks[1])}, 1); + } else + out = torch::cat({branch1->forward(x), branch2->forward(x)}, 1); + + out = ::vision::models::channel_shuffle(out, 2); + return out; + } +}; + +TORCH_MODULE(ShuffleNetV2InvertedResidual); + +ShuffleNetV2Impl::ShuffleNetV2Impl( + const std::vector<int64_t>& stage_repeats, + const std::vector<int64_t>& stage_out_channels, + int64_t num_classes) { + TORCH_CHECK( + stage_repeats.size() == 3, + "expected stage_repeats as vector of 3 positive ints"); + + TORCH_CHECK( + stage_out_channels.size() == 5, + "expected stage_out_channels as vector of 5 positive ints"); + + _stage_out_channels = stage_out_channels; + int64_t input_channels = 3; + auto output_channels = _stage_out_channels[0]; + + conv1 = torch::nn::Sequential( + torch::nn::Conv2d(Options(input_channels, output_channels, 3) + .stride(2) + .padding(1) + .bias(false)), + torch::nn::BatchNorm2d(output_channels), + torch::nn::Functional(modelsimpl::relu_)); + + input_channels = output_channels; + std::vector<torch::nn::Sequential> stages = {stage2, stage3, stage4}; + + for (size_t i = 0; i < stages.size(); ++i) { + auto& seq = stages[i]; + auto repeats = stage_repeats[i]; + auto output_channels = _stage_out_channels[i + 1]; + + seq->push_back( + ShuffleNetV2InvertedResidual(input_channels, output_channels, 2)); + + for (size_t j = 0; j < size_t(repeats - 1); ++j) + seq->push_back( + ShuffleNetV2InvertedResidual(output_channels, output_channels, 1)); + + input_channels = output_channels; + } + + output_channels = _stage_out_channels.back(); + conv5 = torch::nn::Sequential( + torch::nn::Conv2d(Options(input_channels, output_channels, 1) + .stride(1) + .padding(0) + .bias(false)), + torch::nn::BatchNorm2d(output_channels), + torch::nn::Functional(modelsimpl::relu_)); + + fc = torch::nn::Linear(output_channels, num_classes); + + register_module("conv1", conv1); + register_module("stage2", stage2); + register_module("stage3", stage3); + register_module("stage4", stage4); + register_module("conv2", conv5); + register_module("fc", fc); +} + +torch::Tensor ShuffleNetV2Impl::forward(torch::Tensor x) { + x = conv1->forward(x); + x = torch::max_pool2d(x, 3, 2, 1); + + x = stage2->forward(x); + x = stage3->forward(x); + x = stage4->forward(x); + x = conv5->forward(x); + + x = x.mean({2, 3}); + x = fc->forward(x); + return x; +} + +ShuffleNetV2_x0_5Impl::ShuffleNetV2_x0_5Impl(int64_t num_classes) + : ShuffleNetV2Impl({4, 8, 4}, {24, 48, 96, 192, 1024}, num_classes) {} + +ShuffleNetV2_x1_0Impl::ShuffleNetV2_x1_0Impl(int64_t num_classes) + : ShuffleNetV2Impl({4, 8, 4}, {24, 116, 232, 464, 1024}, num_classes) {} + +ShuffleNetV2_x1_5Impl::ShuffleNetV2_x1_5Impl(int64_t num_classes) + : ShuffleNetV2Impl({4, 8, 4}, {24, 176, 352, 704, 1024}, num_classes) {} + +ShuffleNetV2_x2_0Impl::ShuffleNetV2_x2_0Impl(int64_t num_classes) + : ShuffleNetV2Impl({4, 8, 4}, {24, 244, 488, 976, 2048}, num_classes) {} + +} // 
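In the block's forward above, the two branch outputs are concatenated along the channel dimension and then passed through channel_shuffle with groups=2, which interleaves the channel groups so information mixes between branches in the next block. A tiny worked example, assuming libtorch is available; the helper below is a self-contained copy of channel_shuffle from shufflenetv2.cpp, repeated only so the snippet compiles on its own:

#include <iostream>
#include <torch/torch.h>

// Stand-alone copy of channel_shuffle from shufflenetv2.cpp, reproduced here
// purely to demonstrate its effect on a tiny tensor.
torch::Tensor channel_shuffle(torch::Tensor x, int64_t groups) {
  auto shape = x.sizes();
  auto b = shape[0], c = shape[1], h = shape[2], w = shape[3];
  x = x.view({b, groups, c / groups, h, w});
  x = torch::transpose(x, 1, 2).contiguous();
  return x.view({b, -1, h, w});
}

int main() {
  // Four channels holding the values 0..3, shuffled with groups=2.
  auto x = torch::arange(4, torch::kFloat).view({1, 4, 1, 1});
  auto y = channel_shuffle(x, 2);
  std::cout << y.flatten() << std::endl;  // 0, 2, 1, 3 -- the two halves interleaved
  return 0;
}
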
namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.h new file mode 100644 index 0000000000000000000000000000000000000000..00a73b36a06d3cd17e70ea2d3712e2267fbebe63 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/shufflenetv2.h @@ -0,0 +1,45 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { + +struct VISION_API ShuffleNetV2Impl : torch::nn::Module { + std::vector<int64_t> _stage_out_channels; + torch::nn::Sequential conv1{nullptr}, stage2, stage3, stage4, conv5{nullptr}; + torch::nn::Linear fc{nullptr}; + + ShuffleNetV2Impl( + const std::vector<int64_t>& stage_repeats, + const std::vector<int64_t>& stage_out_channels, + int64_t num_classes = 1000); + + torch::Tensor forward(torch::Tensor x); +}; + +struct VISION_API ShuffleNetV2_x0_5Impl : ShuffleNetV2Impl { + explicit ShuffleNetV2_x0_5Impl(int64_t num_classes = 1000); +}; + +struct VISION_API ShuffleNetV2_x1_0Impl : ShuffleNetV2Impl { + explicit ShuffleNetV2_x1_0Impl(int64_t num_classes = 1000); +}; + +struct VISION_API ShuffleNetV2_x1_5Impl : ShuffleNetV2Impl { + explicit ShuffleNetV2_x1_5Impl(int64_t num_classes = 1000); +}; + +struct VISION_API ShuffleNetV2_x2_0Impl : ShuffleNetV2Impl { + explicit ShuffleNetV2_x2_0Impl(int64_t num_classes = 1000); +}; + +TORCH_MODULE(ShuffleNetV2); +TORCH_MODULE(ShuffleNetV2_x0_5); +TORCH_MODULE(ShuffleNetV2_x1_0); +TORCH_MODULE(ShuffleNetV2_x1_5); +TORCH_MODULE(ShuffleNetV2_x2_0); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.cpp new file mode 100644 index 0000000000000000000000000000000000000000..96a9a1800d0d7d3341a8be89185393b4cc82d0bd --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.cpp @@ -0,0 +1,111 @@ +#include "squeezenet.h" + +#include "modelsimpl.h" + +namespace vision { +namespace models { +struct Fire : torch::nn::Module { + torch::nn::Conv2d squeeze, expand1x1, expand3x3; + + Fire( + int64_t inplanes, + int64_t squeeze_planes, + int64_t expand1x1_planes, + int64_t expand3x3_planes) + : squeeze(torch::nn::Conv2dOptions(inplanes, squeeze_planes, 1)), + expand1x1( + torch::nn::Conv2dOptions(squeeze_planes, expand1x1_planes, 1)), + expand3x3(torch::nn::Conv2dOptions(squeeze_planes, expand3x3_planes, 3) + .padding(1)) { + register_module("squeeze", squeeze); + register_module("expand1x1", expand1x1); + register_module("expand3x3", expand3x3); + } + + torch::Tensor forward(torch::Tensor x) { + x = torch::relu(squeeze->forward(x)); + return torch::cat( + {torch::relu(expand1x1->forward(x)), + torch::relu(expand3x3->forward(x))}, + 1); + } +}; + +SqueezeNetImpl::SqueezeNetImpl(double version, int64_t num_classes) + : num_classes(num_classes) { + if (modelsimpl::double_compare(version, 1.0)) { + features = torch::nn::Sequential( + torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 96, 7).stride(2)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(96, 16, 64, 64), + Fire(128, 16, 64, 64), + Fire(128, 32, 128, 128), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(256, 32, 128, 128), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 
256), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(512, 64, 256, 256)); + } else if (modelsimpl::double_compare(version, 1.1)) { + features = torch::nn::Sequential( + torch::nn::Conv2d(torch::nn::Conv2dOptions(3, 64, 3).stride(2)), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(128, 32, 128, 128), + Fire(256, 32, 128, 128), + torch::nn::Functional(torch::max_pool2d, 3, 2, 0, 1, true), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + Fire(512, 64, 256, 256)); + } else + TORCH_CHECK( + false, + "Unsupported SqueezeNet version ", + version, + ". 1_0 or 1_1 expected"); + + // Final convolution is initialized differently from the rest + auto final_conv = + torch::nn::Conv2d(torch::nn::Conv2dOptions(512, num_classes, 1)); + + classifier = torch::nn::Sequential( + torch::nn::Dropout(0.5), + final_conv, + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Functional(modelsimpl::adaptive_avg_pool2d, 1)); + + register_module("features", features); + register_module("classifier", classifier); + + for (auto& module : modules(/*include_self=*/false)) + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) { + if (M == final_conv.get()) + torch::nn::init::normal_(M->weight, 0.0, 0.01); + else + torch::nn::init::kaiming_uniform_(M->weight); + + if (M->options.bias()) + torch::nn::init::constant_(M->bias, 0); + } +} + +torch::Tensor SqueezeNetImpl::forward(torch::Tensor x) { + x = features->forward(x); + x = classifier->forward(x); + return x.view({x.size(0), -1}); +} + +SqueezeNet1_0Impl::SqueezeNet1_0Impl(int64_t num_classes) + : SqueezeNetImpl(1.0, num_classes) {} + +SqueezeNet1_1Impl::SqueezeNet1_1Impl(int64_t num_classes) + : SqueezeNetImpl(1.1, num_classes) {} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.h new file mode 100644 index 0000000000000000000000000000000000000000..37bc5825717f2868323141d2c5805bb321ba8eb9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/squeezenet.h @@ -0,0 +1,37 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +struct VISION_API SqueezeNetImpl : torch::nn::Module { + int64_t num_classes; + torch::nn::Sequential features{nullptr}, classifier{nullptr}; + + explicit SqueezeNetImpl(double version = 1.0, int64_t num_classes = 1000); + + torch::Tensor forward(torch::Tensor x); +}; + +// SqueezeNet model architecture from the "SqueezeNet: AlexNet-level +// accuracy with 50x fewer parameters and <0.5MB model size" +// <https://arxiv.org/abs/1602.07360> paper. +struct VISION_API SqueezeNet1_0Impl : SqueezeNetImpl { + explicit SqueezeNet1_0Impl(int64_t num_classes = 1000); +}; + +// SqueezeNet 1.1 model from the official SqueezeNet repo +// <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>. +// SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters +// than SqueezeNet 1.0, without sacrificing accuracy. 
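Whichever feature stack is selected, both SqueezeNet versions share the classifier built in squeezenet.cpp: a final 1x1 convolution maps 512 channels directly to num_classes and global average pooling removes the spatial dimensions, so no Linear layer is needed before the view in forward(). A small stand-alone sketch of that head, with the layer sizes taken from the code above; the feature-map size is an illustrative assumption:

#include <iostream>
#include <torch/torch.h>

int main() {
  const int64_t num_classes = 1000;

  // The classifier head from squeezenet.cpp, applied step by step:
  // dropout -> 1x1 conv (512 -> num_classes) -> ReLU -> global average pool.
  auto final_conv =
      torch::nn::Conv2d(torch::nn::Conv2dOptions(512, num_classes, 1));

  auto x = torch::rand({2, 512, 13, 13});     // assumed feature-map size for a 224x224 input
  x = torch::dropout(x, /*p=*/0.5, /*train=*/false);
  x = torch::relu(final_conv->forward(x));    // {2, 1000, 13, 13}
  x = torch::adaptive_avg_pool2d(x, {1, 1});  // {2, 1000, 1, 1}
  x = x.view({x.size(0), -1});                // {2, 1000}, matching SqueezeNetImpl::forward's final view
  std::cout << x.sizes() << std::endl;
  return 0;
}
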
+struct VISION_API SqueezeNet1_1Impl : SqueezeNetImpl { + explicit SqueezeNet1_1Impl(int64_t num_classes = 1000); +}; + +TORCH_MODULE(SqueezeNet); +TORCH_MODULE(SqueezeNet1_0); +TORCH_MODULE(SqueezeNet1_1); + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.cpp new file mode 100644 index 0000000000000000000000000000000000000000..73d32d98214fac52f32ac259872c4475e745f664 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.cpp @@ -0,0 +1,115 @@ +#include "vgg.h" + +#include <unordered_map> +#include "modelsimpl.h" + +namespace vision { +namespace models { +torch::nn::Sequential makeLayers( + const std::vector<int>& cfg, + bool batch_norm = false) { + torch::nn::Sequential seq; + auto channels = 3; + + for (const auto& V : cfg) { + if (V <= -1) + seq->push_back(torch::nn::Functional(modelsimpl::max_pool2d, 2, 2)); + else { + seq->push_back(torch::nn::Conv2d( + torch::nn::Conv2dOptions(channels, V, 3).padding(1))); + + if (batch_norm) + seq->push_back(torch::nn::BatchNorm2d(V)); + seq->push_back(torch::nn::Functional(modelsimpl::relu_)); + + channels = V; + } + } + + return seq; +} + +void VGGImpl::_initialize_weights() { + for (auto& module : modules(/*include_self=*/false)) { + if (auto M = dynamic_cast<torch::nn::Conv2dImpl*>(module.get())) { + torch::nn::init::kaiming_normal_( + M->weight, + /*a=*/0, + torch::kFanOut, + torch::kReLU); + torch::nn::init::constant_(M->bias, 0); + } else if ( + auto M = dynamic_cast<torch::nn::BatchNorm2dImpl*>(module.get())) { + torch::nn::init::constant_(M->weight, 1); + torch::nn::init::constant_(M->bias, 0); + } else if (auto M = dynamic_cast<torch::nn::LinearImpl*>(module.get())) { + torch::nn::init::normal_(M->weight, 0, 0.01); + torch::nn::init::constant_(M->bias, 0); + } + } +} + +VGGImpl::VGGImpl( + const torch::nn::Sequential& features, + int64_t num_classes, + bool initialize_weights) { + classifier = torch::nn::Sequential( + torch::nn::Linear(512 * 7 * 7, 4096), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Dropout(), + torch::nn::Linear(4096, 4096), + torch::nn::Functional(modelsimpl::relu_), + torch::nn::Dropout(), + torch::nn::Linear(4096, num_classes)); + + this->features = features; + + register_module("features", this->features); + register_module("classifier", classifier); + + if (initialize_weights) + _initialize_weights(); +} + +torch::Tensor VGGImpl::forward(torch::Tensor x) { + x = features->forward(x); + x = torch::adaptive_avg_pool2d(x, {7, 7}); + x = x.view({x.size(0), -1}); + x = classifier->forward(x); + return x; +} + +// clang-format off +static std::unordered_map<char, std::vector<int>> cfgs = { + {'A', {64, -1, 128, -1, 256, 256, -1, 512, 512, -1, 512, 512, -1}}, + {'B', {64, 64, -1, 128, 128, -1, 256, 256, -1, 512, 512, -1, 512, 512, -1}}, + {'D', {64, 64, -1, 128, 128, -1, 256, 256, 256, -1, 512, 512, 512, -1, 512, 512, 512, -1}}, + {'E', {64, 64, -1, 128, 128, -1, 256, 256, 256, 256, -1, 512, 512, 512, 512, -1, 512, 512, 512, 512, -1}}}; +// clang-format on + +VGG11Impl::VGG11Impl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['A']), num_classes, initialize_weights) {} + +VGG13Impl::VGG13Impl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['B']), num_classes, initialize_weights) {} + +VGG16Impl::VGG16Impl(int64_t num_classes, bool initialize_weights) + : 
VGGImpl(makeLayers(cfgs['D']), num_classes, initialize_weights) {} + +VGG19Impl::VGG19Impl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['E']), num_classes, initialize_weights) {} + +VGG11BNImpl::VGG11BNImpl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['A'], true), num_classes, initialize_weights) {} + +VGG13BNImpl::VGG13BNImpl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['B'], true), num_classes, initialize_weights) {} + +VGG16BNImpl::VGG16BNImpl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['D'], true), num_classes, initialize_weights) {} + +VGG19BNImpl::VGG19BNImpl(int64_t num_classes, bool initialize_weights) + : VGGImpl(makeLayers(cfgs['E'], true), num_classes, initialize_weights) {} + +} // namespace models +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.h new file mode 100644 index 0000000000000000000000000000000000000000..dd5b30512c9f4ec5dc9e2ef4d902c0e634cd2149 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/models/vgg.h @@ -0,0 +1,90 @@ +#pragma once + +#include <torch/nn.h> +#include "../macros.h" + +namespace vision { +namespace models { +struct VISION_API VGGImpl : torch::nn::Module { + torch::nn::Sequential features{nullptr}, classifier{nullptr}; + + void _initialize_weights(); + + explicit VGGImpl( + const torch::nn::Sequential& features, + int64_t num_classes = 1000, + bool initialize_weights = true); + + torch::Tensor forward(torch::Tensor x); +}; + +// VGG 11-layer model (configuration "A") +struct VISION_API VGG11Impl : VGGImpl { + explicit VGG11Impl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 13-layer model (configuration "B") +struct VISION_API VGG13Impl : VGGImpl { + explicit VGG13Impl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 16-layer model (configuration "D") +struct VISION_API VGG16Impl : VGGImpl { + explicit VGG16Impl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 19-layer model (configuration "E") +struct VISION_API VGG19Impl : VGGImpl { + explicit VGG19Impl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 11-layer model (configuration "A") with batch normalization +struct VISION_API VGG11BNImpl : VGGImpl { + explicit VGG11BNImpl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 13-layer model (configuration "B") with batch normalization +struct VISION_API VGG13BNImpl : VGGImpl { + explicit VGG13BNImpl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 16-layer model (configuration "D") with batch normalization +struct VISION_API VGG16BNImpl : VGGImpl { + explicit VGG16BNImpl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +// VGG 19-layer model (configuration 'E') with batch normalization +struct VISION_API VGG19BNImpl : VGGImpl { + explicit VGG19BNImpl( + int64_t num_classes = 1000, + bool initialize_weights = true); +}; + +TORCH_MODULE(VGG); + +TORCH_MODULE(VGG11); +TORCH_MODULE(VGG13); +TORCH_MODULE(VGG16); +TORCH_MODULE(VGG19); + +TORCH_MODULE(VGG11BN); +TORCH_MODULE(VGG13BN); +TORCH_MODULE(VGG16BN); +TORCH_MODULE(VGG19BN); + +} // namespace models +} // namespace vision diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/deform_conv2d_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/deform_conv2d_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..28c325be9b1def7b1115ea3675bc078cc756a7ec --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/deform_conv2d_kernel.cpp @@ -0,0 +1,54 @@ +#include "../deform_conv2d.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +at::Tensor deform_conv2d_autocast( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast); + return deform_conv2d( + at::autocast::cached_cast(at::kFloat, input), + at::autocast::cached_cast(at::kFloat, weight), + at::autocast::cached_cast(at::kFloat, offset), + at::autocast::cached_cast(at::kFloat, mask), + at::autocast::cached_cast(at::kFloat, bias), + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask) + .to(input.scalar_type()); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), + TORCH_FN(deform_conv2d_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/nms_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/nms_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..3a0ead004fd9df5214663885226a3d973a83b968 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/nms_kernel.cpp @@ -0,0 +1,29 @@ +#include "../nms.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +at::Tensor nms_autocast( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast); + return nms( + at::autocast::cached_cast(at::kFloat, dets), + at::autocast::cached_cast(at::kFloat, scores), + iou_threshold); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl(TORCH_SELECTIVE_NAME("torchvision::nms"), TORCH_FN(nms_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..c93b26c8ad306a90a7cfcb5335508a111eda5374 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_align_kernel.cpp @@ -0,0 +1,41 @@ +#include "../ps_roi_align.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +std::tuple<at::Tensor, at::Tensor> ps_roi_align_autocast( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + c10::impl::ExcludeDispatchKeyGuard 
no_autocast(c10::DispatchKey::Autocast); + auto result = ps_roi_align( + at::autocast::cached_cast(at::kFloat, input), + at::autocast::cached_cast(at::kFloat, rois), + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); + + return std::make_tuple( + std::get<0>(result).to(input.scalar_type()), + std::get<1>(result).to(input.scalar_type())); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), + TORCH_FN(ps_roi_align_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1421680ea987f0a522689aadd1e4a5f3fd3b85ca --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/ps_roi_pool_kernel.cpp @@ -0,0 +1,39 @@ +#include "../ps_roi_pool.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autocast( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast); + auto result = ps_roi_pool( + at::autocast::cached_cast(at::kFloat, input), + at::autocast::cached_cast(at::kFloat, rois), + spatial_scale, + pooled_height, + pooled_width); + + return std::make_tuple( + std::get<0>(result).to(input.scalar_type()), + std::get<1>(result).to(input.scalar_type())); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"), + TORCH_FN(ps_roi_pool_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..95457224ac0cea1be54a0f50835564f07e083c5f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_align_kernel.cpp @@ -0,0 +1,40 @@ +#include "../roi_align.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +at::Tensor roi_align_autocast( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned) { + c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast); + return roi_align( + at::autocast::cached_cast(at::kFloat, input), + at::autocast::cached_cast(at::kFloat, rois), + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned) + .to(input.scalar_type()); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_align"), + TORCH_FN(roi_align_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d317c38c7924c85cc7054380305f6a74a47fc0f9 --- /dev/null 
+++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autocast/roi_pool_kernel.cpp @@ -0,0 +1,39 @@ +#include "../roi_pool.h" + +#include <ATen/autocast_mode.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +std::tuple<at::Tensor, at::Tensor> roi_pool_autocast( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + c10::impl::ExcludeDispatchKeyGuard no_autocast(c10::DispatchKey::Autocast); + auto result = roi_pool( + at::autocast::cached_cast(at::kFloat, input), + at::autocast::cached_cast(at::kFloat, rois), + spatial_scale, + pooled_height, + pooled_width); + + return std::make_tuple( + std::get<0>(result).to(input.scalar_type()), + std::get<1>(result).to(input.scalar_type())); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autocast, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_pool"), + TORCH_FN(roi_pool_autocast)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/deform_conv2d_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/deform_conv2d_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..801afb6a9bc2d07a6871ab5a16c4b520b5327816 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/deform_conv2d_kernel.cpp @@ -0,0 +1,266 @@ +#include "../deform_conv2d.h" + +#include <torch/autograd.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +class DeformConv2dFunction + : public torch::autograd::Function<DeformConv2dFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const torch::autograd::Variable& weight, + const torch::autograd::Variable& offset, + const torch::autograd::Variable& mask, + const torch::autograd::Variable& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + at::AutoDispatchBelowADInplaceOrView g; + auto output = deform_conv2d( + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); + + ctx->save_for_backward({input, weight, offset, mask, bias}); + ctx->saved_data["stride_h"] = stride_h; + ctx->saved_data["stride_w"] = stride_w; + ctx->saved_data["pad_h"] = pad_h; + ctx->saved_data["pad_w"] = pad_w; + ctx->saved_data["dilation_h"] = dilation_h; + ctx->saved_data["dilation_w"] = dilation_w; + ctx->saved_data["groups"] = groups; + ctx->saved_data["offset_groups"] = offset_groups; + ctx->saved_data["use_mask"] = use_mask; + + return { + output, + }; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + auto saved = ctx->get_saved_variables(); + auto input = saved[0]; + auto weight = saved[1]; + auto offset = saved[2]; + auto mask = saved[3]; + auto bias = saved[4]; + + auto stride_h = ctx->saved_data["stride_h"].toInt(); + auto stride_w = ctx->saved_data["stride_w"].toInt(); + auto pad_h = ctx->saved_data["pad_h"].toInt(); + auto pad_w = ctx->saved_data["pad_w"].toInt(); + auto dilation_h = ctx->saved_data["dilation_h"].toInt(); + auto dilation_w = ctx->saved_data["dilation_w"].toInt(); + auto 
groups = ctx->saved_data["groups"].toInt(); + auto offset_groups = ctx->saved_data["offset_groups"].toInt(); + auto use_mask = ctx->saved_data["use_mask"].toBool(); + + auto grads = detail::_deform_conv2d_backward( + grad_output[0], + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); + auto grad_input = std::get<0>(grads); + auto grad_weight = std::get<1>(grads); + auto grad_offset = std::get<2>(grads); + auto grad_mask = std::get<3>(grads); + auto grad_bias = std::get<4>(grads); + + return { + grad_input, + grad_weight, + grad_offset, + grad_mask, + grad_bias, + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + }; + } +}; + +// TODO: There should be an easier way to do this +class DeformConv2dBackwardFunction + : public torch::autograd::Function<DeformConv2dBackwardFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& grad, + const torch::autograd::Variable& input, + const torch::autograd::Variable& weight, + const torch::autograd::Variable& offset, + const torch::autograd::Variable& mask, + const torch::autograd::Variable& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + at::AutoDispatchBelowADInplaceOrView g; + auto result = detail::_deform_conv2d_backward( + grad, + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); + + auto grad_input = std::get<0>(result); + auto grad_weight = std::get<1>(result); + auto grad_offset = std::get<2>(result); + auto grad_mask = std::get<3>(result); + auto grad_bias = std::get<4>(result); + + return { + grad_input, + grad_weight, + grad_offset, + grad_mask, + grad_bias, + }; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + TORCH_CHECK(0, "double backwards on deform_conv2d not supported"); + } +}; + +at::Tensor deform_conv2d_autograd( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + return DeformConv2dFunction::apply( + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask)[0]; +} + +std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> +deform_conv2d_backward_autograd( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + auto result = DeformConv2dBackwardFunction::apply( + grad, + input, + weight, + offset, + mask, + bias, + stride_h, + 
stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); + + return std::make_tuple(result[0], result[1], result[2], result[3], result[4]); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autograd, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), + TORCH_FN(deform_conv2d_autograd)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_deform_conv2d_backward"), + TORCH_FN(deform_conv2d_backward_autograd)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..47e51ce9ca2c7ff23c5dd652d1cb97218dc0a91b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_align_kernel.cpp @@ -0,0 +1,167 @@ +#include "../ps_roi_align.h" + +#include <torch/autograd.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +class PSROIAlignFunction + : public torch::autograd::Function<PSROIAlignFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const torch::autograd::Variable& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + ctx->saved_data["spatial_scale"] = spatial_scale; + ctx->saved_data["pooled_height"] = pooled_height; + ctx->saved_data["pooled_width"] = pooled_width; + ctx->saved_data["sampling_ratio"] = sampling_ratio; + ctx->saved_data["input_shape"] = input.sizes(); + at::AutoDispatchBelowADInplaceOrView g; + auto result = ps_roi_align( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio); + + auto output = std::get<0>(result); + auto channel_mapping = std::get<1>(result); + ctx->save_for_backward({rois, channel_mapping}); + ctx->mark_non_differentiable({channel_mapping}); + + return {output, channel_mapping}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + // Use data saved in forward + auto saved = ctx->get_saved_variables(); + auto rois = saved[0]; + auto channel_mapping = saved[1]; + auto input_shape = ctx->saved_data["input_shape"].toIntList(); + auto grad_in = detail::_ps_roi_align_backward( + grad_output[0], + rois, + channel_mapping, + ctx->saved_data["spatial_scale"].toDouble(), + ctx->saved_data["pooled_height"].toInt(), + ctx->saved_data["pooled_width"].toInt(), + ctx->saved_data["sampling_ratio"].toInt(), + input_shape[0], + input_shape[1], + input_shape[2], + input_shape[3]); + + return { + grad_in, + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable()}; + } +}; + +// TODO: There should be an easier way to do this +class PSROIAlignBackwardFunction + : public torch::autograd::Function<PSROIAlignBackwardFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& grad, + const torch::autograd::Variable& rois, + const torch::autograd::Variable& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, + int64_t height, + 
int64_t width) { + at::AutoDispatchBelowADInplaceOrView g; + auto grad_in = detail::_ps_roi_align_backward( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + batch_size, + channels, + height, + width); + + return {grad_in}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + TORCH_CHECK(0, "double backwards on ps_roi_align not supported"); + } +}; + +std::tuple<at::Tensor, at::Tensor> ps_roi_align_autograd( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + auto result = PSROIAlignFunction::apply( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); + + return std::make_tuple(result[0], result[1]); +} + +at::Tensor ps_roi_align_backward_autograd( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + return PSROIAlignBackwardFunction::apply( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + batch_size, + channels, + height, + width)[0]; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autograd, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), + TORCH_FN(ps_roi_align_autograd)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), + TORCH_FN(ps_roi_align_backward_autograd)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..ddc37262382cbc2d82ee557cf7bf577f9da6592e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/ps_roi_pool_kernel.cpp @@ -0,0 +1,152 @@ +#include "../ps_roi_pool.h" + +#include <torch/autograd.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +class PSROIPoolFunction : public torch::autograd::Function<PSROIPoolFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const torch::autograd::Variable& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + ctx->saved_data["spatial_scale"] = spatial_scale; + ctx->saved_data["pooled_height"] = pooled_height; + ctx->saved_data["pooled_width"] = pooled_width; + ctx->saved_data["input_shape"] = input.sizes(); + at::AutoDispatchBelowADInplaceOrView g; + auto result = + ps_roi_pool(input, rois, spatial_scale, pooled_height, pooled_width); + + auto output = std::get<0>(result); + auto channel_mapping = std::get<1>(result); + ctx->save_for_backward({rois, channel_mapping}); + ctx->mark_non_differentiable({channel_mapping}); + + return {output, channel_mapping}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + // Use data saved in forward + auto saved = ctx->get_saved_variables(); + auto rois = saved[0]; + auto channel_mapping = saved[1]; + auto input_shape = 
ctx->saved_data["input_shape"].toIntList(); + auto grad_in = detail::_ps_roi_pool_backward( + grad_output[0], + rois, + channel_mapping, + ctx->saved_data["spatial_scale"].toDouble(), + ctx->saved_data["pooled_height"].toInt(), + ctx->saved_data["pooled_width"].toInt(), + input_shape[0], + input_shape[1], + input_shape[2], + input_shape[3]); + + return { + grad_in, + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable()}; + } +}; + +// TODO: There should be an easier way to do this +class PSROIPoolBackwardFunction + : public torch::autograd::Function<PSROIPoolBackwardFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& grad, + const torch::autograd::Variable& rois, + const torch::autograd::Variable& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + at::AutoDispatchBelowADInplaceOrView g; + auto grad_in = detail::_ps_roi_pool_backward( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width); + + return {grad_in}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + TORCH_CHECK(0, "double backwards on ps_roi_pool not supported"); + } +}; + +std::tuple<at::Tensor, at::Tensor> ps_roi_pool_autograd( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + auto result = PSROIPoolFunction::apply( + input, rois, spatial_scale, pooled_height, pooled_width); + + return std::make_tuple(result[0], result[1]); +} + +at::Tensor ps_roi_pool_backward_autograd( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + return PSROIPoolBackwardFunction::apply( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width)[0]; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autograd, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"), + TORCH_FN(ps_roi_pool_autograd)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_pool_backward"), + TORCH_FN(ps_roi_pool_backward_autograd)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..f26842b64286f2c9d186520949727af5da109267 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_align_kernel.cpp @@ -0,0 +1,167 @@ +#include "../roi_align.h" + +#include <torch/autograd.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +class ROIAlignFunction : public torch::autograd::Function<ROIAlignFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const torch::autograd::Variable& rois, + double spatial_scale, + int64_t pooled_height, + int64_t 
pooled_width, + int64_t sampling_ratio, + bool aligned) { + ctx->saved_data["spatial_scale"] = spatial_scale; + ctx->saved_data["pooled_height"] = pooled_height; + ctx->saved_data["pooled_width"] = pooled_width; + ctx->saved_data["sampling_ratio"] = sampling_ratio; + ctx->saved_data["aligned"] = aligned; + ctx->saved_data["input_shape"] = input.sizes(); + ctx->save_for_backward({rois}); + at::AutoDispatchBelowADInplaceOrView g; + auto result = roi_align( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); + return {result}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + // Use data saved in forward + auto saved = ctx->get_saved_variables(); + auto rois = saved[0]; + auto input_shape = ctx->saved_data["input_shape"].toIntList(); + auto grad_in = detail::_roi_align_backward( + grad_output[0], + rois, + ctx->saved_data["spatial_scale"].toDouble(), + ctx->saved_data["pooled_height"].toInt(), + ctx->saved_data["pooled_width"].toInt(), + input_shape[0], + input_shape[1], + input_shape[2], + input_shape[3], + ctx->saved_data["sampling_ratio"].toInt(), + ctx->saved_data["aligned"].toBool()); + return { + grad_in, + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable()}; + } +}; + +// TODO: There should be an easier way to do this +class ROIAlignBackwardFunction + : public torch::autograd::Function<ROIAlignBackwardFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& grad, + const torch::autograd::Variable& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned) { + at::AutoDispatchBelowADInplaceOrView g; + auto result = detail::_roi_align_backward( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); + return {result}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + TORCH_CHECK(0, "double backwards on roi_align not supported"); + } +}; + +at::Tensor roi_align_autograd( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned) { + return ROIAlignFunction::apply( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned)[0]; +} + +at::Tensor roi_align_backward_autograd( + const at::Tensor& grad, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned) { + return ROIAlignBackwardFunction::apply( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned)[0]; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autograd, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_align"), + TORCH_FN(roi_align_autograd)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_align_backward"), + 
TORCH_FN(roi_align_backward_autograd)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d246f831bcbe3f48838ceb472d21d6952eee97e9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/autograd/roi_pool_kernel.cpp @@ -0,0 +1,152 @@ +#include "../roi_pool.h" + +#include <torch/autograd.h> +#include <torch/types.h> + +namespace vision { +namespace ops { + +namespace { + +class ROIPoolFunction : public torch::autograd::Function<ROIPoolFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& input, + const torch::autograd::Variable& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + ctx->saved_data["spatial_scale"] = spatial_scale; + ctx->saved_data["pooled_height"] = pooled_height; + ctx->saved_data["pooled_width"] = pooled_width; + ctx->saved_data["input_shape"] = input.sizes(); + at::AutoDispatchBelowADInplaceOrView g; + auto result = + roi_pool(input, rois, spatial_scale, pooled_height, pooled_width); + + auto output = std::get<0>(result); + auto argmax = std::get<1>(result); + ctx->save_for_backward({rois, argmax}); + ctx->mark_non_differentiable({argmax}); + + return {output, argmax}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + // Use data saved in forward + auto saved = ctx->get_saved_variables(); + auto rois = saved[0]; + auto argmax = saved[1]; + auto input_shape = ctx->saved_data["input_shape"].toIntList(); + auto grad_in = detail::_roi_pool_backward( + grad_output[0], + rois, + argmax, + ctx->saved_data["spatial_scale"].toDouble(), + ctx->saved_data["pooled_height"].toInt(), + ctx->saved_data["pooled_width"].toInt(), + input_shape[0], + input_shape[1], + input_shape[2], + input_shape[3]); + + return { + grad_in, + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable(), + torch::autograd::Variable()}; + } +}; + +// TODO: There should be an easier way to do this +class ROIPoolBackwardFunction + : public torch::autograd::Function<ROIPoolBackwardFunction> { + public: + static torch::autograd::variable_list forward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::Variable& grad, + const torch::autograd::Variable& rois, + const torch::autograd::Variable& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + at::AutoDispatchBelowADInplaceOrView g; + auto grad_in = detail::_roi_pool_backward( + grad, + rois, + argmax, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width); + + return {grad_in}; + } + + static torch::autograd::variable_list backward( + torch::autograd::AutogradContext* ctx, + const torch::autograd::variable_list& grad_output) { + TORCH_CHECK(0, "double backwards on roi_pool not supported"); + } +}; + +std::tuple<at::Tensor, at::Tensor> roi_pool_autograd( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + auto result = ROIPoolFunction::apply( + input, rois, spatial_scale, pooled_height, 
pooled_width); + + return std::make_tuple(result[0], result[1]); +} + +at::Tensor roi_pool_backward_autograd( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + return ROIPoolBackwardFunction::apply( + grad, + rois, + argmax, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width)[0]; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, Autograd, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_pool"), + TORCH_FN(roi_pool_autograd)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"), + TORCH_FN(roi_pool_backward_autograd)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/deform_conv2d_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/deform_conv2d_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b1d15a158cfd7ece7d8822378530a039f020d918 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/deform_conv2d_kernel.cpp @@ -0,0 +1,1172 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer + ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer + ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. + * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modified from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp + +#include <ATen/ATen.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +const int kMaxParallelImgs = 32; + +template <typename scalar_t> +scalar_t bilinear_interpolate( + const scalar_t* in, + int height, + int width, + scalar_t h, + scalar_t w) { + if (h <= -1 || height <= h || w <= -1 || width <= w) { + return 0; + } + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = in[h_low * width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = in[h_low * width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = in[h_high * width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = in[h_high * width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template <typename scalar_t> +void deformable_im2col_kernel( + int n, + const scalar_t* input, + const scalar_t* offset, + const scalar_t* mask, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int n_in_channels, + int n_offset_grps, + int out_h, + int out_w, + bool use_mask, + scalar_t* columns) { + for (int index = 0; index != n; ++index) { + const int out_x = index % out_w; + const int out_y = (index / out_w) % out_h; + const int out_b = (index / (out_w * out_h)) % batch_sz; + const int in_c = index / (out_w * out_h * batch_sz); + const int out_c = in_c * weight_h * weight_w; + + int c_per_offset_grp = n_in_channels / n_offset_grps; + const int grp_idx = in_c / c_per_offset_grp; + + auto columns_ptr = columns + + (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + + out_y * out_w + out_x); + + auto input_ptr = input + + (out_b * (n_in_channels * height * width) + in_c * (height * width)); + + auto offset_ptr = offset + + (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * out_h * + out_w; + + auto mask_ptr = mask; + if (use_mask) { + mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * + out_h * out_w; + } + + for (int i = 0; i < weight_h; ++i) { + for (int j = 0; j < weight_w; ++j) { + const int mask_idx = i * weight_w + j; + const int 
offset_idx = 2 * mask_idx; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = + mask_ptr[mask_idx * (out_h * out_w) + out_y * out_w + out_x]; + } + + const scalar_t offset_h = + offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]; + const scalar_t offset_w = offset_ptr + [(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]; + const scalar_t y = + (out_y * stride_h - pad_h) + i * dilation_h + offset_h; + const scalar_t x = + (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + *columns_ptr = + mask_value * bilinear_interpolate(input_ptr, height, width, y, x); + columns_ptr += batch_sz * out_h * out_w; + } + } + } +} + +void deformable_im2col( + const at::Tensor& input, + const at::Tensor& data_offset, + const at::Tensor& data_mask, + int n_in_channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int out_h, + int out_w, + int parallel_imgs, + int deformable_group, + bool use_mask, + at::Tensor data_col) { + int num_kernels = n_in_channels * out_h * out_w * parallel_imgs; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "deformable_im2col", ([&] { + deformable_im2col_kernel( + num_kernels, + input.data_ptr<scalar_t>(), + data_offset.data_ptr<scalar_t>(), + data_mask.data_ptr<scalar_t>(), + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + n_in_channels, + deformable_group, + out_h, + out_w, + use_mask, + data_col.data_ptr<scalar_t>()); + })); +} + +int get_greatest_divisor_below_bound(int n, int bound) { + for (int k = bound; k > 1; --k) { + if (n % k == 0) { + return k; + } + } + return 1; +} + +template <typename scalar_t> +void deformable_col2im_kernel( + int n, + const scalar_t* col, + const scalar_t* offset, + const scalar_t* mask, + int channels, + int height, + int width, + int kernel_h, + int kernel_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int n_offset_grps, + int out_h, + int out_w, + bool use_mask, + scalar_t* grad_im) { + for (int index = 0; index != n; ++index) { + const int out_x = index % out_w; + const int out_y = (index / out_w) % out_h; + const int b = (index / (out_w * out_h)) % batch_sz; + const int j = (index / (out_w * out_h * batch_sz)) % kernel_w; + const int i = (index / (out_w * out_h * batch_sz * kernel_w)) % kernel_h; + const int c = index / (out_w * out_h * batch_sz * kernel_w * kernel_h); + + int c_per_offset_grp = channels / n_offset_grps; + const int offset_grp = c / c_per_offset_grp; + + auto offset_ptr = offset + + (b * n_offset_grps + offset_grp) * 2 * kernel_h * kernel_w * out_h * + out_w; + + auto mask_ptr = mask; + if (use_mask) { + mask_ptr += (b * n_offset_grps + offset_grp) * kernel_h * kernel_w * + out_h * out_w; + } + + const int mask_idx = i * kernel_w + j; + const int offset_idx = 2 * mask_idx; + + const int offset_h_ptr = ((offset_idx)*out_h + out_y) * out_w + out_x; + const int offset_w_ptr = ((offset_idx + 1) * out_h + out_y) * out_w + out_x; + + const scalar_t offset_h = offset_ptr[offset_h_ptr]; + const scalar_t offset_w = offset_ptr[offset_w_ptr]; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; + } + + const scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; + const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + + for (int 
dy = -1; dy <= 1; dy++) { + for (int dx = -1; dx <= 1; dx++) { + int yp = int(y) + dy; + int xp = int(x) + dx; + if (0 <= yp && yp < height && 0 <= xp && xp < width && + std::abs(y - yp) < 1 && std::abs(x - xp) < 1) { + int grad_pos = ((b * channels + c) * height + yp) * width + xp; + scalar_t weight = (1 - std::abs(y - yp)) * (1 - std::abs(x - xp)); + grad_im[grad_pos] += mask_value * weight * col[index]; + } + } + } + } +} + +void compute_grad_input( + const at::Tensor& columns, + const at::Tensor& offset, + const at::Tensor& mask, + int channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int parallel_imgs, + int n_offset_grps, + bool use_mask, + at::Tensor grad_im) { + int out_h = + (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + int out_w = + (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * weight_h * weight_w * out_h * out_w * parallel_imgs; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + columns.scalar_type(), "compute_grad_input", ([&] { + deformable_col2im_kernel( + num_kernels, + columns.data_ptr<scalar_t>(), + offset.data_ptr<scalar_t>(), + mask.data_ptr<scalar_t>(), + channels, + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + n_offset_grps, + out_h, + out_w, + use_mask, + grad_im.data_ptr<scalar_t>()); + })); +} + +template <typename scalar_t> +scalar_t get_coordinate_weight( + const scalar_t* im_data, + int height, + int width, + scalar_t y, + scalar_t x, + bool is_y_direction) { + int y_l = floor(y); + int x_l = floor(x); + int y_h = y_l + 1; + int x_h = x_l + 1; + + bool valid_y_l = 0 <= y_l && y_l < height; + bool valid_y_h = 0 <= y_h && y_h < height; + bool valid_x_l = 0 <= x_l && x_l < width; + bool valid_x_h = 0 <= x_h && x_h < width; + + scalar_t zero = 0; + scalar_t v_yx = (valid_y_l && valid_x_l) ? im_data[y_l * width + x_l] : zero; + scalar_t v_yX = (valid_y_l && valid_x_h) ? im_data[y_l * width + x_h] : zero; + scalar_t v_Yx = (valid_y_h && valid_x_l) ? im_data[y_h * width + x_l] : zero; + scalar_t v_YX = (valid_y_h && valid_x_h) ? 
im_data[y_h * width + x_h] : zero; + + if (is_y_direction) { + scalar_t dx = x - x_l; + return dx * (v_YX - v_yX) + (1 - dx) * (v_Yx - v_yx); + } else { + scalar_t dy = y - y_l; + return dy * (v_YX - v_Yx) + (1 - dy) * (v_yX - v_yx); + } +} + +template <typename scalar_t> +void deformable_col2im_coord_kernel( + int n, + const scalar_t* col, + const scalar_t* im, + const scalar_t* offset, + const scalar_t* mask, + int channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int offset_channels, + int n_offset_grps, + int out_h, + int out_w, + bool use_mask, + scalar_t* grad_offset, + scalar_t* grad_mask) { + for (int index = 0; index != n; ++index) { + scalar_t grad_offset_val = 0; + scalar_t grad_mask_val = 0; + + int w = index % out_w; + int h = (index / out_w) % out_h; + int w_w = (index / (out_w * out_h * 2)) % weight_w; + int w_h = (index / (out_w * out_h * 2 * weight_w)) % weight_h; + int c = (index / (out_w * out_h)) % offset_channels; + int b = index / (out_w * out_h * offset_channels); + + const int offset_grp = c / (2 * weight_h * weight_w); + const int col_step = weight_h * weight_w; + + int c_per_offset_grp = channels / n_offset_grps; + + auto col_ptr = col + + offset_grp * c_per_offset_grp * weight_h * weight_w * batch_sz * out_w * + out_h; + auto im_ptr = im + + (b * n_offset_grps + offset_grp) * c_per_offset_grp * height * width; + auto offset_ptr = offset + + (b * n_offset_grps + offset_grp) * 2 * weight_h * weight_w * out_h * + out_w; + + auto mask_ptr = mask; + if (use_mask) { + mask_ptr += (b * n_offset_grps + offset_grp) * weight_h * weight_w * + out_h * out_w; + } + + const int offset_c = c - offset_grp * 2 * weight_h * weight_w; + const bool is_y_direction = offset_c % 2 == 0; + + const int c_bound = c_per_offset_grp * weight_h * weight_w; + for (int col_c = (offset_c / 2); col_c < c_bound; col_c += col_step) { + const int col_pos = (((col_c * batch_sz + b) * out_h) + h) * out_w + w; + + int out_x = col_pos % out_w; + int out_y = (col_pos / out_w) % out_h; + int j = (col_pos / (out_w * out_h * batch_sz)) % weight_w; + int i = (col_pos / (out_w * out_h * batch_sz * weight_w)) % weight_h; + + const int mask_idx = i * weight_w + j; + + const int offset_h_idx = + (((2 * mask_idx) * out_h + out_y) * out_w + out_x); + const int offset_w_idx = + (((2 * mask_idx + 1) * out_h + out_y) * out_w + out_x); + const scalar_t offset_h = offset_ptr[offset_h_idx]; + const scalar_t offset_w = offset_ptr[offset_w_idx]; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; + } + + scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; + scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + + const scalar_t weight = + get_coordinate_weight(im_ptr, height, width, y, x, is_y_direction); + grad_offset_val += mask_value * weight * col_ptr[col_pos]; + + if (use_mask && is_y_direction) { + grad_mask_val += col_ptr[col_pos] * + bilinear_interpolate(im_ptr, height, width, y, x); + } + + im_ptr += height * width; + } + + grad_offset[index] = grad_offset_val; + + if (use_mask && is_y_direction) { + const int idx = + ((((b * n_offset_grps + offset_grp) * weight_h + w_h) * weight_w + + w_w) * + out_h + + h) * + out_w + + w; + grad_mask[idx] = grad_mask_val; + } + } +} + +void compute_grad_offset_and_mask( + const at::Tensor& columns, + const at::Tensor& input, + const at::Tensor& offset, + 
const at::Tensor& mask, + int channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int parallel_imgs, + int n_offset_grps, + bool use_mask, + at::Tensor grad_offset, + at::Tensor grad_mask) { + int out_h = + (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + int out_w = + (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + int num_kernels = + out_h * out_w * 2 * weight_h * weight_w * n_offset_grps * parallel_imgs; + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + columns.scalar_type(), "compute_grad_offset_and_mask", ([&] { + deformable_col2im_coord_kernel( + num_kernels, + columns.data_ptr<scalar_t>(), + input.data_ptr<scalar_t>(), + offset.data_ptr<scalar_t>(), + mask.data_ptr<scalar_t>(), + channels, + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + 2 * weight_h * weight_w * n_offset_grps, + n_offset_grps, + out_h, + out_w, + use_mask, + grad_offset.data_ptr<scalar_t>(), + grad_mask.data_ptr<scalar_t>()); + })); +} + +std::tuple<at::Tensor, at::Tensor, at::Tensor> backward_gradient_inputs( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor mask, + at::Tensor grad_out, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int n_weight_grps, + int n_offset_grps, + int n_parallel_imgs, + bool use_mask) { + int batch_sz = input.size(0); + int n_in_channels = input.size(1); + int in_h = input.size(2); + int in_w = input.size(3); + + n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); + + long n_out_channels = weight.size(0); + int weight_h = weight.size(2); + int weight_w = weight.size(3); + + long out_h = + (in_h + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + long out_w = + (in_w + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + + auto grad_input = at::zeros_like(input); + auto grad_offset = at::zeros_like(offset); + auto grad_mask = at::zeros_like(mask); + + if (batch_sz == 0) { + return std::make_tuple(grad_input, grad_offset, grad_mask); + } + + auto columns = at::empty( + {n_in_channels * weight_w * weight_h, n_parallel_imgs * out_h * out_w}, + input.options()); + + // Separate into blocks + grad_input = grad_input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + input = input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + + grad_offset = grad_offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + offset = offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + grad_mask = grad_mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + mask = mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + grad_out = grad_out + .reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_weight_grps, + n_out_channels / n_weight_grps, + out_h, + out_w}) + .permute({0, 2, 3, 1, 4, 5}); + + weight = weight.reshape( + {n_weight_grps, + weight.size(0) / n_weight_grps, + weight.size(1), + weight.size(2), + weight.size(3)}); + + columns = columns.view( + {n_weight_grps, 
columns.size(0) / n_weight_grps, columns.size(1)}); + + for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { + columns.zero_(); + // Separate into weight groups + for (int g = 0; g < n_weight_grps; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), grad_out[elt][g].flatten(1)); + } + + compute_grad_offset_and_mask( + columns, + input[elt], + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + grad_offset[elt], + grad_mask[elt]); + + compute_grad_input( + columns, + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + grad_input[elt]); + } + + grad_input = grad_input.view({batch_sz, n_in_channels, in_h, in_w}); + grad_offset = grad_offset.view( + {batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); + + if (use_mask) { + grad_mask = grad_mask.view( + {batch_sz, n_offset_grps * weight_h * weight_w, out_h, out_w}); + } + + return std::make_tuple(grad_input, grad_offset, grad_mask); +} + +at::Tensor backward_gradient_parameters( + at::Tensor input, + const at::Tensor& weight, + at::Tensor offset, + at::Tensor mask, + const at::Tensor& grad_out, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int n_weight_grps, + int n_offset_grps, + int n_parallel_imgs, + bool use_mask) { + int batch_sz = input.size(0); + int n_in_channels = input.size(1); + int in_h = input.size(2); + int in_w = input.size(3); + + n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); + + long n_out_channels = weight.size(0); + int weight_h = weight.size(2); + int weight_w = weight.size(3); + + long out_h = grad_out.size(2); + long out_w = grad_out.size(3); + + auto grad_weight = at::zeros_like(weight); + if (batch_sz == 0) { + return grad_weight; + } + + at::Tensor grad_out_buf = grad_out + .reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_weight_grps, + n_out_channels / n_weight_grps, + out_h, + out_w}) + .permute({0, 2, 3, 1, 4, 5}) + .contiguous(); + + input = input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + + offset = offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + mask = mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + grad_weight = grad_weight.view( + {n_weight_grps, + grad_weight.size(0) / n_weight_grps, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + + auto columns = at::empty( + {n_weight_grps, + n_in_channels * weight_w * weight_h / n_weight_grps, + n_parallel_imgs * out_h * out_w}, + input.options()); + + for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { + deformable_im2col( + input[elt], + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + out_h, + out_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + columns); + + for (int g = 0; g < n_weight_grps; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_( + grad_out_buf[elt][g].flatten(1), columns[g].transpose(1, 0)) + .view_as(grad_weight[g]); + } + } + + grad_weight = 
grad_weight.view( + {grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + return grad_weight; +} + +at::Tensor deform_conv2d_forward_kernel( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t n_weight_grps, + int64_t n_offset_grps, + bool use_mask) { + at::Tensor input_c = input.contiguous(); + at::Tensor offset_c = offset.contiguous(); + at::Tensor weight_c = weight.contiguous(); + at::Tensor mask_c = mask.contiguous(); + at::Tensor bias_c = bias.contiguous(); + + TORCH_CHECK(input_c.ndimension() == 4); + TORCH_CHECK(offset_c.ndimension() == 4); + TORCH_CHECK(!use_mask || mask_c.ndimension() == 4); + TORCH_CHECK(weight_c.ndimension() == 4); + TORCH_CHECK(input_c.device().is_cpu(), "input must be a CPU tensor"); + + int batch_sz = input_c.size(0); + int n_in_channels = input_c.size(1); + int in_h = input_c.size(2); + int in_w = input_c.size(3); + + int n_parallel_imgs = + get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); + + // Unpack shapes and args + int out_channels = weight_c.size(0); + int weight_h = weight_c.size(2); + int weight_w = weight_c.size(3); + + int ker_h = dilation_h * (weight_h - 1) + 1; + int ker_w = dilation_w * (weight_w - 1) + 1; + int out_h = ((in_h + 2 * pad_h - ker_h) / stride_h) + 1; + int out_w = ((in_w + 2 * pad_w - ker_w) / stride_w) + 1; + + TORCH_CHECK( + weight_h > 0 && weight_w > 0, + "weight_h: ", + weight_h, + " weight_w: ", + weight_w); + TORCH_CHECK( + stride_h > 0 && stride_w > 0, + "stride_h: ", + stride_h, + " stride_w: ", + stride_w); + TORCH_CHECK(pad_h >= 0 && pad_w >= 0, "pad_h: ", pad_h, " pad_w: ", pad_w); + TORCH_CHECK( + dilation_h > 0 && dilation_w > 0, + "dilation_h: ", + dilation_h, + " dilation_w: ", + dilation_w); + + TORCH_CHECK(weight_c.size(1) * n_weight_grps == input_c.size(1)); + TORCH_CHECK(weight_c.size(0) % n_weight_grps == 0); + TORCH_CHECK( + (offset_c.size(1) == n_offset_grps * 2 * weight_h * weight_w), + "offset.shape[1] is not valid: got: ", + offset_c.size(1), + " expected: ", + n_offset_grps * 2 * weight_h * weight_w); + TORCH_CHECK( + (!use_mask || mask_c.size(1) == n_offset_grps * weight_h * weight_w), + "mask.shape[1] is not valid: got: ", + mask_c.size(1), + " expected: ", + n_offset_grps * weight_h * weight_w); + TORCH_CHECK(input_c.size(1) % n_offset_grps == 0); + + TORCH_CHECK( + (offset_c.size(0) == input_c.size(0)), "invalid batch size of offset"); + TORCH_CHECK( + (offset_c.size(2) == out_h && offset_c.size(3) == out_w), + "offset output dims: (", + offset_c.size(2), + ", ", + offset_c.size(3), + ") - ", + "computed output dims: (", + out_h, + ", ", + out_w, + ")"); + TORCH_CHECK( + (mask_c.size(0) == input_c.size(0)), "invalid batch size of mask"); + TORCH_CHECK( + (!use_mask || (mask_c.size(2) == out_h && mask_c.size(3) == out_w)), + "offset output dims: (", + mask_c.size(2), + ", ", + mask_c.size(3), + ") - ", + "computed output dims: (", + out_h, + ", ", + out_w, + ")"); + TORCH_CHECK( + out_h > 0 && out_w > 0, + "Calculated output size too small - out_h: ", + out_h, + " out_w: ", + out_w); + + auto out = + at::zeros({batch_sz, out_channels, out_h, out_w}, input_c.options()); + if (batch_sz == 0) { + return out; + } + + // Separate batches into blocks + out = out.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + 
out_channels, + out_h, + out_w}); + input_c = input_c.view( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + + offset_c = offset_c.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + mask_c = mask_c.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + at::Tensor out_buf = at::zeros( + {batch_sz / n_parallel_imgs, + out_channels, + n_parallel_imgs * out_h, + out_w}, + out.options()); + + // Separate channels into convolution groups + out_buf = out_buf.view( + {out_buf.size(0), + n_weight_grps, + out_buf.size(1) / n_weight_grps, + out_buf.size(2), + out_buf.size(3)}); + weight_c = weight_c.view( + {n_weight_grps, + weight_c.size(0) / n_weight_grps, + weight_c.size(1), + weight_c.size(2), + weight_c.size(3)}); + + // Sample points and perform convolution + auto columns = at::zeros( + {n_in_channels * weight_h * weight_w, n_parallel_imgs * out_h * out_w}, + input_c.options()); + for (int b = 0; b < batch_sz / n_parallel_imgs; b++) { + deformable_im2col( + input_c[b], + offset_c[b], + mask_c[b], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + out_h, + out_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + columns); + + columns = columns.view( + {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); + for (int g = 0; g < n_weight_grps; g++) { + out_buf[b][g] = out_buf[b][g] + .flatten(1) + .addmm_(weight_c[g].flatten(1), columns[g]) + .view_as(out_buf[b][g]); + } + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + out_buf = out_buf.view( + {batch_sz / n_parallel_imgs, + out_channels, + n_parallel_imgs, + out_h, + out_w}); + out_buf.transpose_(1, 2); + out.copy_(out_buf); + out = out.view({batch_sz, out_channels, out_h, out_w}); + + return out + bias_c.view({1, out_channels, 1, 1}); +} + +std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> +deform_conv2d_backward_kernel( + const at::Tensor& grad_out, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t n_weight_grps, + int64_t n_offset_grps, + bool use_mask) { + at::Tensor grad_out_c = grad_out.contiguous(); + at::Tensor input_c = input.contiguous(); + at::Tensor weight_c = weight.contiguous(); + at::Tensor offset_c = offset.contiguous(); + at::Tensor mask_c = mask.contiguous(); + at::Tensor bias_c = bias.contiguous(); + + const int batch_sz = input_c.size(0); + const int n_parallel_imgs = + get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); + + auto grad_input_and_offset_and_mask = backward_gradient_inputs( + input_c, + weight_c, + offset_c, + mask_c, + grad_out_c, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + n_weight_grps, + n_offset_grps, + n_parallel_imgs, + use_mask); + + auto grad_input = std::get<0>(grad_input_and_offset_and_mask); + auto grad_offset = std::get<1>(grad_input_and_offset_and_mask); + auto grad_mask = std::get<2>(grad_input_and_offset_and_mask); + + auto grad_weight = backward_gradient_parameters( + input_c, + weight_c, + offset_c, + mask_c, + grad_out_c, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + n_weight_grps, + 
n_offset_grps, + n_parallel_imgs, + use_mask); + + auto grad_bias = at::ones_like(bias_c) * grad_out_c.sum({0, 2, 3}); + + return std::make_tuple( + grad_input, grad_weight, grad_offset, grad_mask, grad_bias); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), + TORCH_FN(deform_conv2d_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_deform_conv2d_backward"), + TORCH_FN(deform_conv2d_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp new file mode 100644 index 0000000000000000000000000000000000000000..97b025aafb43a8765d9dfa0472c94fb4cfc3eb41 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/interpolate_aa_kernels.cpp @@ -0,0 +1,572 @@ +#include <ATen/TypeDefault.h> +#include <ATen/native/IndexingUtils.h> +#include <ATen/native/TensorIterator.h> +#include <ATen/native/UpSample.h> +#include <cmath> +#include <vector> + +#include <torch/library.h> + +// Code temporary is in torchvision before merging it to PyTorch +namespace at { +namespace native { +namespace internal_upsample { + +using scale_t = std::vector<c10::optional<double>>; + +template <typename scalar_t, typename index_t> +static inline scalar_t interpolate_aa_single_dim_zero_strides( + char* src, + char** data, + int64_t i, + const index_t ids_stride) { + const index_t ids_min = *(index_t*)&data[0][0]; + const index_t ids_size = *(index_t*)&data[1][0]; + + char* src_min = src + ids_min; + + scalar_t t = *(scalar_t*)&src_min[0]; + index_t wts_idx = *(index_t*)&data[4][0]; + scalar_t* wts_ptr = (scalar_t*)&data[3][wts_idx]; + scalar_t wts = wts_ptr[0]; + + scalar_t output = t * wts; + int j = 1; + for (; j < ids_size; j++) { + wts = wts_ptr[j]; + t = *(scalar_t*)&src_min[j * ids_stride]; + output += t * wts; + } + return output; +} + +template <typename scalar_t, typename index_t> +static inline scalar_t interpolate_aa_single_dim( + char* src, + char** data, + const int64_t* strides, + int64_t i, + const index_t ids_stride) { + index_t ids_min = *(index_t*)&data[0][i * strides[0]]; + index_t ids_size = *(index_t*)&data[1][i * strides[1]]; + + char* src_min = src + ids_min; + + scalar_t t = *(scalar_t*)&src_min[0]; + index_t wts_idx = *(index_t*)&data[4][i * strides[4]]; + scalar_t* wts_ptr = (scalar_t*)&data[3][wts_idx]; + scalar_t wts = wts_ptr[0]; + + scalar_t output = t * wts; + int j = 1; + for (; j < ids_size; j++) { + wts = wts_ptr[j]; + t = *(scalar_t*)&src_min[j * ids_stride]; + output += t * wts; + } + return output; +} + +template <typename scalar_t, typename index_t> +static inline void basic_loop_aa_single_dim_zero_strides( + char** data, + const int64_t* strides, + int64_t n) { + char* dst = data[0]; + char* src = data[1]; + // index stride is constant for the given dimension + const index_t ids_stride = *(index_t*)&data[2 + 2][0]; + + for (int64_t i = 0; i < n; i++) { + *(scalar_t*)&dst[i * strides[0]] = + interpolate_aa_single_dim_zero_strides<scalar_t, index_t>( + src + i * strides[1], &data[2], i, ids_stride); + } +} + +template <typename scalar_t, typename index_t> +static inline void basic_loop_aa_single_dim_nonzero_strides( + char** data, + const int64_t* strides, + int64_t n) { + char* dst = data[0]; + char* src = data[1]; + // index stride is constant for the given 
dimension + const index_t ids_stride = *(index_t*)&data[2 + 2][0]; + + if (strides[1] == 0) { + for (int64_t i = 0; i < n; i++) { + *(scalar_t*)&dst[i * strides[0]] = + interpolate_aa_single_dim<scalar_t, index_t>( + src, &data[2], &strides[2], i, ids_stride); + } + } else { + for (int64_t i = 0; i < n; i++) { + *(scalar_t*)&dst[i * strides[0]] = + interpolate_aa_single_dim<scalar_t, index_t>( + src + i * strides[1], &data[2], &strides[2], i, ids_stride); + } + } +} + +template <int m> +static inline bool is_zero_stride(const int64_t* strides) { + bool output = strides[0] == 0; + for (int i = 1; i < m; i++) { + output &= (strides[i] == 0); + } + return output; +} + +template <typename scalar_t, typename index_t, int out_ndims> +void ti_cpu_upsample_generic_aa( + at::TensorIterator& iter, + int interp_size = -1) { + TORCH_INTERNAL_ASSERT(interp_size > 0); + + auto loop = [&](char** data, const int64_t* strides, int64_t n) { + if ((strides[0] == sizeof(scalar_t)) && (strides[1] == sizeof(scalar_t)) && + is_zero_stride<3 + 2>(&strides[2])) { + basic_loop_aa_single_dim_zero_strides<scalar_t, index_t>( + data, strides, n); + } else { + basic_loop_aa_single_dim_nonzero_strides<scalar_t, index_t>( + data, strides, n); + } + }; + + iter.for_each(loop); +} + +// Helper structs to use with ti_upsample_generic_Nd_kernel_impl +template <typename index_t, typename scalar_t> +struct HelperInterpBase { + template <typename filter_fn_t> + static inline std::vector<Tensor> _compute_indices_weights_aa( + int64_t input_size, + int64_t output_size, + int64_t stride, + int64_t ndims, + int64_t reshape_dim, + bool align_corners, + scalar_t scale, + int& in_out_interp_size, + filter_fn_t filter_fn) { + int interp_size = in_out_interp_size; + scalar_t support = + (scale >= 1.0) ? (interp_size * 0.5) * scale : interp_size * 0.5; + interp_size = (int)ceilf(support) * 2 + 1; + + // return interp_size + in_out_interp_size = interp_size; + + std::vector<Tensor> output; + auto new_shape = std::vector<int64_t>(ndims, 1); + new_shape[reshape_dim] = output_size; + + // ---- Bounds approach as in PIL ----- + // bounds: xmin/xmax + output.emplace_back( + empty(new_shape, CPU(c10::CppTypeToScalarType<index_t>()))); + output.emplace_back( + empty(new_shape, CPU(c10::CppTypeToScalarType<index_t>()))); + output.emplace_back( + empty(new_shape, CPU(c10::CppTypeToScalarType<index_t>()))); + + { + // Weights + new_shape[reshape_dim] = output_size * interp_size; + auto wts = empty(new_shape, CPU(c10::CppTypeToScalarType<scalar_t>())); + auto strides = wts.strides().vec(); + strides[reshape_dim] = 0; + new_shape[reshape_dim] = output_size; + wts = wts.as_strided(new_shape, strides); + output.emplace_back(wts); + // Weights indices + output.emplace_back( + empty(new_shape, CPU(c10::CppTypeToScalarType<index_t>()))); + } + + scalar_t center, total_w, invscale = (scale >= 1.0) ? 
1.0 / scale : 1.0; + index_t zero = static_cast<index_t>(0); + int64_t* idx_ptr_xmin = output[0].data_ptr<index_t>(); + int64_t* idx_ptr_size = output[1].data_ptr<index_t>(); + int64_t* idx_ptr_stride = output[2].data_ptr<index_t>(); + scalar_t* wt_ptr = output[3].data_ptr<scalar_t>(); + int64_t* wt_idx_ptr = output[4].data_ptr<index_t>(); + + int64_t xmin, xmax, j; + + for (int64_t i = 0; i < output_size; i++) { + center = scale * (i + 0.5); + xmin = std::max(static_cast<int64_t>(center - support + 0.5), zero); + xmax = + std::min(static_cast<int64_t>(center + support + 0.5), input_size) - + xmin; + idx_ptr_xmin[i] = xmin * stride; + idx_ptr_size[i] = xmax; + idx_ptr_stride[i] = stride; + + wt_idx_ptr[i] = i * interp_size * sizeof(scalar_t); + + total_w = 0.0; + for (j = 0; j < xmax; j++) { + scalar_t w = filter_fn((j + xmin - center + 0.5) * invscale); + wt_ptr[i * interp_size + j] = w; + total_w += w; + } + for (j = 0; j < xmax; j++) { + if (total_w != 0.0) { + wt_ptr[i * interp_size + j] /= total_w; + } + } + + for (; j < interp_size; j++) { + wt_ptr[i * interp_size + j] = static_cast<scalar_t>(0.0); + } + } + return output; + } +}; + +template <typename index_t, typename scalar_t> +struct HelperInterpLinear : public HelperInterpBase<index_t, scalar_t> { + static const int interp_size = 2; + + // taken from + // https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ + // src/libImaging/Resample.c#L20-L29 + static inline scalar_t _filter(scalar_t x) { + if (x < 0.0) { + x = -x; + } + if (x < 1.0) { + return 1.0 - x; + } + return 0.0; + } + + static inline std::vector<Tensor> compute_indices_weights( + int64_t input_size, + int64_t output_size, + int64_t stride, + int64_t ndims, + int64_t reshape_dim, + bool align_corners, + const c10::optional<double> opt_scale, + bool antialias, + int& out_interp_size) { + TORCH_INTERNAL_ASSERT(antialias); + scalar_t scale = area_pixel_compute_scale<scalar_t>( + input_size, output_size, align_corners, opt_scale); + + out_interp_size = HelperInterpLinear<index_t, scalar_t>::interp_size; + return HelperInterpLinear<index_t, scalar_t>::_compute_indices_weights_aa( + input_size, + output_size, + stride, + ndims, + reshape_dim, + align_corners, + scale, + out_interp_size, + _filter); + } +}; + +template <typename index_t, typename scalar_t> +struct HelperInterpCubic : public HelperInterpBase<index_t, scalar_t> { + static const int interp_size = 4; + + static inline std::vector<Tensor> compute_indices_weights( + int64_t input_size, + int64_t output_size, + int64_t stride, + int64_t ndims, + int64_t reshape_dim, + bool align_corners, + const c10::optional<double> opt_scale, + bool antialias, + int& out_interp_size) { + TORCH_INTERNAL_ASSERT(antialias); + scalar_t scale = area_pixel_compute_scale<scalar_t>( + input_size, output_size, align_corners, opt_scale); + + out_interp_size = HelperInterpCubic<index_t, scalar_t>::interp_size; + return HelperInterpCubic<index_t, scalar_t>::_compute_indices_weights_aa( + input_size, + output_size, + stride, + ndims, + reshape_dim, + align_corners, + scale, + out_interp_size, + _filter); + } + + // taken from + // https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ + // src/libImaging/Resample.c#L46-L62 + static inline scalar_t _filter(scalar_t x) { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +#define a -0.5 + if (x < 0.0) { + x = -x; + } + if (x < 1.0) { + return ((a + 2.0) * x - (a + 3.0)) * x * x + 1; + } + if (x 
< 2.0) { + return (((x - 5) * x + 8) * x - 4) * a; + } + return 0.0; +#undef a + } +}; + +template < + typename index_t, + int out_ndims, + typename scale_type, + template <typename, typename> + class F> +void _ti_separable_upsample_generic_Nd_kernel_impl_single_dim( + Tensor& output, + const Tensor& input, + int interp_dim, + bool align_corners, + const scale_type& scales, + bool antialias) { + // input can be NCHW, NCL or NCKHW + auto shape = input.sizes().vec(); + auto strides = input.strides().vec(); + auto oshape = output.sizes(); + + TORCH_INTERNAL_ASSERT( + shape.size() == oshape.size() && shape.size() == 2 + out_ndims); + TORCH_INTERNAL_ASSERT(strides.size() == 2 + out_ndims); + TORCH_INTERNAL_ASSERT(antialias); + + for (int i = 0; i < out_ndims; i++) { + shape[i + 2] = oshape[i + 2]; + } + strides[interp_dim] = 0; + auto restrided_input = input.as_strided(shape, strides); + + std::vector<std::vector<Tensor>> indices_weights; + + int interp_size = F<index_t, float>::interp_size; + auto input_scalar_type = input.scalar_type(); + + if (interp_size == 1 && input_scalar_type == at::ScalarType::Byte) { + // nearest also supports uint8 tensor, but we have to use float + // with compute_indices_weights + input_scalar_type = at::ScalarType::Float; + } + + AT_DISPATCH_FLOATING_TYPES_AND( + at::ScalarType::Byte, + input_scalar_type, + "compute_indices_weights_generic", + [&] { + indices_weights.emplace_back( + F<index_t, scalar_t>::compute_indices_weights( + input.size(interp_dim), + oshape[interp_dim], + input.stride(interp_dim) * input.element_size(), + input.dim(), + interp_dim, + align_corners, + scales[interp_dim - 2], + antialias, + interp_size)); + }); + + TensorIteratorConfig config; + config.check_all_same_dtype(false) + .declare_static_dtype_and_device(input.scalar_type(), input.device()) + .add_output(output) + .add_input(restrided_input); + + for (auto& idx_weight : indices_weights) { + for (auto& tensor : idx_weight) { + config.add_input(tensor); + } + } + + auto iter = config.build(); + + if (interp_size > 1) { + // Nearest also supports uint8 tensor, so need to handle it separately + AT_DISPATCH_FLOATING_TYPES(iter.dtype(), "upsample_generic_Nd", [&] { + ti_cpu_upsample_generic_aa<scalar_t, index_t, out_ndims>( + iter, interp_size); + }); + } else { + AT_DISPATCH_FLOATING_TYPES_AND( + at::ScalarType::Byte, iter.dtype(), "upsample_generic_Nd", [&] { + ti_cpu_upsample_generic_aa<scalar_t, index_t, out_ndims>( + iter, interp_size); + }); + } +} + +template < + typename index_t, + int out_ndims, + typename scale_type, + template <typename, typename> + class F> +void ti_separable_upsample_generic_Nd_kernel_impl( + Tensor& output, + const Tensor& input, + bool align_corners, + const scale_type& scales, + bool antialias) { + auto temp_oshape = input.sizes().vec(); + at::Tensor temp_output, temp_input = input; + for (int i = 0; i < out_ndims - 1; i++) { + int interp_dim = 2 + out_ndims - 1 - i; + temp_oshape[interp_dim] = output.sizes()[interp_dim]; + temp_output = at::empty(temp_oshape, input.options()); + _ti_separable_upsample_generic_Nd_kernel_impl_single_dim< + index_t, + out_ndims, + scale_t, + F>( + temp_output, temp_input, interp_dim, align_corners, scales, antialias); + temp_input = temp_output; + } + _ti_separable_upsample_generic_Nd_kernel_impl_single_dim< + index_t, + out_ndims, + scale_t, + F>(output, temp_input, 2, align_corners, scales, antialias); +} + +void _ti_upsample_bilinear2d_kernel_impl( + Tensor& output, + const Tensor& input, + bool align_corners, + 
c10::optional<double> scales_h, + c10::optional<double> scales_w, + bool antialias) { + ti_separable_upsample_generic_Nd_kernel_impl< + int64_t, + 2, + scale_t, + HelperInterpLinear>( + output, input, align_corners, {scales_h, scales_w}, antialias); +} + +void _ti_upsample_bicubic2d_kernel_impl( + Tensor& output, + const Tensor& input, + bool align_corners, + c10::optional<double> scales_h, + c10::optional<double> scales_w, + bool antialias) { + ti_separable_upsample_generic_Nd_kernel_impl< + int64_t, + 2, + scale_t, + HelperInterpCubic>( + output, input, align_corners, {scales_h, scales_w}, antialias); +} + +} // namespace internal_upsample +} // namespace native +} // namespace at + +namespace vision { +namespace ops { + +namespace { + +at::Tensor interpolate_linear_aa_forward_kernel( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners) { + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + + c10::optional<c10::ArrayRef<double>> scale_factors = {}; + + // Copied from UpSampleBilinear2d.cpp + auto output = at::empty({0}, input.options()); + auto osize = at::native::upsample::compute_output_size( + input.sizes(), output_size, scale_factors); + auto scale_h = at::native::upsample::get_scale_value(scale_factors, 0); + auto scale_w = at::native::upsample::get_scale_value(scale_factors, 1); + auto full_output_size = + at::native::upsample_2d_common_check(input.sizes(), osize); + + // Allow for empty batch size but not other dimensions + TORCH_CHECK( + input.numel() != 0 || + c10::multiply_integers( + input.sizes().begin() + 1, input.sizes().end()), + "Non-empty 4D data tensor expected but got a tensor with sizes ", + input.sizes()); + + output.resize_(full_output_size, input.suggest_memory_format()); + at::native::internal_upsample::_ti_upsample_bilinear2d_kernel_impl( + output, input, align_corners, scale_h, scale_w, /*antialias=*/true); + return output; +} + +at::Tensor interpolate_bicubic_aa_forward_kernel( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners) { + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + + c10::optional<c10::ArrayRef<double>> scale_factors = {}; + + // Copied from UpSampleBilinear2d.cpp + auto output = at::empty({0}, input.options()); + auto osize = at::native::upsample::compute_output_size( + input.sizes(), output_size, scale_factors); + auto scale_h = at::native::upsample::get_scale_value(scale_factors, 0); + auto scale_w = at::native::upsample::get_scale_value(scale_factors, 1); + auto full_output_size = + at::native::upsample_2d_common_check(input.sizes(), osize); + + // Allow for empty batch size but not other dimensions + TORCH_CHECK( + input.numel() != 0 || + c10::multiply_integers( + input.sizes().begin() + 1, input.sizes().end()), + "Non-empty 4D data tensor expected but got a tensor with sizes ", + input.sizes()); + + output.resize_(full_output_size, input.suggest_memory_format()); + at::native::internal_upsample::_ti_upsample_bicubic2d_kernel_impl( + output, input, align_corners, scale_h, scale_w, /*antialias=*/true); + return output; +} + +// TODO: Implement backward function +// at::Tensor interpolate_linear_aa_backward_kernel( +// const at::Tensor& grad) { +// return grad_input; +// } + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_interpolate_linear_aa"), + TORCH_FN(interpolate_linear_aa_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic_aa"), + 
TORCH_FN(interpolate_bicubic_aa_forward_kernel)); + + // TODO: Implement backward function + // m.impl( + // TORCH_SELECTIVE_NAME("torchvision::_interpolate_linear_aa_backward"), + // TORCH_FN(interpolate_linear_aa_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/nms_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/nms_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..8d12c70a5671a4e51d91f61b7d27fc4b16b6a90c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/nms_kernel.cpp @@ -0,0 +1,116 @@ +#include <ATen/ATen.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +template <typename scalar_t> +at::Tensor nms_kernel_impl( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + TORCH_CHECK(!dets.is_cuda(), "dets must be a CPU tensor"); + TORCH_CHECK(!scores.is_cuda(), "scores must be a CPU tensor"); + TORCH_CHECK( + dets.scalar_type() == scores.scalar_type(), + "dets should have the same type as scores"); + + if (dets.numel() == 0) + return at::empty({0}, dets.options().dtype(at::kLong)); + + auto x1_t = dets.select(1, 0).contiguous(); + auto y1_t = dets.select(1, 1).contiguous(); + auto x2_t = dets.select(1, 2).contiguous(); + auto y2_t = dets.select(1, 3).contiguous(); + + at::Tensor areas_t = (x2_t - x1_t) * (y2_t - y1_t); + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + + auto ndets = dets.size(0); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + + auto suppressed = suppressed_t.data_ptr<uint8_t>(); + auto keep = keep_t.data_ptr<int64_t>(); + auto order = order_t.data_ptr<int64_t>(); + auto x1 = x1_t.data_ptr<scalar_t>(); + auto y1 = y1_t.data_ptr<scalar_t>(); + auto x2 = x2_t.data_ptr<scalar_t>(); + auto y2 = y2_t.data_ptr<scalar_t>(); + auto areas = areas_t.data_ptr<scalar_t>(); + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) + continue; + keep[num_to_keep++] = i; + auto ix1 = x1[i]; + auto iy1 = y1[i]; + auto ix2 = x2[i]; + auto iy2 = y2[i]; + auto iarea = areas[i]; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) + continue; + auto xx1 = std::max(ix1, x1[j]); + auto yy1 = std::max(iy1, y1[j]); + auto xx2 = std::min(ix2, x2[j]); + auto yy2 = std::min(iy2, y2[j]); + + auto w = std::max(static_cast<scalar_t>(0), xx2 - xx1); + auto h = std::max(static_cast<scalar_t>(0), yy2 - yy1); + auto inter = w * h; + auto ovr = inter / (iarea + areas[j] - inter); + if (ovr > iou_threshold) + suppressed[j] = 1; + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor nms_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + TORCH_CHECK( + dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D"); + TORCH_CHECK( + dets.size(1) == 4, + "boxes should have 4 elements in dimension 1, got ", + dets.size(1)); + TORCH_CHECK( + scores.dim() == 1, + "scores should be a 1d tensor, got ", + scores.dim(), + "D"); + TORCH_CHECK( + dets.size(0) == scores.size(0), + "boxes and scores should have same number of elements in ", + "dimension 0, got ", + dets.size(0), + " and ", + scores.size(0)); + + auto result = at::empty({0}, 
dets.options()); + + AT_DISPATCH_FLOATING_TYPES(dets.scalar_type(), "nms_kernel", [&] { + result = nms_kernel_impl<scalar_t>(dets, scores, iou_threshold); + }); + return result; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl(TORCH_SELECTIVE_NAME("torchvision::nms"), TORCH_FN(nms_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..4f78d59ae6b7f928606921844f32f435ed8c89c7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_align_kernel.cpp @@ -0,0 +1,432 @@ +#include <ATen/ATen.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +T bilinear_interpolate( + const T* input, + int height, + int width, + T y, + T x, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template <typename T> +void ps_roi_align_forward_kernel_impl( + int num_rois, + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + const T* rois, + int channels_out, + T* output, + int* channel_mapping) { + for (int n = 0; n < num_rois; n++) { + // [start, end) interval for spatial sampling + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); + T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); + T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); + T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + int c_in = 0; + for (int c_out = 0; c_out < channels_out; ++c_out) { + for (int ph = 0; ph < pooled_height; ++ph) { + for (int pw = 0; pw < pooled_width; ++pw) { + int index = + ((n * channels_out + c_out) * pooled_height + ph) * pooled_width + + pw; + + // Do not using floor/ceil; this implementation detail is critical + T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; + T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = 
(sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); + int roi_bin_grid_w = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_width / pooled_width); + const T count = roi_bin_grid_h * roi_bin_grid_w; + + const T* offset_input = + input + (roi_batch_ind * channels + c_in) * height * width; + + T out_sum = 0; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = hstart + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = wstart + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + T val = bilinear_interpolate( + offset_input, height, width, y, x, index); + out_sum += val; + } + } + + out_sum /= count; + output[index] = out_sum; + channel_mapping[index] = c_in; + c_in++; + } + } + } + } +} + +template <typename T> +void bilinear_interpolate_gradient( + int height, + int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; +} + +template <class T> +inline void add(T* address, const T& val) { + *address += val; +} + +template <typename T> +void ps_roi_align_backward_kernel_impl( + int nthreads, + const T* grad_output, + const int* channel_mapping, + int num_rois, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + int channels_out, + T* grad_input, + const T* rois) { + for (int index = 0; index < nthreads; index++) { + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int n = index / pooled_width / pooled_height / channels_out; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); + T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); + T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); + T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); + + // Force too small ROIs to be 1x1 + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + T bin_size_h = roi_height / static_cast<T>(pooled_height); + T bin_size_w = roi_width / static_cast<T>(pooled_width); + + int c_in = channel_mapping[index]; + T* grad_input_offset = + grad_input + (roi_batch_ind * channels + c_in) * height * width; + + // Do not using floor/ceil; this implementation detail is critical + T hstart = static_cast<T>(ph) * 
bin_size_h + roi_start_h; + T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; + + const T grad_output_this_bin = grad_output[index]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + const T count = roi_bin_grid_h * roi_bin_grid_w; + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = hstart + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = wstart + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + add(grad_input_offset + y_low * width + x_low, g1); + add(grad_input_offset + y_low * width + x_high, g2); + add(grad_input_offset + y_high * width + x_low, g3); + add(grad_input_offset + y_high * width + x_high, g4); + } // if + } // ix + } // iy + } +} + +std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + // Check if input tensors are CPU tensors + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ps_roi_align_forward_kernel"; + at::checkAllSameType(c, {input_t, rois_t}); + + int num_rois = rois.size(0); + int channels = input.size(1); + int height = input.size(2); + int width = input.size(3); + + TORCH_CHECK( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width"); + int channels_out = channels / (pooled_height * pooled_width); + + auto output = at::zeros( + {num_rois, channels_out, pooled_height, pooled_width}, input.options()); + auto channel_mapping = + at::zeros(output.sizes(), input.options().dtype(at::kInt)); + + if (output.numel() == 0) { + return std::make_tuple(output, channel_mapping); + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ps_roi_align_forward_kernel", [&] { + ps_roi_align_forward_kernel_impl<scalar_t>( + num_rois, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr<scalar_t>(), + channels_out, + output.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>()); + }); + return std::make_tuple(output, channel_mapping); +} + +at::Tensor ps_roi_align_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t 
width) { + // Check if input tensors are CPU tensors + TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK( + channel_mapping.device().is_cpu(), + "channel_mapping must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, + channel_mapping_t{channel_mapping, "channel_mapping", 3}; + + at::CheckedFrom c = "ps_roi_align_backward_kernel"; + at::checkAllSameType(c, {grad_t, rois_t}); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + int channels_out = channels / (pooled_height * pooled_width); + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { + ps_roi_align_backward_kernel_impl<scalar_t>( + grad.numel(), + grad_.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + channels_out, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>()); + }); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), + TORCH_FN(ps_roi_align_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), + TORCH_FN(ps_roi_align_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..607cbe4bab6091b287fc9a895dc8e17091c5a29f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/ps_roi_pool_kernel.cpp @@ -0,0 +1,273 @@ +#include <ATen/ATen.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +template <class T> +inline void add(T* address, const T& val) { + *address += val; +} + +template <typename T> +void ps_roi_pool_forward_kernel_impl( + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + const T* rois, + int channels_out, + int num_rois, + T* output, + int* channel_mapping) { + for (int n = 0; n < num_rois; ++n) { + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = round(offset_rois[1] * spatial_scale); + int roi_start_h = round(offset_rois[2] * spatial_scale); + int roi_end_w = round(offset_rois[3] * spatial_scale); + int roi_end_h = round(offset_rois[4] * spatial_scale); + + // Force too small ROIs to be 1x1 + int roi_width = std::max(roi_end_w - roi_start_w, 1); + int roi_height = std::max(roi_end_h - roi_start_h, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + int c_in = 0; + for (int c_out = 0; c_out < channels_out; ++c_out) { + for (int ph = 0; ph < pooled_height; ++ph) { + for (int pw = 0; pw < pooled_width; ++pw) { + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = + 
static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = + static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = std::min(std::max(hstart + roi_start_h, 0), height - 1); + hend = std::min(std::max(hend + roi_start_h, 0), height - 1); + wstart = std::min(std::max(wstart + roi_start_w, 0), width - 1); + wend = std::min(std::max(wend + roi_start_w, 0), width - 1); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + const T* offset_input = + input + (roi_batch_ind * channels + c_in) * height * width; + + T out_sum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_index = h * width + w; + out_sum += offset_input[input_index]; + } + } + + int index = + ((n * channels_out + c_out) * pooled_height + ph) * pooled_width + + pw; + T bin_area = (hend - hstart) * (wend - wstart); + output[index] = is_empty ? static_cast<T>(0) : out_sum / bin_area; + channel_mapping[index] = c_in; + c_in++; + } + } + } + } +} + +template <typename T> +void ps_roi_pool_backward_kernel_impl( + const T* grad_output, + const int* channel_mapping, + int num_rois, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int channels_out, + T* grad_input, + const T* rois) { + for (int n = 0; n < num_rois; ++n) { + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = roundf(offset_rois[1] * spatial_scale); + int roi_start_h = roundf(offset_rois[2] * spatial_scale); + int roi_end_w = roundf(offset_rois[3] * spatial_scale); + int roi_end_h = roundf(offset_rois[4] * spatial_scale); + + // Force too small ROIs to be 1x1 + int roi_width = std::max(roi_end_w - roi_start_w, 1); + int roi_height = std::max(roi_end_h - roi_start_h, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + for (int ph = 0; ph < pooled_height; ++ph) { + for (int pw = 0; pw < pooled_width; ++pw) { + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = std::min(std::max(hstart + roi_start_h, 0), height); + hend = std::min(std::max(hend + roi_start_h, 0), height); + wstart = std::min(std::max(wstart + roi_start_w, 0), width); + wend = std::min(std::max(wend + roi_start_w, 0), width); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + for (int c_out = 0; c_out < channels_out; ++c_out) { + int index = + ((n * channels_out + c_out) * pooled_height + ph) * pooled_width + + pw; + int c_in = channel_mapping[index]; + + T* grad_input_offset = + grad_input + (roi_batch_ind * channels + c_in) * height * width; + T bin_area = (hend - hstart) * (wend - wstart); + T diff_val = + is_empty ? 
static_cast<T>(0) : grad_output[index] / bin_area; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int grad_input_index = h * width + w; + add(grad_input_offset + grad_input_index, diff_val); + } + } + } + } + } + } +} + +std::tuple<at::Tensor, at::Tensor> ps_roi_pool_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + // Check if input tensors are CPU tensors + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ps_roi_pool_forward_kernel"; + at::checkAllSameType(c, {input_t, rois_t}); + + int num_rois = rois.size(0); + int channels = input.size(1); + int height = input.size(2); + int width = input.size(3); + + TORCH_CHECK( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width"); + int channels_out = channels / (pooled_height * pooled_width); + + auto output = at::zeros( + {num_rois, channels_out, pooled_height, pooled_width}, input.options()); + auto channel_mapping = + at::zeros(output.sizes(), input.options().dtype(at::kInt)); + + auto output_size = output.numel(); + if (output_size == 0) { + return std::make_tuple(output, channel_mapping); + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ps_roi_pool_forward_kernel", [&] { + ps_roi_pool_forward_kernel_impl<scalar_t>( + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + rois_.data_ptr<scalar_t>(), + channels_out, + num_rois, + output.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>()); + }); + return std::make_tuple(output, channel_mapping); +} + +at::Tensor ps_roi_pool_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + // Check if input tensors are CPU tensors + TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK( + channel_mapping.device().is_cpu(), + "channel_mapping must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, + channel_mapping_t{channel_mapping, "channel_mapping", 3}; + + at::CheckedFrom c = "ps_roi_pool_backward_kernel"; + at::checkAllSameType(c, {grad_t, rois_t}); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + int channels_out = channels / (pooled_height * pooled_width); + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] { + ps_roi_pool_backward_kernel_impl<scalar_t>( + grad_.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + channels_out, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>()); + }); + 
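+ // grad_input now holds the gradients accumulated from every pooled bin, shaped [batch_size, channels, height, width] to match the original input.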
return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"), + TORCH_FN(ps_roi_pool_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_pool_backward"), + TORCH_FN(ps_roi_pool_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_common.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_common.h new file mode 100644 index 0000000000000000000000000000000000000000..e10c67b5b796eea111930699de551fc210886d34 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_common.h @@ -0,0 +1,128 @@ +#pragma once + +#include <ATen/ATen.h> + +namespace vision { +namespace ops { +namespace detail { + +template <typename T> +struct PreCalc { + int pos1; + int pos2; + int pos3; + int pos4; + T w1; + T w2; + T w3; + T w4; +}; + +// This helper computes the interpolation weights (w1, w2...) for every sampling +// point of a given box. There are pool_height * pool_width * roi_bin_grid_h * +// roi_bin_grid_w such sampling points. +// +// The weights (w1, w2...) are computed as the areas in this figure: +// https://en.wikipedia.org/wiki/Bilinear_interpolation#/media/File:Bilinear_interpolation_visualisation.svg +// and pos1, pos2 etc correspond to the indices of their respective pixels. +// +// Note: the weights and indices are shared across all channels, which is why +// they are pre-calculated prior to the main loop in the RoIAlign kernel. +// implementation taken from Caffe2 +template <typename T> +void pre_calc_for_bilinear_interpolate( + int height, + int width, + int pooled_height, + int pooled_width, + T roi_start_h, + T roi_start_w, + T bin_size_h, + T bin_size_w, + int roi_bin_grid_h, + int roi_bin_grid_w, + std::vector<PreCalc<T>>& pre_calc) { + int pre_calc_index = 0; + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T yy = roi_start_h + ph * bin_size_h + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T xx = roi_start_w + pw * bin_size_w + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T x = xx; + T y = yy; + // deal with: inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + PreCalc<T> pc; + pc.pos1 = 0; + pc.pos2 = 0; + pc.pos3 = 0; + pc.pos4 = 0; + pc.w1 = 0; + pc.w2 = 0; + pc.w3 = 0; + pc.w4 = 0; + pre_calc[pre_calc_index] = pc; + pre_calc_index += 1; + continue; + } + + if (y <= 0) { + y = 0; + } + if (x <= 0) { + x = 0; + } + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + // save weights and indices + PreCalc<T> pc; + pc.pos1 = y_low * width + x_low; + pc.pos2 = y_low * width + x_high; + pc.pos3 = y_high * width + x_low; + pc.pos4 = y_high * width + x_high; + pc.w1 = w1; + pc.w2 = w2; + pc.w3 = w3; + pc.w4 = w4; + pre_calc[pre_calc_index] = pc; + + pre_calc_index += 1; + } + } + } + } +} + +} // namespace detail +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..e6684e953d07b0370f4e65fd119624df7e3a8d2e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_align_kernel.cpp @@ -0,0 +1,400 @@ +#include <ATen/ATen.h> +#include <torch/library.h> + +#include "./roi_align_common.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +void roi_align_forward_kernel_impl( + int n_rois, + const T* input, + const T& spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + bool aligned, + const T* rois, + T* output) { + // (n, c, ph, pw) is an element in the pooled output + // can be parallelized using omp + // #pragma omp parallel for num_threads(32) + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { + // Force malformed ROIs to be 1x1 + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros. + const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + // we want to precalculate indices and weights shared by all chanels, + // this is the key point of optimization + std::vector<detail::PreCalc<T>> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + detail::pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + detail::PreCalc<T> pc = pre_calc[pre_calc_index]; + output_val += pc.w1 * offset_input[pc.pos1] + + pc.w2 * offset_input[pc.pos2] + + pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4]; + + pre_calc_index += 1; + } + } + output_val /= count; // Average pooling + + output[index] = output_val; + } // for pw + } // for ph + } // for c + } // for n +} + +template <typename T> +void bilinear_interpolate_gradient( + int height, + int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; +} + +template <class T> +inline void add(T* address, const T& val) { + *address += val; +} + +template <typename T> +void roi_align_backward_kernel_impl( + int nthreads, + const T* grad_output, + const T& spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + bool aligned, + T* grad_input, + const T* rois, + int n_stride, + int c_stride, + int h_stride, + int w_stride) { + for (int index = 0; index < nthreads; index++) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T offset = aligned ? 
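+ // aligned == true applies a half-pixel shift to the box coordinates (continuous coordinate convention); aligned == false skips the shift and clamps malformed boxes to 1x1 below.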
(T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { + // Force malformed ROIs to be 1x1 + roi_width = std::max(roi_width, (T)1.); + roi_height = std::max(roi_height, (T)1.); + } + + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = roi_start_h + ph * bin_size_h + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + // atomic add is not needed for now since it is single threaded + add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1)); + add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2)); + add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3)); + add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4)); + } // if + } // ix + } // iy + } // for +} + +at::Tensor roi_align_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned) { + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_align_forward_kernel"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + if (output.numel() == 0) + return output; + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_align_forward_kernel", [&] { + 
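+ // AT_DISPATCH_FLOATING_TYPES_AND_HALF binds scalar_t to the input's dtype (float, double, or at::Half) and runs the matching instantiation of the templated kernel below.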
roi_align_forward_kernel_impl<scalar_t>( + num_rois, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + aligned, + rois_.data_ptr<scalar_t>(), + output.data_ptr<scalar_t>()); + }); + return output; +} + +at::Tensor roi_align_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned) { + TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_align_backward_kernel"; + at::checkAllSameType(c, {grad_t, rois_t}); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "roi_align_backward_kernel", [&] { + roi_align_backward_kernel_impl<scalar_t>( + grad.numel(), + grad.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + aligned, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_align"), + TORCH_FN(roi_align_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_align_backward"), + TORCH_FN(roi_align_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_pool_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_pool_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..b099523896a439322c6e6e7049c084fce662f717 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cpu/roi_pool_kernel.cpp @@ -0,0 +1,249 @@ +#include <float.h> + +#include <ATen/ATen.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +template <class T> +inline void add(T* address, const T& val) { + *address += val; +} + +template <typename T> +void roi_pool_forward_kernel_impl( + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + const T* rois, + int num_rois, + T* output, + int* argmax_data) { + for (int n = 0; n < num_rois; ++n) { + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = round(offset_rois[1] * spatial_scale); + int roi_start_h = round(offset_rois[2] * spatial_scale); + int roi_end_w = round(offset_rois[3] * spatial_scale); + int roi_end_h = round(offset_rois[4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + int roi_width = std::max(roi_end_w - roi_start_w + 1, 1); + int roi_height = std::max(roi_end_h - roi_start_h + 1, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = 
static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + for (int ph = 0; ph < pooled_height; ++ph) { + for (int pw = 0; pw < pooled_width; ++pw) { + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = std::min(std::max(hstart + roi_start_h, 0), height); + hend = std::min(std::max(hend + roi_start_h, 0), height); + wstart = std::min(std::max(wstart + roi_start_w, 0), width); + wend = std::min(std::max(wend + roi_start_w, 0), width); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + for (int c = 0; c < channels; ++c) { + // Define an empty pooling region to be zero + T maxval = is_empty ? 0 : -FLT_MAX; + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + int maxidx = -1; + + const T* input_offset = + input + (roi_batch_ind * channels + c) * height * width; + + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_index = h * width + w; + if (input_offset[input_index] > maxval) { + maxval = input_offset[input_index]; + maxidx = input_index; + } + } + } + int index = + ((n * channels + c) * pooled_height + ph) * pooled_width + pw; + output[index] = maxval; + argmax_data[index] = maxidx; + } // channels + } // pooled_width + } // pooled_height + } // num_rois +} + +template <typename T> +void roi_pool_backward_kernel_impl( + const T* grad_output, + const int* argmax_data, + int num_rois, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + T* grad_input, + const T* rois, + int n_stride, + int c_stride, + int h_stride, + int w_stride) { + for (int n = 0; n < num_rois; ++n) { + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + for (int c = 0; c < channels; ++c) { + T* grad_input_offset = + grad_input + ((roi_batch_ind * channels + c) * height * width); + const int* argmax_data_offset = + argmax_data + (n * channels + c) * pooled_height * pooled_width; + + for (int ph = 0; ph < pooled_height; ++ph) { + for (int pw = 0; pw < pooled_width; ++pw) { + int output_offset = n * n_stride + c * c_stride; + int argmax = argmax_data_offset[ph * pooled_width + pw]; + + if (argmax != -1) { + add(grad_input_offset + argmax, + static_cast<T>( + grad_output + [output_offset + ph * h_stride + pw * w_stride])); + } + } // pooled_width + } // pooled_height + } // channels + } // num_rois +} + +std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_pool_forward_kernel"; + at::checkAllSameType(c, {input_t, rois_t}); + + int num_rois = rois.size(0); + int channels = input.size(1); + int height = input.size(2); + int width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + at::Tensor argmax = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, + input.options().dtype(at::kInt)); + + if (output.numel() == 0) { + return std::make_tuple(output, 
argmax); + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_pool_forward_kernel", [&] { + roi_pool_forward_kernel_impl<scalar_t>( + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + rois_.data_ptr<scalar_t>(), + num_rois, + output.data_ptr<scalar_t>(), + argmax.data_ptr<int>()); + }); + return std::make_tuple(output, argmax); +} + +at::Tensor roi_pool_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + // Check if input tensors are CPU tensors + TORCH_CHECK(grad.device().is_cpu(), "grad must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK(argmax.device().is_cpu(), "argmax must be a CPU tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_pool_backward_kernel"; + at::checkAllSameType(c, {grad_t, rois_t}); + + auto num_rois = rois.size(0); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + // handle possibly empty gradients + if (grad.numel() == 0) { + return grad_input; + } + + // get stride values to ensure indexing into gradients is correct. + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "roi_pool_backward_kernel", [&] { + roi_pool_backward_kernel_impl<scalar_t>( + grad.data_ptr<scalar_t>(), + argmax.data_ptr<int>(), + num_rois, + channels, + height, + width, + pooled_height, + pooled_width, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_pool"), + TORCH_FN(roi_pool_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"), + TORCH_FN(roi_pool_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/cuda_helpers.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/cuda_helpers.h new file mode 100644 index 0000000000000000000000000000000000000000..cec4a183899e53b667473026763f5c4e6a166321 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/cuda_helpers.h @@ -0,0 +1,16 @@ +#pragma once + +namespace vision { +namespace ops { + +#define CUDA_1D_KERNEL_LOOP(i, n) \ + for (int i = (blockIdx.x * blockDim.x) + threadIdx.x; i < (n); \ + i += (blockDim.x * gridDim.x)) + +template <typename integer> +constexpr __host__ __device__ inline integer ceil_div(integer n, integer m) { + return (n + m - 1) / m; +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/deform_conv2d_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/deform_conv2d_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..6f257322b8502330a3ca9090580e78ffd3f8e2bb --- /dev/null +++ 
b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/deform_conv2d_kernel.cu @@ -0,0 +1,1218 @@ +/*! + ******************* BEGIN Caffe Copyright Notice and Disclaimer + ***************** + * + * COPYRIGHT + * + * All contributions by the University of California: + * Copyright (c) 2014-2017 The Regents of the University of California (Regents) + * All rights reserved. + * + * All other contributions: + * Copyright (c) 2014-2017, the respective contributors + * All rights reserved. + * + * Caffe uses a shared copyright model: each contributor holds copyright over + * their contributions to Caffe. The project versioning records all such + * contribution and copyright details. If a contributor wants to further mark + * their specific copyright on a particular contribution, they should indicate + * their copyright solely in the commit message of the change when it is + * committed. + * + * LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * 1. Redistributions of source code must retain the above copyright notice, + *this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright notice, + * this list of conditions and the following disclaimer in the documentation + * and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE + *FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + *DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + *SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + *CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, + *OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + *OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * CONTRIBUTION AGREEMENT + * + * By contributing to the BVLC/caffe repository through pull-request, comment, + * or otherwise, the contributor releases their content to the + * license and copyright terms herein. + * + ***************** END Caffe Copyright Notice and Disclaimer + ********************* + * + * Copyright (c) 2018 Microsoft + * Licensed under The MIT License [see LICENSE for details] + * \file modulated_deformable_im2col.cuh + * \brief Function definitions of converting an image to + * column matrix based on kernel, padding, dilation, and offset. + * These functions are mainly used in deformable convolution operators. 
+ * \ref: https://arxiv.org/abs/1703.06211 + * \author Yuwen Xiong, Haozhi Qi, Jifeng Dai, Xizhou Zhu, Han Hu, Dazhi Cheng + */ + +// modified from +// https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch/blob/mmdetection/mmdet/ops/dcn/src/deform_conv_cuda_kernel.cu + +// modified from +// https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/dcn/src/deform_conv_cuda.cpp + +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <torch/library.h> +#include <THC/THCAtomics.cuh> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +const int kMaxParallelImgs = 32; + +inline unsigned int GET_THREADS() { +#ifdef __HIP_PLATFORM_HCC__ + return 256; +#endif + if (at::cuda::getCurrentDeviceProperties()->major >= 6) { + return 1024; + } + return 512; +} + +inline unsigned int GET_BLOCKS( + const unsigned int THREADS, + const unsigned int N) { + unsigned int kMaxGridNum = + at::cuda::getCurrentDeviceProperties()->maxGridSize[0]; + return std::min(kMaxGridNum, (N + THREADS - 1) / THREADS); +} + +template <typename scalar_t> +__device__ scalar_t bilinear_interpolate( + const scalar_t* in, + int height, + int width, + scalar_t h, + scalar_t w) { + if (h <= -1 || height <= h || w <= -1 || width <= w) { + return 0; + } + + int h_low = floor(h); + int w_low = floor(w); + int h_high = h_low + 1; + int w_high = w_low + 1; + + scalar_t lh = h - h_low; + scalar_t lw = w - w_low; + scalar_t hh = 1 - lh, hw = 1 - lw; + + scalar_t v1 = 0; + if (h_low >= 0 && w_low >= 0) + v1 = in[h_low * width + w_low]; + scalar_t v2 = 0; + if (h_low >= 0 && w_high <= width - 1) + v2 = in[h_low * width + w_high]; + scalar_t v3 = 0; + if (h_high <= height - 1 && w_low >= 0) + v3 = in[h_high * width + w_low]; + scalar_t v4 = 0; + if (h_high <= height - 1 && w_high <= width - 1) + v4 = in[h_high * width + w_high]; + + scalar_t w1 = hh * hw, w2 = hh * lw, w3 = lh * hw, w4 = lh * lw; + + scalar_t val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + return val; +} + +template <typename scalar_t> +__global__ void deformable_im2col_kernel( + int n, + const scalar_t* input_ptr, + const scalar_t* offset_ptr, + const scalar_t* mask_ptr, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int n_in_channels, + int n_offset_grps, + int out_h, + int out_w, + bool use_mask, + scalar_t* columns_ptr) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int out_x = index % out_w; + const int out_y = (index / out_w) % out_h; + const int out_b = (index / (out_w * out_h)) % batch_sz; + const int in_c = index / (out_w * out_h * batch_sz); + const int out_c = in_c * weight_h * weight_w; + + int c_per_offset_grp = n_in_channels / n_offset_grps; + const int grp_idx = in_c / c_per_offset_grp; + + columns_ptr += + (out_c * (batch_sz * out_h * out_w) + out_b * (out_h * out_w) + + out_y * out_w + out_x); + + input_ptr += + (out_b * (n_in_channels * height * width) + in_c * (height * width)); + + offset_ptr += (out_b * n_offset_grps + grp_idx) * 2 * weight_h * weight_w * + out_h * out_w; + + if (use_mask) { + mask_ptr += (out_b * n_offset_grps + grp_idx) * weight_h * weight_w * + out_h * out_w; + } + + for (int i = 0; i < weight_h; ++i) { + for (int j = 0; j < weight_w; ++j) { + const int mask_idx = i * weight_w + j; + const int offset_idx = 2 * mask_idx; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = + mask_ptr[mask_idx * (out_h * out_w) + 
out_y * out_w + out_x]; + } + + const scalar_t offset_h = + offset_ptr[offset_idx * (out_h * out_w) + out_y * out_w + out_x]; + const scalar_t offset_w = offset_ptr + [(offset_idx + 1) * (out_h * out_w) + out_y * out_w + out_x]; + const scalar_t y = + (out_y * stride_h - pad_h) + i * dilation_h + offset_h; + const scalar_t x = + (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + *columns_ptr = + mask_value * bilinear_interpolate(input_ptr, height, width, y, x); + columns_ptr += batch_sz * out_h * out_w; + } + } + } +} + +void deformable_im2col( + const at::Tensor& input, + const at::Tensor& data_offset, + const at::Tensor& data_mask, + int n_in_channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int out_h, + int out_w, + int parallel_imgs, + int deformable_group, + bool use_mask, + at::Tensor data_col) { + int num_kernels = n_in_channels * out_h * out_w * parallel_imgs; + + const unsigned int threads = GET_THREADS(); + const unsigned int blocks = GET_BLOCKS(threads, num_kernels); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "deformable_im2col", ([&] { + deformable_im2col_kernel<<<blocks, threads>>>( + num_kernels, + input.data_ptr<scalar_t>(), + data_offset.data_ptr<scalar_t>(), + data_mask.data_ptr<scalar_t>(), + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + n_in_channels, + deformable_group, + out_h, + out_w, + use_mask, + data_col.data_ptr<scalar_t>()); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in deformable_im2col: %s\n", cudaGetErrorString(err)); + } +} + +int get_greatest_divisor_below_bound(int n, int bound) { + for (int k = bound; k > 1; --k) { + if (n % k == 0) { + return k; + } + } + return 1; +} + +template <typename scalar_t> +__global__ void deformable_col2im_kernel( + int n, + const scalar_t* col, + const scalar_t* offset_ptr, + const scalar_t* mask_ptr, + int channels, + int height, + int width, + int kernel_h, + int kernel_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int n_offset_grps, + int out_h, + int out_w, + bool use_mask, + scalar_t* grad_im) { + CUDA_1D_KERNEL_LOOP(index, n) { + const int out_x = index % out_w; + const int out_y = (index / out_w) % out_h; + const int b = (index / (out_w * out_h)) % batch_sz; + const int j = (index / (out_w * out_h * batch_sz)) % kernel_w; + const int i = (index / (out_w * out_h * batch_sz * kernel_w)) % kernel_h; + const int c = index / (out_w * out_h * batch_sz * kernel_w * kernel_h); + + int c_per_offset_grp = channels / n_offset_grps; + const int offset_grp = c / c_per_offset_grp; + + offset_ptr += (b * n_offset_grps + offset_grp) * 2 * kernel_h * kernel_w * + out_h * out_w; + + if (use_mask) { + mask_ptr += (b * n_offset_grps + offset_grp) * kernel_h * kernel_w * + out_h * out_w; + } + + const int mask_idx = i * kernel_w + j; + const int offset_idx = 2 * mask_idx; + + const int offset_h_ptr = ((offset_idx)*out_h + out_y) * out_w + out_x; + const int offset_w_ptr = ((offset_idx + 1) * out_h + out_y) * out_w + out_x; + + const scalar_t offset_h = offset_ptr[offset_h_ptr]; + const scalar_t offset_w = offset_ptr[offset_w_ptr]; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; + } + + const scalar_t y = (out_y * stride_h - pad_h) + 
i * dilation_h + offset_h; + const scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + + for (int dy = -1; dy <= 1; dy++) { + for (int dx = -1; dx <= 1; dx++) { + int yp = int(y) + dy; + int xp = int(x) + dx; + if (0 <= yp && yp < height && 0 <= xp && xp < width && + std::abs(y - yp) < 1 && std::abs(x - xp) < 1) { + int grad_pos = ((b * channels + c) * height + yp) * width + xp; + scalar_t weight = (1 - std::abs(y - yp)) * (1 - std::abs(x - xp)); + atomicAdd(grad_im + grad_pos, mask_value * weight * col[index]); + } + } + } + } +} + +void compute_grad_input( + const at::Tensor& columns, + const at::Tensor& offset, + const at::Tensor& mask, + int channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int parallel_imgs, + int n_offset_grps, + bool use_mask, + at::Tensor grad_im) { + int out_h = + (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + int out_w = + (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + int num_kernels = + channels * weight_h * weight_w * out_h * out_w * parallel_imgs; + + const unsigned int threads = GET_THREADS(); + const unsigned int blocks = GET_BLOCKS(threads, num_kernels); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + columns.scalar_type(), "compute_grad_input", ([&] { + deformable_col2im_kernel<<<blocks, threads>>>( + num_kernels, + columns.data_ptr<scalar_t>(), + offset.data_ptr<scalar_t>(), + mask.data_ptr<scalar_t>(), + channels, + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + n_offset_grps, + out_h, + out_w, + use_mask, + grad_im.data_ptr<scalar_t>()); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf("error in compute_grad_input: %s\n", cudaGetErrorString(err)); + } +} + +template <typename scalar_t> +__device__ scalar_t get_coordinate_weight( + const scalar_t* im_data, + int height, + int width, + scalar_t y, + scalar_t x, + bool is_y_direction) { + int y_l = floor(y); + int x_l = floor(x); + int y_h = y_l + 1; + int x_h = x_l + 1; + + bool valid_y_l = 0 <= y_l && y_l < height; + bool valid_y_h = 0 <= y_h && y_h < height; + bool valid_x_l = 0 <= x_l && x_l < width; + bool valid_x_h = 0 <= x_h && x_h < width; + + scalar_t zero = 0; + scalar_t v_yx = (valid_y_l && valid_x_l) ? im_data[y_l * width + x_l] : zero; + scalar_t v_yX = (valid_y_l && valid_x_h) ? im_data[y_l * width + x_h] : zero; + scalar_t v_Yx = (valid_y_h && valid_x_l) ? im_data[y_h * width + x_l] : zero; + scalar_t v_YX = (valid_y_h && valid_x_h) ? 
im_data[y_h * width + x_h] : zero; + + if (is_y_direction) { + scalar_t dx = x - x_l; + return dx * (v_YX - v_yX) + (1 - dx) * (v_Yx - v_yx); + } else { + scalar_t dy = y - y_l; + return dy * (v_YX - v_Yx) + (1 - dy) * (v_yX - v_yx); + } +} + +template <typename scalar_t> +__global__ void deformable_col2im_coord_kernel( + int n, + const scalar_t* col_ptr, + const scalar_t* im_ptr, + const scalar_t* offset_ptr, + const scalar_t* mask_ptr, + int channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int batch_sz, + int offset_channels, + int n_offset_grps, + int out_h, + int out_w, + const bool use_mask, + scalar_t* grad_offset, + scalar_t* grad_mask) { + CUDA_1D_KERNEL_LOOP(index, n) { + scalar_t grad_offset_val = 0; + scalar_t grad_mask_val = 0; + + int w = index % out_w; + int h = (index / out_w) % out_h; + int w_w = (index / (out_w * out_h * 2)) % weight_w; + int w_h = (index / (out_w * out_h * 2 * weight_w)) % weight_h; + int c = (index / (out_w * out_h)) % offset_channels; + int b = index / (out_w * out_h * offset_channels); + + const int offset_grp = c / (2 * weight_h * weight_w); + const int col_step = weight_h * weight_w; + + int c_per_offset_grp = channels / n_offset_grps; + + col_ptr += offset_grp * c_per_offset_grp * weight_h * weight_w * batch_sz * + out_w * out_h; + im_ptr += + (b * n_offset_grps + offset_grp) * c_per_offset_grp * height * width; + offset_ptr += (b * n_offset_grps + offset_grp) * 2 * weight_h * weight_w * + out_h * out_w; + + if (use_mask) { + mask_ptr += (b * n_offset_grps + offset_grp) * weight_h * weight_w * + out_h * out_w; + } + + const int offset_c = c - offset_grp * 2 * weight_h * weight_w; + const bool is_y_direction = offset_c % 2 == 0; + + const int c_bound = c_per_offset_grp * weight_h * weight_w; + for (int col_c = (offset_c / 2); col_c < c_bound; col_c += col_step) { + const int col_pos = (((col_c * batch_sz + b) * out_h) + h) * out_w + w; + + int out_x = col_pos % out_w; + int out_y = (col_pos / out_w) % out_h; + int j = (col_pos / (out_w * out_h * batch_sz)) % weight_w; + int i = (col_pos / (out_w * out_h * batch_sz * weight_w)) % weight_h; + + const int mask_idx = i * weight_w + j; + + const int offset_h_ptr = + (((2 * mask_idx) * out_h + out_y) * out_w + out_x); + const int offset_w_ptr = + (((2 * mask_idx + 1) * out_h + out_y) * out_w + out_x); + const scalar_t offset_h = offset_ptr[offset_h_ptr]; + const scalar_t offset_w = offset_ptr[offset_w_ptr]; + + scalar_t mask_value = 1; + if (use_mask) { + mask_value = mask_ptr[(mask_idx * out_h + out_y) * out_w + out_x]; + } + + scalar_t y = (out_y * stride_h - pad_h) + i * dilation_h + offset_h; + scalar_t x = (out_x * stride_w - pad_w) + j * dilation_w + offset_w; + + const scalar_t weight = + get_coordinate_weight(im_ptr, height, width, y, x, is_y_direction); + grad_offset_val += mask_value * weight * col_ptr[col_pos]; + + if (use_mask && is_y_direction) { + grad_mask_val += col_ptr[col_pos] * + bilinear_interpolate(im_ptr, height, width, y, x); + } + + im_ptr += height * width; + } + + grad_offset[index] = grad_offset_val; + + if (use_mask && is_y_direction) { + const int idx = + ((((b * n_offset_grps + offset_grp) * weight_h + w_h) * weight_w + + w_w) * + out_h + + h) * + out_w + + w; + grad_mask[idx] = grad_mask_val; + } + } +} + +void compute_grad_offset_and_mask( + const at::Tensor& columns, + const at::Tensor& input, + const at::Tensor& offset, + const at::Tensor& mask, + int 
channels, + int height, + int width, + int weight_h, + int weight_w, + int pad_h, + int pad_w, + int stride_h, + int stride_w, + int dilation_h, + int dilation_w, + int parallel_imgs, + int n_offset_grps, + bool use_mask, + at::Tensor grad_offset, + at::Tensor grad_mask) { + int out_h = + (height + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + int out_w = + (width + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + int num_kernels = + out_h * out_w * 2 * weight_h * weight_w * n_offset_grps * parallel_imgs; + + const unsigned int threads = GET_THREADS(); + const unsigned int blocks = GET_BLOCKS(threads, num_kernels); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + columns.scalar_type(), "compute_grad_offset_and_mask", ([&] { + deformable_col2im_coord_kernel<<<blocks, threads>>>( + num_kernels, + columns.data_ptr<scalar_t>(), + input.data_ptr<scalar_t>(), + offset.data_ptr<scalar_t>(), + mask.data_ptr<scalar_t>(), + channels, + height, + width, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + parallel_imgs, + 2 * weight_h * weight_w * n_offset_grps, + n_offset_grps, + out_h, + out_w, + use_mask, + grad_offset.data_ptr<scalar_t>(), + grad_mask.data_ptr<scalar_t>()); + })); + + cudaError_t err = cudaGetLastError(); + if (err != cudaSuccess) { + printf( + "error in compute_grad_offset_and_mask: %s\n", cudaGetErrorString(err)); + } +} + +std::tuple<at::Tensor, at::Tensor, at::Tensor> backward_gradient_inputs( + at::Tensor input, + at::Tensor weight, + at::Tensor offset, + at::Tensor mask, + at::Tensor grad_out, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int n_weight_grps, + int n_offset_grps, + int n_parallel_imgs, + bool use_mask) { + at::DeviceGuard guard(input.device()); + + int batch_sz = input.size(0); + long n_in_channels = input.size(1); + long in_h = input.size(2); + long in_w = input.size(3); + + n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); + + long n_out_channels = weight.size(0); + int weight_h = weight.size(2); + int weight_w = weight.size(3); + + long out_w = + (in_w + 2 * pad_w - (dilation_w * (weight_w - 1) + 1)) / stride_w + 1; + long out_h = + (in_h + 2 * pad_h - (dilation_h * (weight_h - 1) + 1)) / stride_h + 1; + + auto grad_input = at::zeros_like(input); + auto grad_offset = at::zeros_like(offset); + auto grad_mask = at::zeros_like(mask); + + if (batch_sz == 0) { + return std::make_tuple(grad_input, grad_offset, grad_mask); + } + + auto columns = at::empty( + {n_in_channels * weight_w * weight_h, n_parallel_imgs * out_h * out_w}, + input.options()); + + // Separate into blocks + grad_input = grad_input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + input = input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + + grad_offset = grad_offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + offset = offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + grad_mask = grad_mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + mask = mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + grad_out = grad_out + .reshape( + {batch_sz / n_parallel_imgs, + 
n_parallel_imgs, + n_weight_grps, + n_out_channels / n_weight_grps, + out_h, + out_w}) + .permute({0, 2, 3, 1, 4, 5}); + + weight = weight.reshape( + {n_weight_grps, + weight.size(0) / n_weight_grps, + weight.size(1), + weight.size(2), + weight.size(3)}); + + columns = columns.view( + {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); + for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { + columns.zero_(); + // Separate into weight groups + for (int g = 0; g < n_weight_grps; g++) { + columns[g] = columns[g].addmm_( + weight[g].flatten(1).transpose(0, 1), grad_out[elt][g].flatten(1)); + } + + compute_grad_offset_and_mask( + columns, + input[elt], + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + grad_offset[elt], + grad_mask[elt]); + + compute_grad_input( + columns, + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + grad_input[elt]); + } + + grad_input = grad_input.view({batch_sz, n_in_channels, in_h, in_w}); + grad_offset = grad_offset.view( + {batch_sz, n_offset_grps * 2 * weight_h * weight_w, out_h, out_w}); + + if (use_mask) { + grad_mask = grad_mask.view( + {batch_sz, n_offset_grps * weight_h * weight_w, out_h, out_w}); + } + + return std::make_tuple(grad_input, grad_offset, grad_mask); +} + +at::Tensor backward_gradient_parameters( + at::Tensor input, + const at::Tensor& weight, + at::Tensor offset, + at::Tensor mask, + const at::Tensor& grad_out, + int stride_h, + int stride_w, + int pad_h, + int pad_w, + int dilation_h, + int dilation_w, + int n_weight_grps, + int n_offset_grps, + int n_parallel_imgs, + bool use_mask) { + at::DeviceGuard guard(input.device()); + + int batch_sz = input.size(0); + long n_in_channels = input.size(1); + long in_h = input.size(2); + long in_w = input.size(3); + + n_parallel_imgs = std::min(batch_sz, n_parallel_imgs); + + long n_out_channels = weight.size(0); + int weight_h = weight.size(2); + int weight_w = weight.size(3); + + long out_h = grad_out.size(2); + long out_w = grad_out.size(3); + + auto grad_weight = at::zeros_like(weight); + if (batch_sz == 0) { + return grad_weight; + } + + at::Tensor grad_out_buf = grad_out + .reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_weight_grps, + n_out_channels / n_weight_grps, + out_h, + out_w}) + .permute({0, 2, 3, 1, 4, 5}) + .contiguous(); + + input = input.reshape( + {batch_sz / n_parallel_imgs, n_parallel_imgs, n_in_channels, in_h, in_w}); + + offset = offset.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + mask = mask.reshape( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + grad_weight = grad_weight.reshape( + {n_weight_grps, + grad_weight.size(0) / n_weight_grps, + grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3)}); + + auto columns = at::empty( + {n_weight_grps, + n_in_channels * weight_w * weight_h / n_weight_grps, + n_parallel_imgs * out_h * out_w}, + input.options()); + + for (int elt = 0; elt < batch_sz / n_parallel_imgs; elt++) { + deformable_im2col( + input[elt], + offset[elt], + mask[elt], + n_in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + 
stride_w, + dilation_h, + dilation_w, + out_h, + out_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + columns); + + for (int g = 0; g < n_weight_grps; g++) { + grad_weight[g] = + grad_weight[g] + .flatten(1) + .addmm_( + grad_out_buf[elt][g].flatten(1), columns[g].transpose(1, 0)) + .view_as(grad_weight[g]); + } + } + + grad_weight = grad_weight.view( + {grad_weight.size(0) * grad_weight.size(1), + grad_weight.size(2), + grad_weight.size(3), + grad_weight.size(4)}); + return grad_weight; +} + +at::Tensor deform_conv2d_forward_kernel( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t n_weight_grps, + int64_t n_offset_grps, + bool use_mask) { + at::Tensor input_c = input.contiguous(); + at::Tensor offset_c = offset.contiguous(); + at::Tensor weight_c = weight.contiguous(); + at::Tensor mask_c = mask.contiguous(); + at::Tensor bias_c = bias.contiguous(); + + TORCH_CHECK(input_c.ndimension() == 4); + TORCH_CHECK(offset_c.ndimension() == 4); + TORCH_CHECK(!use_mask || mask_c.ndimension() == 4); + TORCH_CHECK(weight_c.ndimension() == 4); + TORCH_CHECK(input_c.is_cuda(), "input must be a CUDA tensor"); + + at::DeviceGuard guard(input_c.device()); + + int batch_sz = input_c.size(0); + int in_channels = input_c.size(1); + int in_h = input_c.size(2); + int in_w = input_c.size(3); + + int n_parallel_imgs = + get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); + + int out_channels = weight_c.size(0); + int weight_h = weight_c.size(2); + int weight_w = weight_c.size(3); + + int ker_h = dilation_h * (weight_h - 1) + 1; + int ker_w = dilation_w * (weight_w - 1) + 1; + int out_h = ((in_h + 2 * pad_h - ker_h) / stride_h) + 1; + int out_w = ((in_w + 2 * pad_w - ker_w) / stride_w) + 1; + + TORCH_CHECK( + weight_h > 0 && weight_w > 0, + "weight_h: ", + weight_h, + " weight_w: ", + weight_w); + TORCH_CHECK( + stride_h > 0 && stride_w > 0, + "stride_h: ", + stride_h, + " stride_w: ", + stride_w); + TORCH_CHECK(pad_h >= 0 && pad_w >= 0, "pad_h: ", pad_h, " pad_w: ", pad_w); + TORCH_CHECK( + dilation_h > 0 && dilation_w > 0, + "dilation_h: ", + dilation_h, + " dilation_w: ", + dilation_w); + + TORCH_CHECK(weight_c.size(1) * n_weight_grps == input_c.size(1)); + TORCH_CHECK(weight_c.size(0) % n_weight_grps == 0); + TORCH_CHECK( + (offset_c.size(1) == n_offset_grps * 2 * weight_h * weight_w), + "offset.shape[1] is not valid: got: ", + offset_c.size(1), + " expected: ", + n_offset_grps * 2 * weight_h * weight_w); + TORCH_CHECK( + (!use_mask || mask_c.size(1) == n_offset_grps * weight_h * weight_w), + "mask.shape[1] is not valid: got: ", + mask_c.size(1), + " expected: ", + n_offset_grps * weight_h * weight_w); + TORCH_CHECK(input_c.size(1) % n_offset_grps == 0); + + TORCH_CHECK( + (offset_c.size(0) == input_c.size(0)), "invalid batch size of offset"); + TORCH_CHECK( + (offset_c.size(2) == out_h && offset_c.size(3) == out_w), + "offset output dims: (", + offset_c.size(2), + ", ", + offset_c.size(3), + ") - ", + "computed output dims: (", + out_h, + ", ", + out_w, + ")"); + TORCH_CHECK( + (mask_c.size(0) == input_c.size(0)), "invalid batch size of mask"); + TORCH_CHECK( + (!use_mask || (mask_c.size(2) == out_h && mask_c.size(3) == out_w)), + "mask output dims: (", + mask_c.size(2), + ", ", + mask_c.size(3), + ") - ", + "computed output dims: (", + out_h, + ", ", + out_w, + ")"); + TORCH_CHECK( + 
out_h > 0 && out_w > 0, + "Calculated output size too small - out_h: ", + out_h, + " out_w: ", + out_w); + + auto out = + at::zeros({batch_sz, out_channels, out_h, out_w}, input_c.options()); + if (batch_sz == 0) { + return out; + } + + // Separate batches into blocks + out = out.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + out_channels, + out_h, + out_w}); + input_c = input_c.view( + {batch_sz / n_parallel_imgs, n_parallel_imgs, in_channels, in_h, in_w}); + + offset_c = offset_c.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * 2 * weight_h * weight_w, + out_h, + out_w}); + + if (use_mask) { + mask_c = mask_c.view( + {batch_sz / n_parallel_imgs, + n_parallel_imgs, + n_offset_grps * weight_h * weight_w, + out_h, + out_w}); + } + + at::Tensor out_buf = at::zeros( + {batch_sz / n_parallel_imgs, + out_channels, + n_parallel_imgs * out_h, + out_w}, + out.options()); + + // Separate channels into convolution groups + out_buf = out_buf.view( + {out_buf.size(0), + n_weight_grps, + out_buf.size(1) / n_weight_grps, + out_buf.size(2), + out_buf.size(3)}); + weight_c = weight_c.view( + {n_weight_grps, + weight_c.size(0) / n_weight_grps, + weight_c.size(1), + weight_c.size(2), + weight_c.size(3)}); + + // Sample points and perform convolution + auto columns = at::zeros( + {in_channels * weight_h * weight_w, n_parallel_imgs * out_h * out_w}, + input_c.options()); + for (int b = 0; b < batch_sz / n_parallel_imgs; b++) { + deformable_im2col( + input_c[b], + offset_c[b], + mask_c[b], + in_channels, + in_h, + in_w, + weight_h, + weight_w, + pad_h, + pad_w, + stride_h, + stride_w, + dilation_h, + dilation_w, + out_h, + out_w, + n_parallel_imgs, + n_offset_grps, + use_mask, + columns); + + columns = columns.view( + {n_weight_grps, columns.size(0) / n_weight_grps, columns.size(1)}); + for (int g = 0; g < n_weight_grps; g++) { + out_buf[b][g] = out_buf[b][g] + .flatten(1) + .addmm_(weight_c[g].flatten(1), columns[g]) + .view_as(out_buf[b][g]); + } + columns = + columns.view({columns.size(0) * columns.size(1), columns.size(2)}); + } + + out_buf = out_buf.view( + {batch_sz / n_parallel_imgs, + out_channels, + n_parallel_imgs, + out_h, + out_w}); + out_buf.transpose_(1, 2); + out.copy_(out_buf); + out = out.view({batch_sz, out_channels, out_h, out_w}); + + return out + bias_c.view({1, out_channels, 1, 1}); +} + +std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> +deform_conv2d_backward_kernel( + const at::Tensor& grad_out, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t n_weight_grps, + int64_t n_offset_grps, + bool use_mask) { + at::Tensor grad_out_c = grad_out.contiguous(); + at::Tensor input_c = input.contiguous(); + at::Tensor weight_c = weight.contiguous(); + at::Tensor offset_c = offset.contiguous(); + at::Tensor mask_c = mask.contiguous(); + at::Tensor bias_c = bias.contiguous(); + + const int batch_sz = input_c.size(0); + const int n_parallel_imgs = + get_greatest_divisor_below_bound(batch_sz, kMaxParallelImgs); + + auto grad_input_and_offset_and_mask = backward_gradient_inputs( + input_c, + weight_c, + offset_c, + mask_c, + grad_out_c, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + n_weight_grps, + n_offset_grps, + n_parallel_imgs, + use_mask); + + auto grad_input = std::get<0>(grad_input_and_offset_and_mask); + 
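+ // backward_gradient_inputs computes grad_input, grad_offset and grad_mask in
+ // a single pass over the batch blocks; the weight gradient is accumulated
+ // separately below via deformable_im2col + addmm, and the bias gradient is
+ // grad_out reduced over the batch and spatial dimensions.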
auto grad_offset = std::get<1>(grad_input_and_offset_and_mask); + auto grad_mask = std::get<2>(grad_input_and_offset_and_mask); + + auto grad_weight = backward_gradient_parameters( + input_c, + weight_c, + offset_c, + mask_c, + grad_out_c, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + n_weight_grps, + n_offset_grps, + n_parallel_imgs, + use_mask); + + auto value = grad_out_c.sum({0, 2, 3}); + auto grad_bias = at::ones_like(bias_c) * value; + + return std::make_tuple( + grad_input, grad_weight, grad_offset, grad_mask, grad_bias); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::deform_conv2d"), + TORCH_FN(deform_conv2d_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_deform_conv2d_backward"), + TORCH_FN(deform_conv2d_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/interpolate_aa_kernels.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/interpolate_aa_kernels.cu new file mode 100644 index 0000000000000000000000000000000000000000..4259fa2b0e887ae04d758f328e628ad9624d5f41 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/interpolate_aa_kernels.cu @@ -0,0 +1,403 @@ +#include <torch/library.h> +// Copied and adapted from +// Adapted from interp.cpp from Caffe util by Pauline Luc +// Originally developed by George Papandreou +#include <ATen/ATen.h> +#include <ATen/AccumulateType.h> +#include <ATen/NativeFunctions.h> +#include <ATen/TensorUtils.h> +#include <ATen/Utils.h> +#include <ATen/cuda/CUDAContext.h> +#include <ATen/cuda/CUDAApplyUtils.cuh> +#include <ATen/native/cuda/KernelUtils.cuh> +#include <ATen/native/cuda/UpSample.cuh> + +// Below is experimental temporary code before merging it to PyTorch +namespace at { +namespace native { +namespace internal_upsample { + +__device__ __forceinline__ size_t +idx(const size_t nc, + const size_t height, + const size_t width, + const size_t y, + const size_t x) { + return (nc * height + y) * width + x; +} + +// taken from +// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ +// src/libImaging/Resample.c#L20-L29 +template <typename accscalar_t> +__device__ __forceinline__ static accscalar_t bilinear_filter(accscalar_t x) { + if (x < 0.0) { + x = -x; + } + if (x < 1.0) { + return static_cast<accscalar_t>(1.0) - x; + } + return static_cast<accscalar_t>(0.0); +} + +// taken from +// https://github.com/python-pillow/Pillow/blob/6812205f18ca4ef54372e87e1a13ce4a859434df/ +// src/libImaging/Resample.c#L46-L62 +template <typename accscalar_t> +__device__ __forceinline__ static accscalar_t bicubic_filter(accscalar_t x) { + // https://en.wikipedia.org/wiki/Bicubic_interpolation#Bicubic_convolution_algorithm +#define a -0.5 + if (x < 0.0) { + x = -x; + } + if (x < 1.0) { + return ((a + 2.0) * x - (a + 3.0)) * x * x + static_cast<accscalar_t>(1.0); + } + if (x < 2.0) { + return (((x - 5) * x + 8) * x - 4) * a; + } + return static_cast<accscalar_t>(0.0); +#undef a +} + +template <typename scalar_t, typename accscalar_t, typename filter_fn_t> +__device__ __forceinline__ static void _compute_weights( + const int64_t i, + const int64_t input_size, + const accscalar_t scale, + const accscalar_t support, + scalar_t* wt_ptr, + int64_t interp_size, + filter_fn_t filter_fn, + int64_t& xmin, + int64_t& xmax) { + accscalar_t invscale = (scale >= 1.0) ? 
1.0 / scale : 1.0; + accscalar_t center = scale * (i + 0.5); + xmin = max( + static_cast<int64_t>(center - support + 0.5), static_cast<int64_t>(0)); + xmax = min(static_cast<int64_t>(center + support + 0.5), input_size) - xmin; + + accscalar_t total_w = 0.0; + int64_t j = 0; + for (j = 0; j < xmax; j++) { + accscalar_t w = filter_fn((j + xmin - center + 0.5) * invscale); + wt_ptr[j] = static_cast<scalar_t>(w); + total_w += w; + } + for (j = 0; j < xmax; j++) { + if (total_w != 0.0) { + wt_ptr[j] /= total_w; + } + } + for (; j < interp_size; j++) { + wt_ptr[j] = static_cast<scalar_t>(0.0); + } +} + +template <typename scalar_t, typename accscalar_t> +__device__ __forceinline__ static accscalar_t interpolate_aa_single_dim( + scalar_t* src, + scalar_t* weights, + int64_t size) { + scalar_t t = static_cast<accscalar_t>(*src); + scalar_t wts = static_cast<accscalar_t>(weights[0]); + accscalar_t output = t * wts; + + int64_t j = 1; + for (; j < size; j++) { + wts = static_cast<accscalar_t>(weights[j]); + t = static_cast<accscalar_t>(*(src + j)); + output += t * wts; + } + return output; +} + +template <typename scalar_t, typename accscalar_t, int interp_size> +C10_LAUNCH_BOUNDS_1(1024) +__global__ void upsample_gen2d_out_frame( + const int n, + const accscalar_t rheight, + const accscalar_t rwidth, + const bool align_corners, + const PackedTensorAccessor64<scalar_t, 4> idata, + PackedTensorAccessor64<scalar_t, 4> odata) { + int index = threadIdx.x + blockIdx.x * blockDim.x; + + const int batchsize = idata.size(0); + const int channels = idata.size(1); + const int height1 = idata.size(2); + const int width1 = idata.size(3); + const int height2 = odata.size(2); + const int width2 = odata.size(3); + + if (index < n) { + const int w2 = index % width2; // 0:width2-1 + const int h2 = index / width2; // 0:height2-1 + // special case: just copy + if (height1 == height2 && width1 == width2) { + const int h1 = h2; + const int w1 = w2; + for (int n = 0; n < batchsize; n++) { + for (int c = 0; c < channels; ++c) { + const scalar_t val = idata[n][c][h1][w1]; + odata[n][c][h2][w2] = val; + } + } + return; + } + + const accscalar_t support_h = static_cast<accscalar_t>( + (rheight >= 1.0) ? (interp_size * 0.5) * rheight : interp_size * 0.5); + const accscalar_t support_w = static_cast<accscalar_t>( + (rwidth >= 1.0) ? 
(interp_size * 0.5) * rwidth : interp_size * 0.5); + + const int interp_height = (int)ceilf(support_h) * 2 + 1; + const int interp_width = (int)ceilf(support_w) * 2 + 1; + + // Setup local buffers + // TODO: maybe we can specify dynamic shared memory size before calling the + // cuda code, however we should then ensure that device has enough shared + // memory + scalar_t wx[256]; + scalar_t wy[256]; + scalar_t buffer1[256]; + scalar_t buffer2[256]; + + // Compute weights + int64_t xmin, xsize, ymin, ysize; + typedef scalar_t (*filter_fn_t)(scalar_t); + if (interp_size == 2) { + _compute_weights<scalar_t, accscalar_t, filter_fn_t>( + w2, + width1, + rwidth, + support_w, + wx, + interp_width, + bilinear_filter, + xmin, + xsize); + _compute_weights<scalar_t, accscalar_t, filter_fn_t>( + h2, + height1, + rheight, + support_h, + wy, + interp_height, + bilinear_filter, + ymin, + ysize); + } else if (interp_size == 4) { + _compute_weights<scalar_t, accscalar_t, filter_fn_t>( + w2, + width1, + rwidth, + support_w, + wx, + interp_width, + bicubic_filter, + xmin, + xsize); + _compute_weights<scalar_t, accscalar_t, filter_fn_t>( + h2, + height1, + rheight, + support_h, + wy, + interp_height, + bicubic_filter, + ymin, + ysize); + } + + for (int n = 0; n < batchsize; n++) { + for (int c = 0; c < channels; ++c) { + // interpolate on x-axis for ymin to ymin + ysize + for (int64_t y = 0; y < ysize; y++) { + // copy data into the local buffer and use + // interpolate_aa_single_dim method + for (int x = 0; x < xsize; x++) { + buffer1[x] = idata[n][c][ymin + y][xmin + x]; + } + + buffer2[y] = static_cast<scalar_t>( + interpolate_aa_single_dim<scalar_t, accscalar_t>( + buffer1, wx, xsize)); + } + odata[n][c][h2][w2] = static_cast<scalar_t>( + interpolate_aa_single_dim<scalar_t, accscalar_t>( + buffer2, wy, ysize)); + } + } + } +} + +template <int interp_size> +static void upsample_gen2d_out_cuda_template( + const Tensor& output, + const Tensor& input, + IntArrayRef output_size, + bool align_corners, + c10::optional<double> scales_h, + c10::optional<double> scales_w) { + TensorArg input_arg{input, "input", 1}, output_arg{output, "output", 2}; + checkAllSameGPU("upsample_gen2d_out_cuda", {input_arg, output_arg}); + + int output_height = output_size[0]; + int output_width = output_size[1]; + + int nbatch = input.size(0); + int channels = input.size(1); + int input_height = input.size(2); + int input_width = input.size(3); + + const int num_kernels = output_height * output_width; + const int num_threads = std::min( + at::cuda::getCurrentDeviceProperties()->maxThreadsPerBlock, 1024); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "upsample_bilinear2d_out_frame", [&] { + using accscalar_t = at::acc_type<scalar_t, true>; + + auto idata = input.packed_accessor64<scalar_t, 4>(); + auto odata = output.packed_accessor64<scalar_t, 4>(); + + const accscalar_t rheight = area_pixel_compute_scale<accscalar_t>( + input_height, output_height, align_corners, scales_h); + const accscalar_t rwidth = area_pixel_compute_scale<accscalar_t>( + input_width, output_width, align_corners, scales_w); + + // We are using static buffer memory of 256 * sizeof(float) per thread + // to store weights. 
Size of weights array is + // interp_size = scale * 2 + 1 for bilinear mode + TORCH_CHECK( + rheight < (255 / interp_size), + "Max supported scale factor is 127 (bilinear), 63 (bicubic)"); + TORCH_CHECK( + rwidth < (255 / interp_size), + "Max supported scale factor is 127 (bilinear), 63 (bicubic)"); + + upsample_gen2d_out_frame<scalar_t, accscalar_t, interp_size> + <<<cuda::ATenCeilDiv(num_kernels, num_threads), + num_threads, + 0, + stream>>>( + num_kernels, rheight, rwidth, align_corners, idata, odata); + C10_CUDA_KERNEL_LAUNCH_CHECK(); + }); +} + +} // namespace internal_upsample +} // namespace native +} // namespace at + +namespace vision { +namespace ops { + +namespace { + +// Copied from "UpSample.h" as we can not use UpSample.h with UpSample.cuh +static std::array<int64_t, 4> upsample_2d_common_check( + at::IntArrayRef input_size, + at::IntArrayRef output_size) { + TORCH_CHECK( + output_size.size() == 2, + "It is expected output_size equals to 2, but got size ", + output_size.size()); + + TORCH_CHECK( + input_size.size() == 4, + "It is expected input_size equals to 4, but got size ", + input_size.size()); + + int64_t output_height = output_size[0]; + int64_t output_width = output_size[1]; + + int64_t nbatch = input_size[0]; + int64_t channels = input_size[1]; + int64_t input_height = input_size[2]; + int64_t input_width = input_size[3]; + + TORCH_CHECK( + input_height > 0 && input_width > 0 && output_height > 0 && + output_width > 0, + "Input and output sizes should be greater than 0," + " but got input (H: ", + input_height, + ", W: ", + input_width, + ") output (H: ", + output_height, + ", W: ", + output_width, + ")"); + + return {nbatch, channels, output_height, output_width}; +} + +template <int interp_size> +at::Tensor interpolate_gen2d_aa_forward_kernel( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners) { + c10::optional<c10::ArrayRef<double>> scale_factors = {}; + + // Copied from UpSampleBilinear2d.cpp + auto output = at::empty({0}, input.options()); + auto osize = at::native::upsample::compute_output_size( + input.sizes(), output_size, scale_factors); + auto scale_h = at::native::upsample_cuda::get_scale_value(scale_factors, 0); + auto scale_w = at::native::upsample_cuda::get_scale_value(scale_factors, 1); + + auto full_output_size = upsample_2d_common_check(input.sizes(), osize); + + // Allow for empty batch size but not other dimensions + TORCH_CHECK( + input.numel() != 0 || + c10::multiply_integers( + input.sizes().begin() + 1, input.sizes().end()), + "Non-empty 4D data tensor expected but got a tensor with sizes ", + input.sizes()); + + output.resize_(full_output_size, input.suggest_memory_format()); + + at::native::internal_upsample::upsample_gen2d_out_cuda_template<interp_size>( + output, + input, + {full_output_size[2], full_output_size[3]}, + align_corners, + scale_h, + scale_w); + return output; +} + +at::Tensor interpolate_linear_aa_forward_kernel( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners) { + return interpolate_gen2d_aa_forward_kernel<2>( + input, output_size, align_corners); +} + +at::Tensor interpolate_bicubic_aa_forward_kernel( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners) { + return interpolate_gen2d_aa_forward_kernel<4>( + input, output_size, align_corners); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_interpolate_linear_aa"), + TORCH_FN(interpolate_linear_aa_forward_kernel)); + m.impl( + 
TORCH_SELECTIVE_NAME("torchvision::_interpolate_bicubic_aa"), + TORCH_FN(interpolate_bicubic_aa_forward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/nms_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/nms_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..ce1ed4ae1f53571cc02dfead9a533819b82822b8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/nms_kernel.cu @@ -0,0 +1,173 @@ +#include <ATen/ATen.h> +#include <ATen/AccumulateType.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <torch/library.h> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +int const threadsPerBlock = sizeof(unsigned long long) * 8; + +template <typename T> +__device__ inline bool devIoU( + T const* const a, + T const* const b, + const float threshold) { + T left = max(a[0], b[0]), right = min(a[2], b[2]); + T top = max(a[1], b[1]), bottom = min(a[3], b[3]); + T width = max(right - left, (T)0), height = max(bottom - top, (T)0); + using acc_T = at::acc_type<T, /*is_cuda=*/true>; + acc_T interS = (acc_T)width * height; + acc_T Sa = ((acc_T)a[2] - a[0]) * (a[3] - a[1]); + acc_T Sb = ((acc_T)b[2] - b[0]) * (b[3] - b[1]); + return (interS / (Sa + Sb - interS)) > threshold; +} + +template <typename T> +__global__ void nms_kernel_impl( + int n_boxes, + double iou_threshold, + const T* dev_boxes, + unsigned long long* dev_mask) { + const int row_start = blockIdx.y; + const int col_start = blockIdx.x; + + if (row_start > col_start) + return; + + const int row_size = + min(n_boxes - row_start * threadsPerBlock, threadsPerBlock); + const int col_size = + min(n_boxes - col_start * threadsPerBlock, threadsPerBlock); + + __shared__ T block_boxes[threadsPerBlock * 4]; + if (threadIdx.x < col_size) { + block_boxes[threadIdx.x * 4 + 0] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 0]; + block_boxes[threadIdx.x * 4 + 1] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 1]; + block_boxes[threadIdx.x * 4 + 2] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 2]; + block_boxes[threadIdx.x * 4 + 3] = + dev_boxes[(threadsPerBlock * col_start + threadIdx.x) * 4 + 3]; + } + __syncthreads(); + + if (threadIdx.x < row_size) { + const int cur_box_idx = threadsPerBlock * row_start + threadIdx.x; + const T* cur_box = dev_boxes + cur_box_idx * 4; + int i = 0; + unsigned long long t = 0; + int start = 0; + if (row_start == col_start) { + start = threadIdx.x + 1; + } + for (i = start; i < col_size; i++) { + if (devIoU<T>(cur_box, block_boxes + i * 4, iou_threshold)) { + t |= 1ULL << i; + } + } + const int col_blocks = ceil_div(n_boxes, threadsPerBlock); + dev_mask[cur_box_idx * col_blocks + col_start] = t; + } +} + +at::Tensor nms_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + TORCH_CHECK(dets.is_cuda(), "dets must be a CUDA tensor"); + TORCH_CHECK(scores.is_cuda(), "scores must be a CUDA tensor"); + + TORCH_CHECK( + dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D"); + TORCH_CHECK( + dets.size(1) == 4, + "boxes should have 4 elements in dimension 1, got ", + dets.size(1)); + TORCH_CHECK( + scores.dim() == 1, + "scores should be a 1d tensor, got ", + scores.dim(), + "D"); + TORCH_CHECK( + dets.size(0) == scores.size(0), + "boxes and scores should have same number of elements in ", + "dimension 
0, got ", + dets.size(0), + " and ", + scores.size(0)) + + at::cuda::CUDAGuard device_guard(dets.device()); + + if (dets.numel() == 0) { + return at::empty({0}, dets.options().dtype(at::kLong)); + } + + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + auto dets_sorted = dets.index_select(0, order_t).contiguous(); + + int dets_num = dets.size(0); + + const int col_blocks = ceil_div(dets_num, threadsPerBlock); + + at::Tensor mask = + at::empty({dets_num * col_blocks}, dets.options().dtype(at::kLong)); + + dim3 blocks(col_blocks, col_blocks); + dim3 threads(threadsPerBlock); + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + dets_sorted.scalar_type(), "nms_kernel", [&] { + nms_kernel_impl<scalar_t><<<blocks, threads, 0, stream>>>( + dets_num, + iou_threshold, + dets_sorted.data_ptr<scalar_t>(), + (unsigned long long*)mask.data_ptr<int64_t>()); + }); + + at::Tensor mask_cpu = mask.to(at::kCPU); + unsigned long long* mask_host = + (unsigned long long*)mask_cpu.data_ptr<int64_t>(); + + std::vector<unsigned long long> remv(col_blocks); + memset(&remv[0], 0, sizeof(unsigned long long) * col_blocks); + + at::Tensor keep = + at::empty({dets_num}, dets.options().dtype(at::kLong).device(at::kCPU)); + int64_t* keep_out = keep.data_ptr<int64_t>(); + + int num_to_keep = 0; + for (int i = 0; i < dets_num; i++) { + int nblock = i / threadsPerBlock; + int inblock = i % threadsPerBlock; + + if (!(remv[nblock] & (1ULL << inblock))) { + keep_out[num_to_keep++] = i; + unsigned long long* p = mask_host + i * col_blocks; + for (int j = nblock; j < col_blocks; j++) { + remv[j] |= p[j]; + } + } + } + + AT_CUDA_CHECK(cudaGetLastError()); + return order_t.index( + {keep.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep) + .to(order_t.device(), keep.scalar_type())}); +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl(TORCH_SELECTIVE_NAME("torchvision::nms"), TORCH_FN(nms_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_align_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_align_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..b9c04fc99738ceb1a50164d8e6516363fe5bb9c0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_align_kernel.cu @@ -0,0 +1,452 @@ +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <torch/library.h> +#include <THC/THCAtomics.cuh> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +__device__ T bilinear_interpolate( + const T* input, + int height, + int width, + T y, + T x, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template <typename T> +__global__ void ps_roi_align_forward_kernel_impl( + int nthreads, + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + const T* rois, + int channels_out, + T* output, + int* channel_mapping) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c_out, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c_out = (index / pooled_width / pooled_height) % channels_out; + int n = index / pooled_width / pooled_height / channels_out; + + // (n, c_in, ph, pw) is the associated element in the input + int c_in = (c_out * pooled_height + ph) * pooled_width + pw; + + // [start, end) interval for spatial sampling + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); + T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); + T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); + T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + // Do not using floor/ceil; this implementation detail is critical + T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; + T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); + int roi_bin_grid_w = + (sampling_ratio > 0) ? 
sampling_ratio : ceil(roi_width / pooled_width); + const T count = roi_bin_grid_h * roi_bin_grid_w; + + const T* offset_input = + input + (roi_batch_ind * channels + c_in) * height * width; + T out_sum = 0; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = hstart + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = wstart + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + T val = bilinear_interpolate(offset_input, height, width, y, x, index); + out_sum += val; + } + } + + out_sum /= count; + output[index] = out_sum; + channel_mapping[index] = c_in; + } +} + +template <typename T> +__device__ void bilinear_interpolate_gradient( + int height, + int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; +} + +template <typename T> +__global__ void ps_roi_align_backward_kernel_impl( + int nthreads, + const T* grad_output, + const int* channel_mapping, + int num_rois, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + int channels_out, + T* grad_input, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, *, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int n = index / pooled_width / pooled_height / channels_out; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T roi_start_w = offset_rois[1] * spatial_scale - static_cast<T>(0.5); + T roi_start_h = offset_rois[2] * spatial_scale - static_cast<T>(0.5); + T roi_end_w = offset_rois[3] * spatial_scale - static_cast<T>(0.5); + T roi_end_h = offset_rois[4] * spatial_scale - static_cast<T>(0.5); + + // Force too small ROIs to be 1x1 + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + T bin_size_h = roi_height / static_cast<T>(pooled_height); + T bin_size_w = roi_width / static_cast<T>(pooled_width); + + int c_in = channel_mapping[index]; + T* grad_input_offset = + grad_input + (roi_batch_ind * channels + c_in) * height * width; + + // Do not using floor/ceil; this implementation detail is critical + T hstart = static_cast<T>(ph) * bin_size_h + roi_start_h; + T wstart = static_cast<T>(pw) * bin_size_w + roi_start_w; + + const T grad_output_this_bin = grad_output[index]; + + // We use roi_bin_grid to sample the 
grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + const T count = roi_bin_grid_h * roi_bin_grid_w; + + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + const T y = hstart + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = wstart + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd(grad_input_offset + y_low * width + x_low, g1); + atomicAdd(grad_input_offset + y_low * width + x_high, g2); + atomicAdd(grad_input_offset + y_high * width + x_low, g3); + atomicAdd(grad_input_offset + y_high * width + x_high, g4); + } // if + } // ix + } // iy + } +} + +std::tuple<at::Tensor, at::Tensor> ps_roi_align_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + // Check if input tensors are CUDA tensors + TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ps_roi_align_forward_kernel"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + TORCH_CHECK( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width"); + int channels_out = channels / (pooled_height * pooled_width); + + auto output = at::zeros( + {num_rois, channels_out, pooled_height, pooled_width}, input.options()); + auto channel_mapping = + at::zeros(output.sizes(), input.options().dtype(at::kInt)); + + auto output_size = output.numel(); + if (output_size == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(output, channel_mapping); + } + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ps_roi_align_forward_kernel", [&] { + ps_roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + output_size, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + rois_.data_ptr<scalar_t>(), + channels_out, + output.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + cudaDeviceSynchronize(); + return std::make_tuple(output, channel_mapping); 
+} + +at::Tensor ps_roi_align_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + // Check if input tensors are CUDA tensors + TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK( + channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, + channel_mapping_t{channel_mapping, "channel_mapping", 3}; + + at::CheckedFrom c = "ps_roi_align_backward_kernel"; + at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + int channels_out = channels / (pooled_height * pooled_width); + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ps_roi_align_backward_kernel", [&] { + ps_roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + grad.numel(), + grad_.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + channels_out, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_align"), + TORCH_FN(ps_roi_align_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_align_backward"), + TORCH_FN(ps_roi_align_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_pool_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_pool_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..85f43fa99a2b6d30cb606fe5c04c2bb2420be9f3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/ps_roi_pool_kernel.cu @@ -0,0 +1,288 @@ +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <torch/library.h> +#include <THC/THCAtomics.cuh> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +__global__ void ps_roi_pool_forward_kernel_impl( + int nthreads, + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + const T* rois, + int channels_out, + T* output, + int* channel_mapping) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c_out, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c_out = (index / pooled_width / pooled_height) % 
channels_out; + int n = index / pooled_width / pooled_height / channels_out; + + // (n, c_in, ph, pw) is the associated element in the input + int c_in = (c_out * pooled_height + ph) * pooled_width + pw; + + // [start, end) interval for spatial sampling + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = roundf(offset_rois[1] * spatial_scale); + int roi_start_h = roundf(offset_rois[2] * spatial_scale); + int roi_end_w = roundf(offset_rois[3] * spatial_scale); + int roi_end_h = roundf(offset_rois[4] * spatial_scale); + + // Force too small ROIs to be 1x1 + int roi_width = max(roi_end_w - roi_start_w, 1); + int roi_height = max(roi_end_h - roi_start_h, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = min(max(hstart + roi_start_h, 0), height - 1); + hend = min(max(hend + roi_start_h, 0), height - 1); + wstart = min(max(wstart + roi_start_w, 0), width - 1); + wend = min(max(wend + roi_start_w, 0), width - 1); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + const T* offset_input = + input + (roi_batch_ind * channels + c_in) * height * width; + T out_sum = 0; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_index = h * width + w; + out_sum += offset_input[input_index]; + } + } + + T bin_area = (hend - hstart) * (wend - wstart); + output[index] = is_empty ? 
static_cast<T>(0) : out_sum / bin_area; + channel_mapping[index] = c_in; + } +} + +template <typename T> +__global__ void ps_roi_pool_backward_kernel_impl( + int nthreads, + const T* grad_output, + const int* channel_mapping, + int num_rois, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int channels_out, + T* grad_input, + const T* rois) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, *, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int n = index / pooled_width / pooled_height / channels_out; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = roundf(offset_rois[1] * spatial_scale); + int roi_start_h = roundf(offset_rois[2] * spatial_scale); + int roi_end_w = roundf(offset_rois[3] * spatial_scale); + int roi_end_h = roundf(offset_rois[4] * spatial_scale); + + // Force too small ROIs to be 1x1 + int roi_width = max(roi_end_w - roi_start_w, 1); + int roi_height = max(roi_end_h - roi_start_h, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = min(max(hstart + roi_start_h, 0), height); + hend = min(max(hend + roi_start_h, 0), height); + wstart = min(max(wstart + roi_start_w, 0), width); + wend = min(max(wend + roi_start_w, 0), width); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + int c_in = channel_mapping[index]; + T* grad_input_offset = + grad_input + (roi_batch_ind * channels + c_in) * height * width; + T bin_area = (hend - hstart) * (wend - wstart); + T diff_val = is_empty ? 
static_cast<T>(0) : grad_output[index] / bin_area; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int grad_input_index = h * width + w; + atomicAdd(grad_input_offset + grad_input_index, diff_val); + } + } + } +} + +std::tuple<at::Tensor, at::Tensor> ps_roi_pool_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + // Check if input tensors are CUDA tensors + TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "ps_roi_pool_forward_kernel"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + TORCH_CHECK( + channels % (pooled_height * pooled_width) == 0, + "input channels must be a multiple of pooling height * pooling width"); + int channels_out = channels / (pooled_height * pooled_width); + + auto output = at::zeros( + {num_rois, channels_out, pooled_height, pooled_width}, input.options()); + auto channel_mapping = + at::zeros(output.sizes(), input.options().dtype(at::kInt)); + + auto output_size = output.numel(); + if (output_size == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(output, channel_mapping); + } + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "ps_roi_pool_forward_kernel", [&] { + ps_roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + output_size, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + rois_.data_ptr<scalar_t>(), + channels_out, + output.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(output, channel_mapping); +} + +at::Tensor ps_roi_pool_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + // Check if input tensors are CUDA tensors + TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK( + channel_mapping.is_cuda(), "channel_mapping must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, + channel_mapping_t{channel_mapping, "channel_mapping", 3}; + + at::CheckedFrom c = "ps_roi_pool_backward_kernel"; + at::checkAllSameGPU(c, {grad_t, rois_t, channel_mapping_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + auto grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + 
ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + int channels_out = channels / (pooled_height * pooled_width); + + auto grad_ = grad.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "ps_roi_pool_backward_kernel", [&] { + ps_roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + grad.numel(), + grad_.data_ptr<scalar_t>(), + channel_mapping.data_ptr<int>(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + channels_out, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::ps_roi_pool"), + TORCH_FN(ps_roi_pool_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_ps_roi_pool_backward"), + TORCH_FN(ps_roi_pool_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_align_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_align_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..48183f4908ef865acdf63c5d244e1e1f260f1d93 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_align_kernel.cu @@ -0,0 +1,461 @@ +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <torch/library.h> +#include <THC/THCAtomics.cuh> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +__device__ T bilinear_interpolate( + const T* input, + int height, + int width, + T y, + T x, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + return 0; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + int y_low = (int)y; + int x_low = (int)x; + int y_high; + int x_high; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. 
- lx; + + // do bilinear interpolation + T v1 = input[y_low * width + x_low]; + T v2 = input[y_low * width + x_high]; + T v3 = input[y_high * width + x_low]; + T v4 = input[y_high * width + x_high]; + T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; + + T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + return val; +} + +template <typename T> +__global__ void roi_align_forward_kernel_impl( + int nthreads, + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + bool aligned, + const T* rois, + T* output) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { + // Force malformed ROIs to be 1x1 + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros. + const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + T output_val = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T val = bilinear_interpolate(offset_input, height, width, y, x, index); + output_val += val; + } + } + output_val /= count; + + output[index] = output_val; + } +} + +template <typename T> +__device__ void bilinear_interpolate_gradient( + int height, + int width, + T y, + T x, + T& w1, + T& w2, + T& w3, + T& w4, + int& x_low, + int& x_high, + int& y_low, + int& y_high, + int index /* index for debug only*/) { + // deal with cases that inverse elements are out of feature map boundary + if (y < -1.0 || y > height || x < -1.0 || x > width) { + // empty + w1 = w2 = w3 = w4 = 0.; + x_low = x_high = y_low = y_high = -1; + return; + } + + if (y <= 0) + y = 0; + if (x <= 0) + x = 0; + + y_low = (int)y; + x_low = (int)x; + + if (y_low >= height - 1) { + y_high = y_low = height - 1; + y = (T)y_low; + } else { + y_high = y_low + 1; + } + + if (x_low >= width - 1) { + x_high = x_low = width - 1; + x = (T)x_low; + } else { + x_high = x_low + 1; + } + + T ly = y - y_low; + T lx = x - x_low; + T hy = 1. - ly, hx = 1. - lx; + + // reference in forward + // T v1 = input[y_low * width + x_low]; + // T v2 = input[y_low * width + x_high]; + // T v3 = input[y_high * width + x_low]; + // T v4 = input[y_high * width + x_high]; + // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4); + + w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx; +} + +template <typename T> +__global__ void roi_align_backward_kernel_impl( + int nthreads, + const T* grad_output, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + bool aligned, + T* grad_input, + const T* rois, + int n_stride, + int c_stride, + int h_stride, + int w_stride) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + + // Do not using rounding; this implementation detail is critical + T offset = aligned ? (T)0.5 : (T)0.0; + T roi_start_w = offset_rois[1] * spatial_scale - offset; + T roi_start_h = offset_rois[2] * spatial_scale - offset; + T roi_end_w = offset_rois[3] * spatial_scale - offset; + T roi_end_h = offset_rois[4] * spatial_scale - offset; + + T roi_width = roi_end_w - roi_start_w; + T roi_height = roi_end_h - roi_start_h; + if (!aligned) { + // Force malformed ROIs to be 1x1 + roi_width = max(roi_width, (T)1.); + roi_height = max(roi_height, (T)1.); + } + + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + T* offset_grad_input = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + // We need to index the gradient using the tensor strides to access the + // correct values. 
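+ // grad_output is not made contiguous on the host side, so element
+ // (n, c, ph, pw) is read at n * n_stride + c * c_stride + ph * h_stride +
+ // pw * w_stride instead of assuming a dense NCHW layout.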
+ int output_offset = n * n_stride + c * c_stride; + const T* offset_grad_output = grad_output + output_offset; + const T grad_output_this_bin = + offset_grad_output[ph * h_stride + pw * w_stride]; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4 + + for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1 + { + const T y = roi_start_h + ph * bin_size_h + + static_cast<T>(iy + .5f) * bin_size_h / + static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5 + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + const T x = roi_start_w + pw * bin_size_w + + static_cast<T>(ix + .5f) * bin_size_w / + static_cast<T>(roi_bin_grid_w); + + T w1, w2, w3, w4; + int x_low, x_high, y_low, y_high; + + bilinear_interpolate_gradient( + height, + width, + y, + x, + w1, + w2, + w3, + w4, + x_low, + x_high, + y_low, + y_high, + index); + + T g1 = grad_output_this_bin * w1 / count; + T g2 = grad_output_this_bin * w2 / count; + T g3 = grad_output_this_bin * w3 / count; + T g4 = grad_output_this_bin * w4 / count; + + if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) { + atomicAdd( + offset_grad_input + y_low * width + x_low, static_cast<T>(g1)); + atomicAdd( + offset_grad_input + y_low * width + x_high, static_cast<T>(g2)); + atomicAdd( + offset_grad_input + y_high * width + x_low, static_cast<T>(g3)); + atomicAdd( + offset_grad_input + y_high * width + x_high, static_cast<T>(g4)); + } // if + } // ix + } // iy + } // CUDA_1D_KERNEL_LOOP +} + +at::Tensor roi_align_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned) { + TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_align_forward_kernel"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return output; + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_align_forward_kernel", [&] { + roi_align_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + output_size, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + aligned, + rois_.data_ptr<scalar_t>(), + output.data_ptr<scalar_t>()); + }); + 
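Both host wrappers in this file use the same launch configuration: blocks of 512 threads with the grid capped at 4096 blocks, and CUDA_1D_KERNEL_LOOP strides each thread across any remaining output elements. A small illustrative Python helper (not part of the source) shows the block-count arithmetic:

import math

def launch_dims(numel, threads=512, max_blocks=4096):
    # mirrors: dim3 grid(std::min(ceil_div(numel, 512), 4096)); dim3 block(512);
    return min(math.ceil(numel / threads), max_blocks), threads

# e.g. 1 ROI x 256 channels x 7 x 7 outputs = 12544 elements -> 25 blocks of 512
print(launch_dims(1 * 256 * 7 * 7))   # (25, 512)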
AT_CUDA_CHECK(cudaGetLastError()); + return output; +} + +at::Tensor roi_align_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned) { + TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_align_backward_kernel"; + at::checkAllSameGPU(c, {grad_t, rois_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + + at::cuda::CUDAGuard device_guard(grad.device()); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "roi_align_backward_kernel", [&] { + roi_align_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + grad.numel(), + grad.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + aligned, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_align"), + TORCH_FN(roi_align_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_align_backward"), + TORCH_FN(roi_align_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_pool_kernel.cu b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_pool_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..6b953bdcdf589872fe6d9e160dfcff72465ef636 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/cuda/roi_pool_kernel.cu @@ -0,0 +1,272 @@ +#include <ATen/ATen.h> +#include <ATen/cuda/CUDAContext.h> +#include <c10/cuda/CUDAGuard.h> +#include <float.h> +#include <torch/library.h> +#include <THC/THCAtomics.cuh> + +#include "cuda_helpers.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +__global__ void roi_pool_forward_kernel_impl( + int nthreads, + const T* input, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + const T* rois, + T* output, + int* argmax_data) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + int roi_start_w = round(offset_rois[1] * spatial_scale); + int roi_start_h = 
round(offset_rois[2] * spatial_scale); + int roi_end_w = round(offset_rois[3] * spatial_scale); + int roi_end_h = round(offset_rois[4] * spatial_scale); + + // Force malformed ROIs to be 1x1 + int roi_width = max(roi_end_w - roi_start_w + 1, 1); + int roi_height = max(roi_end_h - roi_start_h + 1, 1); + T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height); + T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width); + + int hstart = static_cast<int>(floor(static_cast<T>(ph) * bin_size_h)); + int wstart = static_cast<int>(floor(static_cast<T>(pw) * bin_size_w)); + int hend = static_cast<int>(ceil(static_cast<T>(ph + 1) * bin_size_h)); + int wend = static_cast<int>(ceil(static_cast<T>(pw + 1) * bin_size_w)); + + // Add roi offsets and clip to input boundaries + hstart = min(max(hstart + roi_start_h, 0), height); + hend = min(max(hend + roi_start_h, 0), height); + wstart = min(max(wstart + roi_start_w, 0), width); + wend = min(max(wend + roi_start_w, 0), width); + bool is_empty = (hend <= hstart) || (wend <= wstart); + + // Define an empty pooling region to be zero + T maxval = is_empty ? 0 : -FLT_MAX; + // If nothing is pooled, argmax = -1 causes nothing to be backprop'd + int maxidx = -1; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + for (int h = hstart; h < hend; ++h) { + for (int w = wstart; w < wend; ++w) { + int input_index = h * width + w; + if (offset_input[input_index] > maxval) { + maxval = offset_input[input_index]; + maxidx = input_index; + } + } + } + output[index] = maxval; + argmax_data[index] = maxidx; + } +} + +template <typename T> +__global__ void roi_pool_backward_kernel_impl( + int nthreads, + const T* grad_output, + const int* argmax_data, + int num_rois, + const T spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + T* grad_input, + const T* rois, + int n_stride, + int c_stride, + int h_stride, + int w_stride) { + CUDA_1D_KERNEL_LOOP(index, nthreads) { + // (n, c, ph, pw) is an element in the pooled output + int pw = index % pooled_width; + int ph = (index / pooled_width) % pooled_height; + int c = (index / pooled_width / pooled_height) % channels; + int n = index / pooled_width / pooled_height / channels; + + const T* offset_rois = rois + n * 5; + int roi_batch_ind = offset_rois[0]; + T* grad_input_offset = + grad_input + ((roi_batch_ind * channels + c) * height * width); + + int output_offset = n * n_stride + c * c_stride; + const int* argmax_data_offset = + argmax_data + (n * channels + c) * pooled_height * pooled_width; + int argmax = argmax_data_offset[ph * pooled_width + pw]; + + if (argmax != -1) { + atomicAdd( + grad_input_offset + argmax, + static_cast<T>( + grad_output[output_offset + ph * h_stride + pw * w_stride])); + } + } +} + +std::tuple<at::Tensor, at::Tensor> roi_pool_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + TORCH_CHECK(input.is_cuda(), "input must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK( + rois.size(1) == 5, "Tensor rois should have shape as Tensor[K, 5]"); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "roi_pool_forward_kernel"; + at::checkAllSameGPU(c, {input_t, rois_t}); + at::checkAllSameType(c, {input_t, rois_t}); + + at::cuda::CUDAGuard device_guard(input.device()); + + auto num_rois = rois.size(0); + auto channels = 
input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + at::Tensor output = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, input.options()); + at::Tensor argmax = at::zeros( + {num_rois, channels, pooled_height, pooled_width}, + input.options().dtype(at::kInt)); + + auto output_size = num_rois * pooled_height * pooled_width * channels; + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(output_size), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + if (output.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(output, argmax); + } + + auto input_ = input.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + input.scalar_type(), "roi_pool_forward_kernel", [&] { + roi_pool_forward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + output_size, + input_.data_ptr<scalar_t>(), + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + rois_.data_ptr<scalar_t>(), + output.data_ptr<scalar_t>(), + argmax.data_ptr<int>()); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return std::make_tuple(output, argmax); +} + +at::Tensor roi_pool_backward_kernel( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + // Check if input tensors are CUDA tensors + TORCH_CHECK(grad.is_cuda(), "grad must be a CUDA tensor"); + TORCH_CHECK(rois.is_cuda(), "rois must be a CUDA tensor"); + TORCH_CHECK(argmax.is_cuda(), "argmax must be a CUDA tensor"); + + at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2}, + argmax_t{argmax, "argmax", 3}; + + at::CheckedFrom c = "roi_pool_backward_kernel"; + at::checkAllSameGPU(c, {grad_t, rois_t, argmax_t}); + at::checkAllSameType(c, {grad_t, rois_t}); + + at::cuda::CUDAGuard device_guard(grad.device()); + + auto num_rois = rois.size(0); + + at::Tensor grad_input = + at::zeros({batch_size, channels, height, width}, grad.options()); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + dim3 grid(std::min( + ceil_div(static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)), + static_cast<int64_t>(4096))); + dim3 block(512); + + // handle possibly empty gradients + if (grad.numel() == 0) { + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; + } + + int n_stride = grad.stride(0); + int c_stride = grad.stride(1); + int h_stride = grad.stride(2); + int w_stride = grad.stride(3); + + auto argmax_ = argmax.contiguous(), rois_ = rois.contiguous(); + AT_DISPATCH_FLOATING_TYPES_AND_HALF( + grad.scalar_type(), "roi_pool_backward_kernel", [&] { + roi_pool_backward_kernel_impl<scalar_t><<<grid, block, 0, stream>>>( + grad.numel(), + grad.data_ptr<scalar_t>(), + argmax_.data_ptr<int>(), + num_rois, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + grad_input.data_ptr<scalar_t>(), + rois_.data_ptr<scalar_t>(), + n_stride, + c_stride, + h_stride, + w_stride); + }); + AT_CUDA_CHECK(cudaGetLastError()); + return grad_input; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, CUDA, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_pool"), + TORCH_FN(roi_pool_forward_kernel)); + m.impl( + TORCH_SELECTIVE_NAME("torchvision::_roi_pool_backward"), + TORCH_FN(roi_pool_backward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.cpp new file mode 100644 index 0000000000000000000000000000000000000000..073e16f2355da98fc181d8b8131e033db9b1d75d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.cpp @@ -0,0 +1,94 @@ +#include "deform_conv2d.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +at::Tensor deform_conv2d( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::deform_conv2d", "") + .typed<decltype(deform_conv2d)>(); + return op.call( + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); +} + +namespace detail { + +std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> +_deform_conv2d_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_deform_conv2d_backward", "") + .typed<decltype(_deform_conv2d_backward)>(); + return op.call( + grad, + input, + weight, + offset, + mask, + bias, + stride_h, + stride_w, + pad_h, + pad_w, + dilation_h, + dilation_w, + groups, + offset_groups, + use_mask); +} + +} // namespace detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::deform_conv2d(Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int groups, int offset_groups, bool use_mask) -> Tensor")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_deform_conv2d_backward(Tensor grad, Tensor input, Tensor weight, Tensor offset, Tensor mask, Tensor bias, int stride_h, int stride_w, int pad_h, int pad_w, int dilation_h, int dilation_w, int groups, int offset_groups, bool use_mask) -> (Tensor, Tensor, Tensor, Tensor, Tensor)")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.h new file mode 100644 index 0000000000000000000000000000000000000000..a35be02aac8a828ff155b7d81cf58d6257ca92f9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/deform_conv2d.h @@ -0,0 +1,48 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API at::Tensor deform_conv2d( + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask); + +namespace detail { + 
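deform_conv2d.cpp above only declares the operator schema and forwards calls through the dispatcher; the actual CPU/CUDA kernels are registered elsewhere in this patch. From Python the op is normally reached via torchvision.ops.deform_conv2d; a hedged usage sketch with illustrative shapes (stride 1, no padding, one offset group):

import torch
import torchvision

x = torch.randn(1, 3, 10, 10)
kh, kw = 3, 3
weight = torch.randn(5, 3, kh, kw)                   # 5 output channels
offset = torch.randn(1, 2 * kh * kw, 8, 8)           # one (dy, dx) pair per kernel tap
mask = torch.sigmoid(torch.randn(1, kh * kw, 8, 8))  # modulation weights (use_mask=True path)
out = torchvision.ops.deform_conv2d(x, offset, weight, mask=mask)
print(out.shape)                                     # torch.Size([1, 5, 8, 8])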
+std::tuple<at::Tensor, at::Tensor, at::Tensor, at::Tensor, at::Tensor> +_deform_conv2d_backward( + const at::Tensor& grad, + const at::Tensor& input, + const at::Tensor& weight, + const at::Tensor& offset, + const at::Tensor& mask, + const at::Tensor& bias, + int64_t stride_h, + int64_t stride_w, + int64_t pad_h, + int64_t pad_w, + int64_t dilation_h, + int64_t dilation_w, + int64_t groups, + int64_t offset_groups, + bool use_mask); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.cpp new file mode 100644 index 0000000000000000000000000000000000000000..90bc26a1fb5d0b470422b994d090968c30c37b1d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.cpp @@ -0,0 +1,59 @@ +#include "interpolate_aa.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +at::Tensor interpolate_linear_aa( + const at::Tensor& input, // Input image + at::IntArrayRef output_size, // Output image size + bool align_corners) // The flag to align corners +{ + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_interpolate_linear_aa", "") + .typed<decltype(interpolate_linear_aa)>(); + return op.call(input, output_size, align_corners); +} + +at::Tensor interpolate_bicubic_aa( + const at::Tensor& input, // Input image + at::IntArrayRef output_size, // Output image size + bool align_corners) // The flag to align corners +{ + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_interpolate_bicubic_aa", "") + .typed<decltype(_interpolate_bicubic_aa)>(); + return op.call(input, output_size, align_corners); +} + +namespace detail { + +// TODO: Implement backward function +// at::Tensor _interpolate_linear_aa_backward( +// const at::Tensor& grad, +// at::IntArrayRef output_size, +// bool align_corners) +// { +// return at::Tensor(); +// } + +} // namespace detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_interpolate_linear_aa(Tensor input, int[] output_size, bool align_corners) -> Tensor")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_interpolate_bicubic_aa(Tensor input, int[] output_size, bool align_corners) -> Tensor")); + // TODO: Implement backward function + // m.def(TORCH_SELECTIVE_SCHEMA( + // "torchvision::_interpolate_linear_aa_backward(Tensor grad, Tensor rois, + // float spatial_scale, int pooled_height, int pooled_width, int + // batch_size, int channels, int height, int width, int sampling_ratio, + // bool aligned) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.h new file mode 100644 index 0000000000000000000000000000000000000000..0a9ffb4b1680909d2c0f4831eba0337127f237db --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/interpolate_aa.h @@ -0,0 +1,30 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API at::Tensor _interpolate_linear_aa( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners = false); + +VISION_API at::Tensor _interpolate_bicubic_aa( + const at::Tensor& input, + at::IntArrayRef output_size, + bool align_corners = false); + +namespace 
detail { + +// TODO: Implement backward function +// at::Tensor _interpolate_linear_aa_backward( +// const at::Tensor& grad, +// at::IntArrayRef output_size, +// bool align_corners=false); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.cpp new file mode 100644 index 0000000000000000000000000000000000000000..9ed8f713671c88a3319d11f4397f3255e6eee4f8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.cpp @@ -0,0 +1,24 @@ +#include "nms.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +at::Tensor nms( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::nms", "") + .typed<decltype(nms)>(); + return op.call(dets, scores, iou_threshold); +} + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.h new file mode 100644 index 0000000000000000000000000000000000000000..8c75a242bffa5a0ab99f29548f9975cc386f17b9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/nms.h @@ -0,0 +1,15 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API at::Tensor nms( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold); + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ops.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ops.h new file mode 100644 index 0000000000000000000000000000000000000000..77995e44197518aa18ded64e3264ff65202b0f0b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ops.h @@ -0,0 +1,8 @@ +#pragma once + +#include "deform_conv2d.h" +#include "nms.h" +#include "ps_roi_align.h" +#include "ps_roi_pool.h" +#include "roi_align.h" +#include "roi_pool.h" diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.cpp new file mode 100644 index 0000000000000000000000000000000000000000..19f4c699d660e20eae970d0bdc3841295e42f377 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.cpp @@ -0,0 +1,64 @@ +#include "ps_roi_align.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +std::tuple<at::Tensor, at::Tensor> ps_roi_align( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::ps_roi_align", "") + .typed<decltype(ps_roi_align)>(); + return op.call( + input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio); +} + +namespace detail { + +at::Tensor _ps_roi_align_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, 
+ int64_t height, + int64_t width) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_ps_roi_align_backward", "") + .typed<decltype(_ps_roi_align_backward)>(); + return op.call( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + batch_size, + channels, + height, + width); +} + +} // namespace detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::ps_roi_align(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio) -> (Tensor, Tensor)")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_ps_roi_align_backward(Tensor grad, Tensor rois, Tensor channel_mapping, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio, int batch_size, int channels, int height, int width) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.h new file mode 100644 index 0000000000000000000000000000000000000000..c5ed865982cdfa679d04564c56e1a86e4eb3f8c9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_align.h @@ -0,0 +1,35 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API std::tuple<at::Tensor, at::Tensor> ps_roi_align( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio); + +namespace detail { + +at::Tensor _ps_roi_align_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..0c6335228200d3becd243d92d39e000f183916de --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.cpp @@ -0,0 +1,60 @@ +#include "ps_roi_pool.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +std::tuple<at::Tensor, at::Tensor> ps_roi_pool( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::ps_roi_pool", "") + .typed<decltype(ps_roi_pool)>(); + return op.call(input, rois, spatial_scale, pooled_height, pooled_width); +} + +namespace detail { + +at::Tensor _ps_roi_pool_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_ps_roi_pool_backward", "") + .typed<decltype(_ps_roi_pool_backward)>(); + return op.call( + grad, + rois, + channel_mapping, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width); +} + +} // namespace 
detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::ps_roi_pool(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width) -> (Tensor, Tensor)")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_ps_roi_pool_backward(Tensor grad, Tensor rois, Tensor channel_mapping, float spatial_scale, int pooled_height, int pooled_width, int batch_size, int channels, int height, int width) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..20c2511e7aaba77ff27b2745be275bac0f463bb9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/ps_roi_pool.h @@ -0,0 +1,33 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API std::tuple<at::Tensor, at::Tensor> ps_roi_pool( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width); + +namespace detail { + +at::Tensor _ps_roi_pool_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& channel_mapping, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qnms_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qnms_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..fbbc062f3e9cd7f828460f92ef3e5a5ec6ee7acc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qnms_kernel.cpp @@ -0,0 +1,129 @@ +#include <ATen/ATen.h> +#include <ATen/native/quantized/affine_quantizer.h> +#include <torch/library.h> + +namespace vision { +namespace ops { + +namespace { + +template <typename scalar_t> +at::Tensor qnms_kernel_impl( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + TORCH_CHECK(!dets.is_cuda(), "dets must be a CPU tensor"); + TORCH_CHECK(!scores.is_cuda(), "scores must be a CPU tensor"); + TORCH_CHECK( + dets.scalar_type() == scores.scalar_type(), + "dets should have the same type as scores"); + + if (dets.numel() == 0) + return at::empty({0}, dets.options().dtype(at::kLong)); + + const auto ndets = dets.size(0); + + auto x1_t = dets.select(1, 0).contiguous(); + auto y1_t = dets.select(1, 1).contiguous(); + auto x2_t = dets.select(1, 2).contiguous(); + auto y2_t = dets.select(1, 3).contiguous(); + auto order_t = std::get<1>(scores.sort(0, /* descending=*/true)); + at::Tensor suppressed_t = at::zeros({ndets}, dets.options().dtype(at::kByte)); + at::Tensor keep_t = at::zeros({ndets}, dets.options().dtype(at::kLong)); + at::Tensor areas_t = at::zeros({ndets}, dets.options().dtype(at::kFloat)); + + auto suppressed = suppressed_t.data_ptr<uint8_t>(); + auto keep = keep_t.data_ptr<int64_t>(); + auto order = order_t.data_ptr<int64_t>(); + auto x1 = x1_t.data_ptr<scalar_t>(); + auto y1 = y1_t.data_ptr<scalar_t>(); + auto x2 = x2_t.data_ptr<scalar_t>(); + auto y2 = y2_t.data_ptr<scalar_t>(); + auto areas = areas_t.data_ptr<float>(); + + for (int64_t i = 0; i < ndets; i++) { + // Note 1: To get the exact 
area we'd need to multiply by scale**2, but this + // would get canceled out in the computation of ovr below. So we leave that + // out. + // Note 2: degenerate boxes (x2 < x1 or y2 < y1) may underflow, although + // integral promotion rules will likely prevent it (see + // https://stackoverflow.com/questions/32959564/subtraction-of-two-unsigned-gives-signed + // for more details). + areas[i] = (x2[i].val_ - x1[i].val_) * (y2[i].val_ - y1[i].val_); + } + + int64_t num_to_keep = 0; + + for (int64_t _i = 0; _i < ndets; _i++) { + auto i = order[_i]; + if (suppressed[i] == 1) + continue; + keep[num_to_keep++] = i; + + // We explicitly cast coordinates to float so that the code can be + // vectorized. + float ix1val = x1[i].val_; + float iy1val = y1[i].val_; + float ix2val = x2[i].val_; + float iy2val = y2[i].val_; + float iarea = areas[i]; + + for (int64_t _j = _i + 1; _j < ndets; _j++) { + auto j = order[_j]; + if (suppressed[j] == 1) + continue; + float xx1 = std::max(ix1val, (float)x1[j].val_); + float yy1 = std::max(iy1val, (float)y1[j].val_); + float xx2 = std::min(ix2val, (float)x2[j].val_); + float yy2 = std::min(iy2val, (float)y2[j].val_); + + auto w = std::max(0.f, xx2 - xx1); // * scale (gets canceled below) + auto h = std::max(0.f, yy2 - yy1); // * scale (gets canceled below) + auto inter = w * h; + auto ovr = inter / (iarea + areas[j] - inter); + if (ovr > iou_threshold) + suppressed[j] = 1; + } + } + return keep_t.narrow(/*dim=*/0, /*start=*/0, /*length=*/num_to_keep); +} + +at::Tensor qnms_kernel( + const at::Tensor& dets, + const at::Tensor& scores, + double iou_threshold) { + TORCH_CHECK( + dets.dim() == 2, "boxes should be a 2d tensor, got ", dets.dim(), "D"); + TORCH_CHECK( + dets.size(1) == 4, + "boxes should have 4 elements in dimension 1, got ", + dets.size(1)); + TORCH_CHECK( + scores.dim() == 1, + "scores should be a 1d tensor, got ", + scores.dim(), + "D"); + TORCH_CHECK( + dets.size(0) == scores.size(0), + "boxes and scores should have same number of elements in ", + "dimension 0, got ", + dets.size(0), + " and ", + scores.size(0)); + + auto result = at::empty({0}); + + AT_DISPATCH_QINT_TYPES(dets.scalar_type(), "qnms_kernel", [&] { + result = qnms_kernel_impl<scalar_t>(dets, scores, iou_threshold); + }); + return result; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, QuantizedCPU, m) { + m.impl(TORCH_SELECTIVE_NAME("torchvision::nms"), TORCH_FN(qnms_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qroi_align_kernel.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qroi_align_kernel.cpp new file mode 100644 index 0000000000000000000000000000000000000000..cfd5ec4ee972f72faa7a3676d475e3115ac14a92 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/quantized/cpu/qroi_align_kernel.cpp @@ -0,0 +1,212 @@ +#include <ATen/ATen.h> +#include <ATen/native/quantized/affine_quantizer.h> +#include <torch/library.h> + +#include "../../cpu/roi_align_common.h" + +namespace vision { +namespace ops { + +namespace { + +template <typename T> +void qroi_align_forward_kernel_impl( + int n_rois, + const at::Tensor& t_input, + const float& spatial_scale, + int channels, + int height, + int width, + int pooled_height, + int pooled_width, + int sampling_ratio, + bool aligned, + const at::Tensor& t_rois, + T* output) { + // Don't delete these otherwise the .data_ptr() data might be undefined + auto t_input_cont = 
t_input.contiguous(); + auto t_rois_cont = t_rois.contiguous(); + + const T* input = t_input_cont.data_ptr<T>(); + int64_t input_zp = t_input.q_zero_point(); + float input_scale = t_input.q_scale(); + + const T* rois = t_rois_cont.data_ptr<T>(); + int64_t rois_zp = t_rois.q_zero_point(); + float rois_scale = t_rois.q_scale(); + + for (int n = 0; n < n_rois; n++) { + int index_n = n * channels * pooled_width * pooled_height; + + const T* offset_rois = rois + n * 5; + + // FIXME: change this when batches of size > 1 are allowed + const int roi_batch_ind = 0; + + // Do not using rounding; this implementation detail is critical + float offset = aligned ? 0.5 : 0.; + float roi_start_w = + at::native::dequantize_val(rois_scale, rois_zp, offset_rois[1]) * + spatial_scale - + offset; + float roi_start_h = + at::native::dequantize_val(rois_scale, rois_zp, offset_rois[2]) * + spatial_scale - + offset; + float roi_end_w = + at::native::dequantize_val(rois_scale, rois_zp, offset_rois[3]) * + spatial_scale - + offset; + float roi_end_h = + at::native::dequantize_val(rois_scale, rois_zp, offset_rois[4]) * + spatial_scale - + offset; + + float roi_width = roi_end_w - roi_start_w; + float roi_height = roi_end_h - roi_start_h; + if (!aligned) { + // Force malformed ROIs to be 1x1 + roi_width = std::max(roi_width, 1.f); + roi_height = std::max(roi_height, 1.f); + } + + float bin_size_h = roi_height / pooled_height; + float bin_size_w = roi_width / pooled_width; + + // We use roi_bin_grid to sample the grid and mimic integral + int roi_bin_grid_h = (sampling_ratio > 0) + ? sampling_ratio + : ceil(roi_height / pooled_height); // e.g., = 2 + int roi_bin_grid_w = + (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width); + + // We do average (integral) pooling inside a bin + // When the grid is empty, output zeros. + const float count = + std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. 
= 4 + + // we want to precalculate indices and weights shared by all chanels, + // this is the key point of optimization + std::vector<detail::PreCalc<float>> pre_calc( + roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height); + detail::pre_calc_for_bilinear_interpolate( + height, + width, + pooled_height, + pooled_width, + roi_start_h, + roi_start_w, + bin_size_h, + bin_size_w, + roi_bin_grid_h, + roi_bin_grid_w, + pre_calc); + + for (int c = 0; c < channels; c++) { + int index_n_c = index_n + c * pooled_width * pooled_height; + const T* offset_input = + input + (roi_batch_ind * channels + c) * height * width; + int pre_calc_index = 0; + + for (int ph = 0; ph < pooled_height; ph++) { + for (int pw = 0; pw < pooled_width; pw++) { + int index = index_n_c + ph * pooled_width + pw; + + float output_val = 0.; + float sum_w = 0.; + for (int iy = 0; iy < roi_bin_grid_h; iy++) { + for (int ix = 0; ix < roi_bin_grid_w; ix++) { + detail::PreCalc<float> pc = pre_calc[pre_calc_index]; + + // Optimization: we use the raw values here and we'll dequantize + // later + output_val += pc.w1 * offset_input[pc.pos1].val_ + + pc.w2 * offset_input[pc.pos2].val_ + + pc.w3 * offset_input[pc.pos3].val_ + + pc.w4 * offset_input[pc.pos4].val_; + sum_w += pc.w1 + pc.w2 + pc.w3 + pc.w4; + + pre_calc_index += 1; + } + } + // Dequantize here + output_val = input_scale * (output_val - (float)input_zp * sum_w); + + output_val /= count; // Average pooling + + output[index] = + at::native::quantize_val<T>(input_scale, input_zp, output_val); + } // for pw + } // for ph + } // for c + } // for n +} + +at::Tensor qroi_align_forward_kernel( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned) { + TORCH_CHECK(input.device().is_cpu(), "input must be a CPU tensor"); + TORCH_CHECK(rois.device().is_cpu(), "rois must be a CPU tensor"); + TORCH_CHECK(rois.size(1) == 5, "rois must have shape as Tensor[K, 5]"); + // The first column of the RoI tensor is an image index, but not all indices + // are representable depending on the quantization. For example 1, 3, 5... + // indices can't be represented when qscale is 2. 
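Note on the quantized forward loop above: it accumulates the raw integer values (val_) and defers dequantization to the end of each bin, using input_scale * (acc - input_zp * sum_w). Because the bilinear weights enter linearly, this equals dequantizing every sample first; a quick NumPy check with arbitrary numbers:

import numpy as np

q = np.array([130., 129., 131., 128.])     # raw quantized samples (val_)
w = np.array([0.1, 0.2, 0.3, 0.4])         # bilinear interpolation weights
scale, zp = 0.05, 128
per_sample = np.sum(w * scale * (q - zp))             # dequantize each sample first
deferred = scale * (np.sum(w * q) - zp * np.sum(w))   # form used by the kernel
assert np.isclose(per_sample, deferred)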
To prevent any bug, we force + // a batch size of 1 and we ignore the first column + TORCH_CHECK( + input.size(0) == 1, + "Only one image per batch is allowed in roi_align when quantized tensors are passed."); + + at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2}; + + at::CheckedFrom c = "qroi_align_forward_kernel"; + at::checkAllSameType(c, {input_t, rois_t}); + + auto num_rois = rois.size(0); + auto channels = input.size(1); + auto height = input.size(2); + auto width = input.size(3); + + // FIXME: This is private, API might change: + // https://github.com/pytorch/pytorch/wiki/Introducing-Quantized-Tensor#quantized-tensor-apis + at::Tensor output = at::_empty_affine_quantized( + {num_rois, channels, pooled_height, pooled_width}, + input.options(), + input.q_scale(), + input.q_zero_point()); + + if (output.numel() == 0) + return output; + + AT_DISPATCH_QINT_TYPES(input.scalar_type(), "qroi_align_forward_kernel", [&] { + qroi_align_forward_kernel_impl<scalar_t>( + num_rois, + input, + spatial_scale, + channels, + height, + width, + pooled_height, + pooled_width, + sampling_ratio, + aligned, + rois, + output.data_ptr<scalar_t>()); + }); + return output; +} + +} // namespace + +TORCH_LIBRARY_IMPL(torchvision, QuantizedCPU, m) { + m.impl( + TORCH_SELECTIVE_NAME("torchvision::roi_align"), + TORCH_FN(qroi_align_forward_kernel)); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1e0d97ff979d0c77a3b875475502a34e67e58a25 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.cpp @@ -0,0 +1,74 @@ +#include "roi_align.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +at::Tensor roi_align( + const at::Tensor& input, // Input feature map. + const at::Tensor& rois, // List of ROIs to pool over. + double spatial_scale, // The scale of the image features. ROIs will be + // scaled to this. + int64_t pooled_height, // The height of the pooled feature map. + int64_t pooled_width, // The width of the pooled feature + int64_t sampling_ratio, // The number of points to sample in each bin + bool aligned) // The flag for pixel shift +// along each axis. 
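These arguments correspond one-to-one to the torchvision::roi_align schema registered at the end of this file, so once torchvision's extension is loaded the op can be called directly; a hedged sketch with illustrative shapes and box coordinates:

import torch
import torchvision  # importing torchvision loads the extension and registers the ops

feat = torch.randn(1, 256, 50, 50)
# rois are Tensor[K, 5]: (batch_index, x1, y1, x2, y2) in input-image coordinates
rois = torch.tensor([[0., 10., 10., 30., 30.]])
out = torch.ops.torchvision.roi_align(feat, rois, 0.25, 7, 7, 2, True)
print(out.shape)   # torch.Size([1, 256, 7, 7])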
+{ + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::roi_align", "") + .typed<decltype(roi_align)>(); + return op.call( + input, + rois, + spatial_scale, + pooled_height, + pooled_width, + sampling_ratio, + aligned); +} + +namespace detail { + +at::Tensor _roi_align_backward( + const at::Tensor& grad, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned) { + static auto op = + c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_roi_align_backward", "") + .typed<decltype(_roi_align_backward)>(); + return op.call( + grad, + rois, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width, + sampling_ratio, + aligned); +} + +} // namespace detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio, bool aligned) -> Tensor")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_roi_align_backward(Tensor grad, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int batch_size, int channels, int height, int width, int sampling_ratio, bool aligned) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.h new file mode 100644 index 0000000000000000000000000000000000000000..2ddb6ac39455c5816c3f66fb91313e0cb82844bd --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_align.h @@ -0,0 +1,36 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API at::Tensor roi_align( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t sampling_ratio, + bool aligned); + +namespace detail { + +at::Tensor _roi_align_backward( + const at::Tensor& grad, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width, + int64_t sampling_ratio, + bool aligned); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.cpp new file mode 100644 index 0000000000000000000000000000000000000000..d88ec30ec2ae04db85f3cd3f311e5705f153b363 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.cpp @@ -0,0 +1,59 @@ +#include "roi_pool.h" + +#include <torch/types.h> + +namespace vision { +namespace ops { + +std::tuple<at::Tensor, at::Tensor> roi_pool( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::roi_pool", "") + .typed<decltype(roi_pool)>(); + return op.call(input, rois, spatial_scale, pooled_height, pooled_width); +} + +namespace detail { + +at::Tensor _roi_pool_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t 
pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width) { + static auto op = c10::Dispatcher::singleton() + .findSchemaOrThrow("torchvision::_roi_pool_backward", "") + .typed<decltype(_roi_pool_backward)>(); + return op.call( + grad, + rois, + argmax, + spatial_scale, + pooled_height, + pooled_width, + batch_size, + channels, + height, + width); +} + +} // namespace detail + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::roi_pool(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width) -> (Tensor, Tensor)")); + m.def(TORCH_SELECTIVE_SCHEMA( + "torchvision::_roi_pool_backward(Tensor grad, Tensor rois, Tensor argmax, float spatial_scale, int pooled_height, int pooled_width, int batch_size, int channels, int height, int width) -> Tensor")); +} + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.h new file mode 100644 index 0000000000000000000000000000000000000000..25ef5a1986daeaed80dfce3e2a80eb07c9c807c2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/ops/roi_pool.h @@ -0,0 +1,33 @@ +#pragma once + +#include <ATen/ATen.h> +#include "../macros.h" + +namespace vision { +namespace ops { + +VISION_API std::tuple<at::Tensor, at::Tensor> roi_pool( + const at::Tensor& input, + const at::Tensor& rois, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width); + +namespace detail { + +at::Tensor _roi_pool_backward( + const at::Tensor& grad, + const at::Tensor& rois, + const at::Tensor& argmax, + double spatial_scale, + int64_t pooled_height, + int64_t pooled_width, + int64_t batch_size, + int64_t channels, + int64_t height, + int64_t width); + +} // namespace detail + +} // namespace ops +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.cpp b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.cpp new file mode 100644 index 0000000000000000000000000000000000000000..1b75de4a754c9d1028cfa4641d103e4b4fcfc8ff --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.cpp @@ -0,0 +1,36 @@ +#include "vision.h" + +#ifndef MOBILE +#include <Python.h> +#endif +#include <torch/library.h> + +#ifdef WITH_CUDA +#include <cuda.h> +#endif +#ifdef WITH_HIP +#include <hip/hip_runtime.h> +#endif + +// If we are in a Windows environment, we need to define +// initialization functions for the _custom_ops extension +#ifdef _WIN32 +PyMODINIT_FUNC PyInit__C(void) { + // No need to do anything. 
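For comparison with roi_align, the torchvision::roi_pool schema registered above returns both the max-pooled values and the argmax indices consumed by its backward kernel; a hedged usage sketch with illustrative shapes:

import torch
import torchvision  # registers the custom ops

feat = torch.randn(1, 256, 50, 50)
rois = torch.tensor([[0., 10., 10., 30., 30.]])   # (batch_index, x1, y1, x2, y2)
out, argmax = torch.ops.torchvision.roi_pool(feat, rois, 0.25, 7, 7)
print(out.shape, argmax.dtype)   # torch.Size([1, 256, 7, 7]) torch.int32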
+ return NULL; +} +#endif + +namespace vision { +int64_t cuda_version() { +#ifdef WITH_CUDA + return CUDA_VERSION; +#else + return -1; +#endif +} + +TORCH_LIBRARY_FRAGMENT(torchvision, m) { + m.def("_cuda_version", &cuda_version); +} +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.h b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.h new file mode 100644 index 0000000000000000000000000000000000000000..22f8c6cdd38ac131d0fdc97c6456f1d8f806fa42 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/csrc/vision.h @@ -0,0 +1,16 @@ +#pragma once + +#include <cstdint> +#include "macros.h" + +namespace vision { +VISION_API int64_t cuda_version(); + +namespace detail { +extern "C" VISION_INLINE_VARIABLE auto _register_ops = &cuda_version; +#ifdef HINT_MSVC_LINKER_INCLUDE_SYMBOL +#pragma comment(linker, "/include:_register_ops") +#endif + +} // namespace detail +} // namespace vision diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b60fc7c7964125734e73767b9fe5d5f510d95c5e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/__init__.py @@ -0,0 +1,39 @@ +from .lsun import LSUN, LSUNClass +from .folder import ImageFolder, DatasetFolder +from .coco import CocoCaptions, CocoDetection +from .cifar import CIFAR10, CIFAR100 +from .stl10 import STL10 +from .mnist import MNIST, EMNIST, FashionMNIST, KMNIST, QMNIST +from .svhn import SVHN +from .phototour import PhotoTour +from .fakedata import FakeData +from .semeion import SEMEION +from .omniglot import Omniglot +from .sbu import SBU +from .flickr import Flickr8k, Flickr30k +from .voc import VOCSegmentation, VOCDetection +from .cityscapes import Cityscapes +from .imagenet import ImageNet +from .caltech import Caltech101, Caltech256 +from .celeba import CelebA +from .widerface import WIDERFace +from .sbd import SBDataset +from .vision import VisionDataset +from .usps import USPS +from .kinetics import Kinetics400 +from .hmdb51 import HMDB51 +from .ucf101 import UCF101 +from .places365 import Places365 +from .kitti import Kitti + +__all__ = ('LSUN', 'LSUNClass', + 'ImageFolder', 'DatasetFolder', 'FakeData', + 'CocoCaptions', 'CocoDetection', + 'CIFAR10', 'CIFAR100', 'EMNIST', 'FashionMNIST', 'QMNIST', + 'MNIST', 'KMNIST', 'STL10', 'SVHN', 'PhotoTour', 'SEMEION', + 'Omniglot', 'SBU', 'Flickr8k', 'Flickr30k', + 'VOCSegmentation', 'VOCDetection', 'Cityscapes', 'ImageNet', + 'Caltech101', 'Caltech256', 'CelebA', 'WIDERFace', 'SBDataset', + 'VisionDataset', 'USPS', 'Kinetics400', 'HMDB51', 'UCF101', + 'Places365', 'Kitti', + ) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..5661b31a4b95b4cb7910aad540233cb70cf1f227 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/_utils.py @@ -0,0 +1,6 @@ +def _download_file_from_remote_location(fpath: str, url: str) -> None: + pass + + +def _is_remote_location_available() -> bool: + return False diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/caltech.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/caltech.py new file mode 100644 index 
0000000000000000000000000000000000000000..1a254edb430be2a5229580512343792d124441dd --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/caltech.py @@ -0,0 +1,219 @@ +from PIL import Image +import os +import os.path +from typing import Any, Callable, List, Optional, Union, Tuple + +from .vision import VisionDataset +from .utils import download_and_extract_archive, verify_str_arg + + +class Caltech101(VisionDataset): + """`Caltech 101 <http://www.vision.caltech.edu/Image_Datasets/Caltech101/>`_ Dataset. + + .. warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format. + + Args: + root (string): Root directory of dataset where directory + ``caltech101`` exists or will be saved to if download is set to True. + target_type (string or list, optional): Type of target to use, ``category`` or + ``annotation``. Can also be a list to output a tuple with all specified target types. + ``category`` represents the target class, and ``annotation`` is a list of points + from a hand-generated outline. Defaults to ``category``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + def __init__( + self, + root: str, + target_type: Union[List[str], str] = "category", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(Caltech101, self).__init__(os.path.join(root, 'caltech101'), + transform=transform, + target_transform=target_transform) + os.makedirs(self.root, exist_ok=True) + if not isinstance(target_type, list): + target_type = [target_type] + self.target_type = [verify_str_arg(t, "target_type", ("category", "annotation")) + for t in target_type] + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + self.categories = sorted(os.listdir(os.path.join(self.root, "101_ObjectCategories"))) + self.categories.remove("BACKGROUND_Google") # this is not a real class + + # For some reason, the category names in "101_ObjectCategories" and + # "Annotations" do not always match. This is a manual map between the + # two. Defaults to using same name, since most names are fine. + name_map = {"Faces": "Faces_2", + "Faces_easy": "Faces_3", + "Motorbikes": "Motorbikes_16", + "airplanes": "Airplanes_Side_2"} + self.annotation_categories = list(map(lambda x: name_map[x] if x in name_map else x, self.categories)) + + self.index: List[int] = [] + self.y = [] + for (i, c) in enumerate(self.categories): + n = len(os.listdir(os.path.join(self.root, "101_ObjectCategories", c))) + self.index.extend(range(1, n + 1)) + self.y.extend(n * [i]) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where the type of target specified by target_type. 
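            A hypothetical usage sketch (the ``./data`` root and ``download=True``
            are illustrative, not a recommendation)::

                ds = Caltech101("./data", target_type=["category", "annotation"],
                                download=True)
                img, (label, contour) = ds[0]   # PIL image, (class index, outline points)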
+ """ + import scipy.io + + img = Image.open(os.path.join(self.root, + "101_ObjectCategories", + self.categories[self.y[index]], + "image_{:04d}.jpg".format(self.index[index]))) + + target: Any = [] + for t in self.target_type: + if t == "category": + target.append(self.y[index]) + elif t == "annotation": + data = scipy.io.loadmat(os.path.join(self.root, + "Annotations", + self.annotation_categories[self.y[index]], + "annotation_{:04d}.mat".format(self.index[index]))) + target.append(data["obj_contour"]) + target = tuple(target) if len(target) > 1 else target[0] + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def _check_integrity(self) -> bool: + # can be more robust and check hash of files + return os.path.exists(os.path.join(self.root, "101_ObjectCategories")) + + def __len__(self) -> int: + return len(self.index) + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + download_and_extract_archive( + "http://www.vision.caltech.edu/Image_Datasets/Caltech101/101_ObjectCategories.tar.gz", + self.root, + filename="101_ObjectCategories.tar.gz", + md5="b224c7392d521a49829488ab0f1120d9") + download_and_extract_archive( + "http://www.vision.caltech.edu/Image_Datasets/Caltech101/Annotations.tar", + self.root, + filename="101_Annotations.tar", + md5="6f83eeb1f24d99cab4eb377263132c91") + + def extra_repr(self) -> str: + return "Target type: {target_type}".format(**self.__dict__) + + +class Caltech256(VisionDataset): + """`Caltech 256 <http://www.vision.caltech.edu/Image_Datasets/Caltech256/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``caltech256`` exists or will be saved to if download is set to True. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(Caltech256, self).__init__(os.path.join(root, 'caltech256'), + transform=transform, + target_transform=target_transform) + os.makedirs(self.root, exist_ok=True) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + self.categories = sorted(os.listdir(os.path.join(self.root, "256_ObjectCategories"))) + self.index: List[int] = [] + self.y = [] + for (i, c) in enumerate(self.categories): + n = len(os.listdir(os.path.join(self.root, "256_ObjectCategories", c))) + self.index.extend(range(1, n + 1)) + self.y.extend(n * [i]) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img = Image.open(os.path.join(self.root, + "256_ObjectCategories", + self.categories[self.y[index]], + "{:03d}_{:04d}.jpg".format(self.y[index] + 1, self.index[index]))) + + target = self.y[index] + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def _check_integrity(self) -> bool: + # can be more robust and check hash of files + return os.path.exists(os.path.join(self.root, "256_ObjectCategories")) + + def __len__(self) -> int: + return len(self.index) + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + download_and_extract_archive( + "http://www.vision.caltech.edu/Image_Datasets/Caltech256/256_ObjectCategories.tar", + self.root, + filename="256_ObjectCategories.tar", + md5="67b4f42ca05d46448c6bb8ecd2220f6d") diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/celeba.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/celeba.py new file mode 100644 index 0000000000000000000000000000000000000000..56588aaef572cb863cc2f8debc9e3ee6f0cf379b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/celeba.py @@ -0,0 +1,192 @@ +from collections import namedtuple +import csv +from functools import partial +import torch +import os +import PIL +from typing import Any, Callable, List, Optional, Union, Tuple +from .vision import VisionDataset +from .utils import download_file_from_google_drive, check_integrity, verify_str_arg + +CSV = namedtuple("CSV", ["header", "index", "data"]) + + +class CelebA(VisionDataset): + """`Large-scale CelebFaces Attributes (CelebA) Dataset <http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + split (string): One of {'train', 'valid', 'test', 'all'}. + Accordingly dataset is selected. + target_type (string or list, optional): Type of target to use, ``attr``, ``identity``, ``bbox``, + or ``landmarks``. Can also be a list to output a tuple with all specified target types. + The targets represent: + + - ``attr`` (np.array shape=(40,) dtype=int): binary (0, 1) labels for attributes + - ``identity`` (int): label for each person (data points with the same identity are the same person) + - ``bbox`` (np.array shape=(4,) dtype=int): bounding box (x, y, width, height) + - ``landmarks`` (np.array shape=(10,) dtype=int): landmark points (lefteye_x, lefteye_y, righteye_x, + righteye_y, nose_x, nose_y, leftmouth_x, leftmouth_y, rightmouth_x, rightmouth_y) + + Defaults to ``attr``. If empty, ``None`` will be returned as target. + + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + + base_folder = "celeba" + # There currently does not appear to be a easy way to extract 7z in python (without introducing additional + # dependencies). The "in-the-wild" (not aligned+cropped) images are only in 7z, so they are not available + # right now. 
+ file_list = [ + # File ID MD5 Hash Filename + ("0B7EVK8r0v71pZjFTYXZWM3FlRnM", "00d2c5bc6d35e252742224ab0c1e8fcb", "img_align_celeba.zip"), + # ("0B7EVK8r0v71pbWNEUjJKdDQ3dGc","b6cd7e93bc7a96c2dc33f819aa3ac651", "img_align_celeba_png.7z"), + # ("0B7EVK8r0v71peklHb0pGdDl6R28", "b6cd7e93bc7a96c2dc33f819aa3ac651", "img_celeba.7z"), + ("0B7EVK8r0v71pblRyaVFSWGxPY0U", "75e246fa4810816ffd6ee81facbd244c", "list_attr_celeba.txt"), + ("1_ee_0u7vcNLOfNLegJRHmolfH5ICW-XS", "32bd1bd63d3c78cd57e08160ec5ed1e2", "identity_CelebA.txt"), + ("0B7EVK8r0v71pbThiMVRxWXZ4dU0", "00566efa6fedff7a56946cd1c10f1c16", "list_bbox_celeba.txt"), + ("0B7EVK8r0v71pd0FJY3Blby1HUTQ", "cc24ecafdb5b50baae59b03474781f8c", "list_landmarks_align_celeba.txt"), + # ("0B7EVK8r0v71pTzJIdlJWdHczRlU", "063ee6ddb681f96bc9ca28c6febb9d1a", "list_landmarks_celeba.txt"), + ("0B7EVK8r0v71pY0NSMzRuSXJEVkk", "d32c9cbf5e040fd4025c592c306e6668", "list_eval_partition.txt"), + ] + + def __init__( + self, + root: str, + split: str = "train", + target_type: Union[List[str], str] = "attr", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(CelebA, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = split + if isinstance(target_type, list): + self.target_type = target_type + else: + self.target_type = [target_type] + + if not self.target_type and self.target_transform is not None: + raise RuntimeError('target_transform is specified but target_type is empty') + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + split_map = { + "train": 0, + "valid": 1, + "test": 2, + "all": None, + } + split_ = split_map[verify_str_arg(split.lower(), "split", + ("train", "valid", "test", "all"))] + splits = self._load_csv("list_eval_partition.txt") + identity = self._load_csv("identity_CelebA.txt") + bbox = self._load_csv("list_bbox_celeba.txt", header=1) + landmarks_align = self._load_csv("list_landmarks_align_celeba.txt", header=1) + attr = self._load_csv("list_attr_celeba.txt", header=1) + + mask = slice(None) if split_ is None else (splits.data == split_).squeeze() + + self.filename = splits.index + self.identity = identity.data[mask] + self.bbox = bbox.data[mask] + self.landmarks_align = landmarks_align.data[mask] + self.attr = attr.data[mask] + # map from {-1, 1} to {0, 1} + self.attr = torch.div(self.attr + 1, 2, rounding_mode='floor') + self.attr_names = attr.header + + def _load_csv( + self, + filename: str, + header: Optional[int] = None, + ) -> CSV: + data, indices, headers = [], [], [] + + fn = partial(os.path.join, self.root, self.base_folder) + with open(fn(filename)) as csv_file: + data = list(csv.reader(csv_file, delimiter=' ', skipinitialspace=True)) + + if header is not None: + headers = data[header] + data = data[header + 1:] + + indices = [row[0] for row in data] + data = [row[1:] for row in data] + data_int = [list(map(int, i)) for i in data] + + return CSV(headers, indices, torch.tensor(data_int)) + + def _check_integrity(self) -> bool: + for (_, md5, filename) in self.file_list: + fpath = os.path.join(self.root, self.base_folder, filename) + _, ext = os.path.splitext(filename) + # Allow original archive to be deleted (zip and 7z) + # Only need the extracted images + if ext not in [".zip", ".7z"] and not check_integrity(fpath, md5): + return False + + # Should check a hash of the images + return 
os.path.isdir(os.path.join(self.root, self.base_folder, "img_align_celeba")) + + def download(self) -> None: + import zipfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + for (file_id, md5, filename) in self.file_list: + download_file_from_google_drive(file_id, os.path.join(self.root, self.base_folder), filename, md5) + + with zipfile.ZipFile(os.path.join(self.root, self.base_folder, "img_align_celeba.zip"), "r") as f: + f.extractall(os.path.join(self.root, self.base_folder)) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + X = PIL.Image.open(os.path.join(self.root, self.base_folder, "img_align_celeba", self.filename[index])) + + target: Any = [] + for t in self.target_type: + if t == "attr": + target.append(self.attr[index, :]) + elif t == "identity": + target.append(self.identity[index, 0]) + elif t == "bbox": + target.append(self.bbox[index, :]) + elif t == "landmarks": + target.append(self.landmarks_align[index, :]) + else: + # TODO: refactor with utils.verify_str_arg + raise ValueError("Target type \"{}\" is not recognized.".format(t)) + + if self.transform is not None: + X = self.transform(X) + + if target: + target = tuple(target) if len(target) > 1 else target[0] + + if self.target_transform is not None: + target = self.target_transform(target) + else: + target = None + + return X, target + + def __len__(self) -> int: + return len(self.attr) + + def extra_repr(self) -> str: + lines = ["Target type: {target_type}", "Split: {split}"] + return '\n'.join(lines).format(**self.__dict__) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cifar.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cifar.py new file mode 100644 index 0000000000000000000000000000000000000000..9d939326c76a627fb3fe116af09ab8270332cc63 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cifar.py @@ -0,0 +1,169 @@ +from PIL import Image +import os +import os.path +import numpy as np +import pickle +from typing import Any, Callable, Optional, Tuple + +from .vision import VisionDataset +from .utils import check_integrity, download_and_extract_archive + + +class CIFAR10(VisionDataset): + """`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``cifar-10-batches-py`` exists or will be saved to if download is set to True. + train (bool, optional): If True, creates dataset from training set, otherwise + creates from test set. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
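+
+    Example (an illustrative sketch; the root directory and transform are
+    placeholders):
+
+    .. code-block:: python
+
+        import torchvision.transforms as transforms
+
+        trainset = CIFAR10(root='./data', train=True, download=True,
+                           transform=transforms.ToTensor())
+        img, label = trainset[0]   # 3x32x32 tensor, int label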
+ + """ + base_folder = 'cifar-10-batches-py' + url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz" + filename = "cifar-10-python.tar.gz" + tgz_md5 = 'c58f30108f718f92721af3b95e74349a' + train_list = [ + ['data_batch_1', 'c99cafc152244af753f735de768cd75f'], + ['data_batch_2', 'd4bba439e000b95fd0a9bffe97cbabec'], + ['data_batch_3', '54ebc095f3ab1f0389bbae665268c751'], + ['data_batch_4', '634d18415352ddfa80567beed471001a'], + ['data_batch_5', '482c414d41f54cd18b22e5b47cb7c3cb'], + ] + + test_list = [ + ['test_batch', '40351d587109b95175f43aff81a1287e'], + ] + meta = { + 'filename': 'batches.meta', + 'key': 'label_names', + 'md5': '5ff9c542aee3614f3951f8cda6e48888', + } + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + + super(CIFAR10, self).__init__(root, transform=transform, + target_transform=target_transform) + + self.train = train # training set or test set + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + if self.train: + downloaded_list = self.train_list + else: + downloaded_list = self.test_list + + self.data: Any = [] + self.targets = [] + + # now load the picked numpy arrays + for file_name, checksum in downloaded_list: + file_path = os.path.join(self.root, self.base_folder, file_name) + with open(file_path, 'rb') as f: + entry = pickle.load(f, encoding='latin1') + self.data.append(entry['data']) + if 'labels' in entry: + self.targets.extend(entry['labels']) + else: + self.targets.extend(entry['fine_labels']) + + self.data = np.vstack(self.data).reshape(-1, 3, 32, 32) + self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC + + self._load_meta() + + def _load_meta(self) -> None: + path = os.path.join(self.root, self.base_folder, self.meta['filename']) + if not check_integrity(path, self.meta['md5']): + raise RuntimeError('Dataset metadata file not found or corrupted.' + + ' You can use download=True to download it') + with open(path, 'rb') as infile: + data = pickle.load(infile, encoding='latin1') + self.classes = data[self.meta['key']] + self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)} + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], self.targets[index] + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test") + + +class CIFAR100(CIFAR10): + """`CIFAR100 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset. + + This is a subclass of the `CIFAR10` Dataset. + """ + base_folder = 'cifar-100-python' + url = "https://www.cs.toronto.edu/~kriz/cifar-100-python.tar.gz" + filename = "cifar-100-python.tar.gz" + tgz_md5 = 'eb9058c3a382ffc7106e4002c42a8d85' + train_list = [ + ['train', '16019d7e3df5f24257cddd939b257f8d'], + ] + + test_list = [ + ['test', 'f0ef6b0ae62326f3e7ffdfab6717acfc'], + ] + meta = { + 'filename': 'meta', + 'key': 'fine_label_names', + 'md5': '7973b15100ade9c7d40fb424638fde48', + } diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cityscapes.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cityscapes.py new file mode 100644 index 0000000000000000000000000000000000000000..bed7524ac4ff0db9a4e78bfed2d4b8cd465c3402 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/cityscapes.py @@ -0,0 +1,215 @@ +import json +import os +from collections import namedtuple +from typing import Any, Callable, Dict, List, Optional, Union, Tuple + +from .utils import extract_archive, verify_str_arg, iterable_to_str +from .vision import VisionDataset +from PIL import Image + + +class Cityscapes(VisionDataset): + """`Cityscapes <http://www.cityscapes-dataset.com/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory ``leftImg8bit`` + and ``gtFine`` or ``gtCoarse`` are located. + split (string, optional): The image split to use, ``train``, ``test`` or ``val`` if mode="fine" + otherwise ``train``, ``train_extra`` or ``val`` + mode (string, optional): The quality mode to use, ``fine`` or ``coarse`` + target_type (string or list, optional): Type of target to use, ``instance``, ``semantic``, ``polygon`` + or ``color``. Can also be a list to output a tuple with all specified target types. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Examples: + + Get semantic segmentation target + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type='semantic') + + img, smnt = dataset[0] + + Get multiple targets + + .. 
code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='train', mode='fine', + target_type=['instance', 'color', 'polygon']) + + img, (inst, col, poly) = dataset[0] + + Validate on the "coarse" set + + .. code-block:: python + + dataset = Cityscapes('./data/cityscapes', split='val', mode='coarse', + target_type='semantic') + + img, smnt = dataset[0] + """ + + # Based on https://github.com/mcordts/cityscapesScripts + CityscapesClass = namedtuple('CityscapesClass', ['name', 'id', 'train_id', 'category', 'category_id', + 'has_instances', 'ignore_in_eval', 'color']) + + classes = [ + CityscapesClass('unlabeled', 0, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('ego vehicle', 1, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('rectification border', 2, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('out of roi', 3, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('static', 4, 255, 'void', 0, False, True, (0, 0, 0)), + CityscapesClass('dynamic', 5, 255, 'void', 0, False, True, (111, 74, 0)), + CityscapesClass('ground', 6, 255, 'void', 0, False, True, (81, 0, 81)), + CityscapesClass('road', 7, 0, 'flat', 1, False, False, (128, 64, 128)), + CityscapesClass('sidewalk', 8, 1, 'flat', 1, False, False, (244, 35, 232)), + CityscapesClass('parking', 9, 255, 'flat', 1, False, True, (250, 170, 160)), + CityscapesClass('rail track', 10, 255, 'flat', 1, False, True, (230, 150, 140)), + CityscapesClass('building', 11, 2, 'construction', 2, False, False, (70, 70, 70)), + CityscapesClass('wall', 12, 3, 'construction', 2, False, False, (102, 102, 156)), + CityscapesClass('fence', 13, 4, 'construction', 2, False, False, (190, 153, 153)), + CityscapesClass('guard rail', 14, 255, 'construction', 2, False, True, (180, 165, 180)), + CityscapesClass('bridge', 15, 255, 'construction', 2, False, True, (150, 100, 100)), + CityscapesClass('tunnel', 16, 255, 'construction', 2, False, True, (150, 120, 90)), + CityscapesClass('pole', 17, 5, 'object', 3, False, False, (153, 153, 153)), + CityscapesClass('polegroup', 18, 255, 'object', 3, False, True, (153, 153, 153)), + CityscapesClass('traffic light', 19, 6, 'object', 3, False, False, (250, 170, 30)), + CityscapesClass('traffic sign', 20, 7, 'object', 3, False, False, (220, 220, 0)), + CityscapesClass('vegetation', 21, 8, 'nature', 4, False, False, (107, 142, 35)), + CityscapesClass('terrain', 22, 9, 'nature', 4, False, False, (152, 251, 152)), + CityscapesClass('sky', 23, 10, 'sky', 5, False, False, (70, 130, 180)), + CityscapesClass('person', 24, 11, 'human', 6, True, False, (220, 20, 60)), + CityscapesClass('rider', 25, 12, 'human', 6, True, False, (255, 0, 0)), + CityscapesClass('car', 26, 13, 'vehicle', 7, True, False, (0, 0, 142)), + CityscapesClass('truck', 27, 14, 'vehicle', 7, True, False, (0, 0, 70)), + CityscapesClass('bus', 28, 15, 'vehicle', 7, True, False, (0, 60, 100)), + CityscapesClass('caravan', 29, 255, 'vehicle', 7, True, True, (0, 0, 90)), + CityscapesClass('trailer', 30, 255, 'vehicle', 7, True, True, (0, 0, 110)), + CityscapesClass('train', 31, 16, 'vehicle', 7, True, False, (0, 80, 100)), + CityscapesClass('motorcycle', 32, 17, 'vehicle', 7, True, False, (0, 0, 230)), + CityscapesClass('bicycle', 33, 18, 'vehicle', 7, True, False, (119, 11, 32)), + CityscapesClass('license plate', -1, -1, 'vehicle', 7, False, True, (0, 0, 142)), + ] + + def __init__( + self, + root: str, + split: str = "train", + mode: str = "fine", + target_type: Union[List[str], str] = "instance", + transform: 
Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ) -> None: + super(Cityscapes, self).__init__(root, transforms, transform, target_transform) + self.mode = 'gtFine' if mode == 'fine' else 'gtCoarse' + self.images_dir = os.path.join(self.root, 'leftImg8bit', split) + self.targets_dir = os.path.join(self.root, self.mode, split) + self.target_type = target_type + self.split = split + self.images = [] + self.targets = [] + + verify_str_arg(mode, "mode", ("fine", "coarse")) + if mode == "fine": + valid_modes = ("train", "test", "val") + else: + valid_modes = ("train", "train_extra", "val") + msg = ("Unknown value '{}' for argument split if mode is '{}'. " + "Valid values are {{{}}}.") + msg = msg.format(split, mode, iterable_to_str(valid_modes)) + verify_str_arg(split, "split", valid_modes, msg) + + if not isinstance(target_type, list): + self.target_type = [target_type] + [verify_str_arg(value, "target_type", + ("instance", "semantic", "polygon", "color")) + for value in self.target_type] + + if not os.path.isdir(self.images_dir) or not os.path.isdir(self.targets_dir): + + if split == 'train_extra': + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainextra.zip')) + else: + image_dir_zip = os.path.join(self.root, 'leftImg8bit{}'.format('_trainvaltest.zip')) + + if self.mode == 'gtFine': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '_trainvaltest.zip')) + elif self.mode == 'gtCoarse': + target_dir_zip = os.path.join(self.root, '{}{}'.format(self.mode, '.zip')) + + if os.path.isfile(image_dir_zip) and os.path.isfile(target_dir_zip): + extract_archive(from_path=image_dir_zip, to_path=self.root) + extract_archive(from_path=target_dir_zip, to_path=self.root) + else: + raise RuntimeError('Dataset not found or incomplete. Please make sure all required folders for the' + ' specified "split" and "mode" are inside the "root" directory') + + for city in os.listdir(self.images_dir): + img_dir = os.path.join(self.images_dir, city) + target_dir = os.path.join(self.targets_dir, city) + for file_name in os.listdir(img_dir): + target_types = [] + for t in self.target_type: + target_name = '{}_{}'.format(file_name.split('_leftImg8bit')[0], + self._get_target_suffix(self.mode, t)) + target_types.append(os.path.join(target_dir, target_name)) + + self.images.append(os.path.join(img_dir, file_name)) + self.targets.append(target_types) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + Returns: + tuple: (image, target) where target is a tuple of all target types if target_type is a list with more + than one item. Otherwise target is a json object if target_type="polygon", else the image segmentation. 
+ """ + + image = Image.open(self.images[index]).convert('RGB') + + targets: Any = [] + for i, t in enumerate(self.target_type): + if t == 'polygon': + target = self._load_json(self.targets[index][i]) + else: + target = Image.open(self.targets[index][i]) + + targets.append(target) + + target = tuple(targets) if len(targets) > 1 else targets[0] + + if self.transforms is not None: + image, target = self.transforms(image, target) + + return image, target + + def __len__(self) -> int: + return len(self.images) + + def extra_repr(self) -> str: + lines = ["Split: {split}", "Mode: {mode}", "Type: {target_type}"] + return '\n'.join(lines).format(**self.__dict__) + + def _load_json(self, path: str) -> Dict[str, Any]: + with open(path, 'r') as file: + data = json.load(file) + return data + + def _get_target_suffix(self, mode: str, target_type: str) -> str: + if target_type == 'instance': + return '{}_instanceIds.png'.format(mode) + elif target_type == 'semantic': + return '{}_labelIds.png'.format(mode) + elif target_type == 'color': + return '{}_color.png'.format(mode) + else: + return '{}_polygons.json'.format(mode) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/coco.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/coco.py new file mode 100644 index 0000000000000000000000000000000000000000..d59a23efb4d8feebb1c9c82ba12c1c4d8b378a51 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/coco.py @@ -0,0 +1,99 @@ +from .vision import VisionDataset +from PIL import Image +import os +import os.path +from typing import Any, Callable, Optional, Tuple, List + + +class CocoDetection(VisionDataset): + """`MS Coco Detection <https://cocodataset.org/#detection-2016>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + def __init__( + self, + root: str, + annFile: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ): + super().__init__(root, transforms, transform, target_transform) + from pycocotools.coco import COCO + + self.coco = COCO(annFile) + self.ids = list(sorted(self.coco.imgs.keys())) + + def _load_image(self, id: int) -> Image.Image: + path = self.coco.loadImgs(id)[0]["file_name"] + return Image.open(os.path.join(self.root, path)).convert("RGB") + + def _load_target(self, id) -> List[Any]: + return self.coco.loadAnns(self.coco.getAnnIds(id)) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + id = self.ids[index] + image = self._load_image(id) + target = self._load_target(id) + + if self.transforms is not None: + image, target = self.transforms(image, target) + + return image, target + + def __len__(self) -> int: + return len(self.ids) + + +class CocoCaptions(CocoDetection): + """`MS Coco Captions <https://cocodataset.org/#captions-2015>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + annFile (string): Path to json annotation file. 
+ transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + + Example: + + .. code:: python + + import torchvision.datasets as dset + import torchvision.transforms as transforms + cap = dset.CocoCaptions(root = 'dir where images are', + annFile = 'json annotation file', + transform=transforms.ToTensor()) + + print('Number of samples: ', len(cap)) + img, target = cap[3] # load 4th sample + + print("Image Size: ", img.size()) + print(target) + + Output: :: + + Number of samples: 82783 + Image Size: (3L, 427L, 640L) + [u'A plane emitting smoke stream flying over a mountain.', + u'A plane darts across a bright blue sky behind a mountain covered in snow', + u'A plane leaves a contrail above the snowy mountain top.', + u'A mountain that has a plane flying overheard in the distance.', + u'A mountain view with a plume of smoke in the background'] + + """ + + def _load_target(self, id) -> List[str]: + return [ann["caption"] for ann in super()._load_target(id)] diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/fakedata.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/fakedata.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb14505275b9994491ce8cb4c0fa81f962d4197 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/fakedata.py @@ -0,0 +1,66 @@ +import torch +from typing import Any, Callable, Optional, Tuple +from .vision import VisionDataset +from .. import transforms + + +class FakeData(VisionDataset): + """A fake dataset that returns randomly generated images and returns them as PIL images + + Args: + size (int, optional): Size of the dataset. Default: 1000 images + image_size(tuple, optional): Size if the returned images. Default: (3, 224, 224) + num_classes(int, optional): Number of classes in the dataset. Default: 10 + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + random_offset (int): Offsets the index-based random seed used to + generate each image. Default: 0 + + """ + + def __init__( + self, + size: int = 1000, + image_size: Tuple[int, int, int] = (3, 224, 224), + num_classes: int = 10, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + random_offset: int = 0, + ) -> None: + super(FakeData, self).__init__(None, transform=transform, # type: ignore[arg-type] + target_transform=target_transform) + self.size = size + self.num_classes = num_classes + self.image_size = image_size + self.random_offset = random_offset + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is class_index of the target class. 
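+
+        Example (an illustrative sketch; the sizes below are arbitrary):
+
+        .. code-block:: python
+
+            ds = FakeData(size=10, image_size=(3, 64, 64), num_classes=5)
+            img, label = ds[0]   # PIL image, int label
+            assert len(ds) == 10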
+ """ + # create random image that is consistent with the index id + if index >= len(self): + raise IndexError("{} index out of range".format(self.__class__.__name__)) + rng_state = torch.get_rng_state() + torch.manual_seed(index + self.random_offset) + img = torch.randn(*self.image_size) + target = torch.randint(0, self.num_classes, size=(1,), dtype=torch.long)[0] + torch.set_rng_state(rng_state) + + # convert to PIL Image + img = transforms.ToPILImage()(img) + if self.transform is not None: + img = self.transform(img) + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target.item() + + def __len__(self) -> int: + return self.size diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/flickr.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/flickr.py new file mode 100644 index 0000000000000000000000000000000000000000..a3b3e411b6e57e53a36b7077946e471f1f32dc4d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/flickr.py @@ -0,0 +1,167 @@ +from collections import defaultdict +from PIL import Image +from html.parser import HTMLParser +from typing import Any, Callable, Dict, List, Optional, Tuple + +import glob +import os +from .vision import VisionDataset + + +class Flickr8kParser(HTMLParser): + """Parser for extracting captions from the Flickr8k dataset web page.""" + + def __init__(self, root: str) -> None: + super(Flickr8kParser, self).__init__() + + self.root = root + + # Data structure to store captions + self.annotations: Dict[str, List[str]] = {} + + # State variables + self.in_table = False + self.current_tag: Optional[str] = None + self.current_img: Optional[str] = None + + def handle_starttag(self, tag: str, attrs: List[Tuple[str, Optional[str]]]) -> None: + self.current_tag = tag + + if tag == 'table': + self.in_table = True + + def handle_endtag(self, tag: str) -> None: + self.current_tag = None + + if tag == 'table': + self.in_table = False + + def handle_data(self, data: str) -> None: + if self.in_table: + if data == 'Image Not Found': + self.current_img = None + elif self.current_tag == 'a': + img_id = data.split('/')[-2] + img_id = os.path.join(self.root, img_id + '_*.jpg') + img_id = glob.glob(img_id)[0] + self.current_img = img_id + self.annotations[img_id] = [] + elif self.current_tag == 'li' and self.current_img: + img_id = self.current_img + self.annotations[img_id].append(data.strip()) + + +class Flickr8k(VisionDataset): + """`Flickr8k Entities <http://hockenmaier.cs.illinois.edu/8k-pictures.html>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + ann_file (string): Path to annotation file. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. 
+ """ + + def __init__( + self, + root: str, + ann_file: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(Flickr8k, self).__init__(root, transform=transform, + target_transform=target_transform) + self.ann_file = os.path.expanduser(ann_file) + + # Read annotations and store in a dict + parser = Flickr8kParser(self.root) + with open(self.ann_file) as fh: + parser.feed(fh.read()) + self.annotations = parser.annotations + + self.ids = list(sorted(self.annotations.keys())) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + img_id = self.ids[index] + + # Image + img = Image.open(img_id).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + # Captions + target = self.annotations[img_id] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.ids) + + +class Flickr30k(VisionDataset): + """`Flickr30k Entities <http://web.engr.illinois.edu/~bplumme2/Flickr30kEntities/>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + ann_file (string): Path to annotation file. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + def __init__( + self, + root: str, + ann_file: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(Flickr30k, self).__init__(root, transform=transform, + target_transform=target_transform) + self.ann_file = os.path.expanduser(ann_file) + + # Read annotations and store in a dict + self.annotations = defaultdict(list) + with open(self.ann_file) as fh: + for line in fh: + img_id, caption = line.strip().split('\t') + self.annotations[img_id[:-2]].append(caption) + + self.ids = list(sorted(self.annotations.keys())) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target). target is a list of captions for the image. + """ + img_id = self.ids[index] + + # Image + filename = os.path.join(self.root, img_id) + img = Image.open(filename).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + # Captions + target = self.annotations[img_id] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.ids) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/folder.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/folder.py new file mode 100644 index 0000000000000000000000000000000000000000..29255c7ab40014238a447889322be5fa84dbfdb3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/folder.py @@ -0,0 +1,314 @@ +from .vision import VisionDataset + +from PIL import Image + +import os +import os.path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple + + +def has_file_allowed_extension(filename: str, extensions: Tuple[str, ...]) -> bool: + """Checks if a file is an allowed extension. 
+ + Args: + filename (string): path to a file + extensions (tuple of strings): extensions to consider (lowercase) + + Returns: + bool: True if the filename ends with one of given extensions + """ + return filename.lower().endswith(extensions) + + +def is_image_file(filename: str) -> bool: + """Checks if a file is an allowed image extension. + + Args: + filename (string): path to a file + + Returns: + bool: True if the filename ends with a known image extension + """ + return has_file_allowed_extension(filename, IMG_EXTENSIONS) + + +def find_classes(directory: str) -> Tuple[List[str], Dict[str, int]]: + """Finds the class folders in a dataset. + + See :class:`DatasetFolder` for details. + """ + classes = sorted(entry.name for entry in os.scandir(directory) if entry.is_dir()) + if not classes: + raise FileNotFoundError(f"Couldn't find any class folder in {directory}.") + + class_to_idx = {cls_name: i for i, cls_name in enumerate(classes)} + return classes, class_to_idx + + +def make_dataset( + directory: str, + class_to_idx: Optional[Dict[str, int]] = None, + extensions: Optional[Tuple[str, ...]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, +) -> List[Tuple[str, int]]: + """Generates a list of samples of a form (path_to_sample, class). + + See :class:`DatasetFolder` for details. + + Note: The class_to_idx parameter is here optional and will use the logic of the ``find_classes`` function + by default. + """ + directory = os.path.expanduser(directory) + + if class_to_idx is None: + _, class_to_idx = find_classes(directory) + elif not class_to_idx: + raise ValueError("'class_to_index' must have at least one entry to collect any samples.") + + both_none = extensions is None and is_valid_file is None + both_something = extensions is not None and is_valid_file is not None + if both_none or both_something: + raise ValueError("Both extensions and is_valid_file cannot be None or not None at the same time") + + if extensions is not None: + + def is_valid_file(x: str) -> bool: + return has_file_allowed_extension(x, cast(Tuple[str, ...], extensions)) + + is_valid_file = cast(Callable[[str], bool], is_valid_file) + + instances = [] + available_classes = set() + for target_class in sorted(class_to_idx.keys()): + class_index = class_to_idx[target_class] + target_dir = os.path.join(directory, target_class) + if not os.path.isdir(target_dir): + continue + for root, _, fnames in sorted(os.walk(target_dir, followlinks=True)): + for fname in sorted(fnames): + path = os.path.join(root, fname) + if is_valid_file(path): + item = path, class_index + instances.append(item) + + if target_class not in available_classes: + available_classes.add(target_class) + + empty_classes = set(class_to_idx.keys()) - available_classes + if empty_classes: + msg = f"Found no valid file for the classes {', '.join(sorted(empty_classes))}. " + if extensions is not None: + msg += f"Supported extensions are: {', '.join(extensions)}" + raise FileNotFoundError(msg) + + return instances + + +class DatasetFolder(VisionDataset): + """A generic data loader. + + This default directory structure can be customized by overriding the + :meth:`find_classes` method. + + Args: + root (string): Root directory path. + loader (callable): A function to load a sample given its path. + extensions (tuple[string]): A list of allowed extensions. + both extensions and is_valid_file should not be passed. + transform (callable, optional): A function/transform that takes in + a sample and returns a transformed version. 
+ E.g, ``transforms.RandomCrop`` for images. + target_transform (callable, optional): A function/transform that takes + in the target and transforms it. + is_valid_file (callable, optional): A function that takes path of a file + and check if the file is a valid file (used to check of corrupt files) + both extensions and is_valid_file should not be passed. + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). + samples (list): List of (sample path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__( + self, + root: str, + loader: Callable[[str], Any], + extensions: Optional[Tuple[str, ...]] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> None: + super(DatasetFolder, self).__init__(root, transform=transform, + target_transform=target_transform) + classes, class_to_idx = self.find_classes(self.root) + samples = self.make_dataset(self.root, class_to_idx, extensions, is_valid_file) + + self.loader = loader + self.extensions = extensions + + self.classes = classes + self.class_to_idx = class_to_idx + self.samples = samples + self.targets = [s[1] for s in samples] + + @staticmethod + def make_dataset( + directory: str, + class_to_idx: Dict[str, int], + extensions: Optional[Tuple[str, ...]] = None, + is_valid_file: Optional[Callable[[str], bool]] = None, + ) -> List[Tuple[str, int]]: + """Generates a list of samples of a form (path_to_sample, class). + + This can be overridden to e.g. read files from a compressed zip file instead of from the disk. + + Args: + directory (str): root dataset directory, corresponding to ``self.root``. + class_to_idx (Dict[str, int]): Dictionary mapping class name to class index. + extensions (optional): A list of allowed extensions. + Either extensions or is_valid_file should be passed. Defaults to None. + is_valid_file (optional): A function that takes path of a file + and checks if the file is a valid file + (used to check of corrupt files) both extensions and + is_valid_file should not be passed. Defaults to None. + + Raises: + ValueError: In case ``class_to_idx`` is empty. + ValueError: In case ``extensions`` and ``is_valid_file`` are None or both are not None. + FileNotFoundError: In case no valid file was found for any class. + + Returns: + List[Tuple[str, int]]: samples of a form (path_to_sample, class) + """ + if class_to_idx is None: + # prevent potential bug since make_dataset() would use the class_to_idx logic of the + # find_classes() function, instead of using that of the find_classes() method, which + # is potentially overridden and thus could have a different logic. + raise ValueError( + "The class_to_idx parameter cannot be None." + ) + return make_dataset(directory, class_to_idx, extensions=extensions, is_valid_file=is_valid_file) + + def find_classes(self, directory: str) -> Tuple[List[str], Dict[str, int]]: + """Find the class folders in a dataset structured as follows:: + + directory/ + ├── class_x + │ ├── xxx.ext + │ ├── xxy.ext + │ └── ... + │ └── xxz.ext + └── class_y + ├── 123.ext + ├── nsdf3.ext + └── ... + └── asd932_.ext + + This method can be overridden to only consider + a subset of classes, or to adapt to a different dataset directory structure. 
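+
+        A minimal sketch of such an override (the subclass name and the kept
+        category names are made up for illustration):
+
+        .. code-block:: python
+
+            class CatsAndDogsFolder(DatasetFolder):
+                def find_classes(self, directory):
+                    classes, _ = super().find_classes(directory)
+                    keep = [c for c in classes if c in ("cat", "dog")]
+                    return keep, {cls_name: i for i, cls_name in enumerate(keep)}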
+ + Args: + directory(str): Root directory path, corresponding to ``self.root`` + + Raises: + FileNotFoundError: If ``dir`` has no class folders. + + Returns: + (Tuple[List[str], Dict[str, int]]): List of all classes and dictionary mapping each class to an index. + """ + return find_classes(directory) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (sample, target) where target is class_index of the target class. + """ + path, target = self.samples[index] + sample = self.loader(path) + if self.transform is not None: + sample = self.transform(sample) + if self.target_transform is not None: + target = self.target_transform(target) + + return sample, target + + def __len__(self) -> int: + return len(self.samples) + + +IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.tif', '.tiff', '.webp') + + +def pil_loader(path: str) -> Image.Image: + # open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835) + with open(path, 'rb') as f: + img = Image.open(f) + return img.convert('RGB') + + +# TODO: specify the return type +def accimage_loader(path: str) -> Any: + import accimage + try: + return accimage.Image(path) + except IOError: + # Potentially a decoding problem, fall back to PIL.Image + return pil_loader(path) + + +def default_loader(path: str) -> Any: + from torchvision import get_image_backend + if get_image_backend() == 'accimage': + return accimage_loader(path) + else: + return pil_loader(path) + + +class ImageFolder(DatasetFolder): + """A generic data loader where the images are arranged in this way by default: :: + + root/dog/xxx.png + root/dog/xxy.png + root/dog/[...]/xxz.png + + root/cat/123.png + root/cat/nsdf3.png + root/cat/[...]/asd932_.png + + This class inherits from :class:`~torchvision.datasets.DatasetFolder` so + the same methods can be overridden to customize the dataset. + + Args: + root (string): Root directory path. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + is_valid_file (callable, optional): A function that takes path of an Image file + and check if the file is a valid file (used to check of corrupt files) + + Attributes: + classes (list): List of the class names sorted alphabetically. + class_to_idx (dict): Dict with items (class_name, class_index). 
+ imgs (list): List of (image path, class_index) tuples + """ + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + loader: Callable[[str], Any] = default_loader, + is_valid_file: Optional[Callable[[str], bool]] = None, + ): + super(ImageFolder, self).__init__(root, loader, IMG_EXTENSIONS if is_valid_file is None else None, + transform=transform, + target_transform=target_transform, + is_valid_file=is_valid_file) + self.imgs = self.samples diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/hmdb51.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/hmdb51.py new file mode 100644 index 0000000000000000000000000000000000000000..4912eb016000acf2d469f6221d8ba3ae3b6bc3c0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/hmdb51.py @@ -0,0 +1,132 @@ +import glob +import os + +from .folder import find_classes, make_dataset +from .video_utils import VideoClips +from .vision import VisionDataset + + +class HMDB51(VisionDataset): + """ + `HMDB51 <http://serre-lab.clps.brown.edu/resource/hmdb-a-large-human-motion-database/>`_ + dataset. + + HMDB51 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the HMDB51 Dataset. + annotation_path (str): Path to the folder containing the split files. + frames_per_clip (int): Number of frames in a clip. + step_between_clips (int): Number of frames between each clip. + fold (int, optional): Which fold to use. Should be between 1 and 3. + train (bool, optional): If ``True``, creates a dataset from the train split, + otherwise from the ``test`` split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. 
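+
+    Example (an illustrative sketch; both paths are placeholders and assume the
+    videos and split files have already been downloaded and extracted):
+
+    .. code-block:: python
+
+        dataset = HMDB51('hmdb51/videos', 'hmdb51/splits',
+                         frames_per_clip=16, step_between_clips=16,
+                         fold=1, train=True)
+        video, audio, label = dataset[0]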
+ + Returns: + tuple: A 3-tuple with the following entries: + + - video (Tensor[T, H, W, C]): The `T` video frames + - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + - label (int): class of the video clip + """ + + data_url = "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/hmdb51_org.rar" + splits = { + "url": "http://serre-lab.clps.brown.edu/wp-content/uploads/2013/10/test_train_splits.rar", + "md5": "15e67781e70dcfbdce2d7dbb9b3344b5" + } + TRAIN_TAG = 1 + TEST_TAG = 2 + + def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1, + frame_rate=None, fold=1, train=True, transform=None, + _precomputed_metadata=None, num_workers=1, _video_width=0, + _video_height=0, _video_min_dimension=0, _audio_samples=0): + super(HMDB51, self).__init__(root) + if fold not in (1, 2, 3): + raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + + extensions = ('avi',) + self.classes, class_to_idx = find_classes(self.root) + self.samples = make_dataset( + self.root, + class_to_idx, + extensions, + ) + + video_paths = [path for (path, _) in self.samples] + video_clips = VideoClips( + video_paths, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + ) + # we bookkeep the full version of video clips because we want to be able + # to return the meta data of full version rather than the subset version of + # video clips + self.full_video_clips = video_clips + self.fold = fold + self.train = train + self.indices = self._select_fold(video_paths, annotation_path, fold, train) + self.video_clips = video_clips.subset(self.indices) + self.transform = transform + + @property + def metadata(self): + return self.full_video_clips.metadata + + def _select_fold(self, video_list, annotations_dir, fold, train): + target_tag = self.TRAIN_TAG if train else self.TEST_TAG + split_pattern_name = "*test_split{}.txt".format(fold) + split_pattern_path = os.path.join(annotations_dir, split_pattern_name) + annotation_paths = glob.glob(split_pattern_path) + selected_files = [] + for filepath in annotation_paths: + with open(filepath) as fid: + lines = fid.readlines() + for line in lines: + video_filename, tag_string = line.split() + tag = int(tag_string) + if tag == target_tag: + selected_files.append(video_filename) + selected_files = set(selected_files) + + indices = [] + for video_index, video_path in enumerate(video_list): + if os.path.basename(video_path) in selected_files: + indices.append(video_index) + + return indices + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, _, video_idx = self.video_clips.get_clip(idx) + sample_index = self.indices[video_idx] + _, class_index = self.samples[sample_index] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, class_index diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/imagenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/imagenet.py new file mode 100644 index 0000000000000000000000000000000000000000..6dfc9bfebfd66d3f9cac016812a77269d9947191 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/imagenet.py @@ -0,0 +1,221 @@ +import warnings +from contextlib import contextmanager +import os +import shutil +import 
tempfile +from typing import Any, Dict, List, Iterator, Optional, Tuple +import torch +from .folder import ImageFolder +from .utils import check_integrity, extract_archive, verify_str_arg + +ARCHIVE_META = { + 'train': ('ILSVRC2012_img_train.tar', '1d675b47d978889d74fa0da5fadfb00e'), + 'val': ('ILSVRC2012_img_val.tar', '29b22e2961454d5413ddabcf34fc5622'), + 'devkit': ('ILSVRC2012_devkit_t12.tar.gz', 'fa75699e90414af021442c21a62c3abf') +} + +META_FILE = "meta.bin" + + +class ImageNet(ImageFolder): + """`ImageNet <http://image-net.org/>`_ 2012 Classification Dataset. + + Args: + root (string): Root directory of the ImageNet Dataset. + split (string, optional): The dataset split, supports ``train``, or ``val``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + + Attributes: + classes (list): List of the class name tuples. + class_to_idx (dict): Dict with items (class_name, class_index). + wnids (list): List of the WordNet IDs. + wnid_to_idx (dict): Dict with items (wordnet_id, class_index). + imgs (list): List of (image path, class_index) tuples + targets (list): The class_index value for each image in the dataset + """ + + def __init__(self, root: str, split: str = 'train', download: Optional[str] = None, **kwargs: Any) -> None: + if download is True: + msg = ("The dataset is no longer publicly accessible. You need to " + "download the archives externally and place them in the root " + "directory.") + raise RuntimeError(msg) + elif download is False: + msg = ("The use of the download flag is deprecated, since the dataset " + "is no longer publicly accessible.") + warnings.warn(msg, RuntimeWarning) + + root = self.root = os.path.expanduser(root) + self.split = verify_str_arg(split, "split", ("train", "val")) + + self.parse_archives() + wnid_to_classes = load_meta_file(self.root)[0] + + super(ImageNet, self).__init__(self.split_folder, **kwargs) + self.root = root + + self.wnids = self.classes + self.wnid_to_idx = self.class_to_idx + self.classes = [wnid_to_classes[wnid] for wnid in self.wnids] + self.class_to_idx = {cls: idx + for idx, clss in enumerate(self.classes) + for cls in clss} + + def parse_archives(self) -> None: + if not check_integrity(os.path.join(self.root, META_FILE)): + parse_devkit_archive(self.root) + + if not os.path.isdir(self.split_folder): + if self.split == 'train': + parse_train_archive(self.root) + elif self.split == 'val': + parse_val_archive(self.root) + + @property + def split_folder(self) -> str: + return os.path.join(self.root, self.split) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + +def load_meta_file(root: str, file: Optional[str] = None) -> Tuple[Dict[str, str], List[str]]: + if file is None: + file = META_FILE + file = os.path.join(root, file) + + if check_integrity(file): + return torch.load(file) + else: + msg = ("The meta file {} is not present in the root directory or is corrupted. " + "This file is automatically created by the ImageNet dataset.") + raise RuntimeError(msg.format(file, root)) + + +def _verify_archive(root: str, file: str, md5: str) -> None: + if not check_integrity(os.path.join(root, file), md5): + msg = ("The archive {} is not present in the root directory or is corrupted. 
" + "You need to download it externally and place it in {}.") + raise RuntimeError(msg.format(file, root)) + + +def parse_devkit_archive(root: str, file: Optional[str] = None) -> None: + """Parse the devkit archive of the ImageNet2012 classification dataset and save + the meta information in a binary file. + + Args: + root (str): Root directory containing the devkit archive + file (str, optional): Name of devkit archive. Defaults to + 'ILSVRC2012_devkit_t12.tar.gz' + """ + import scipy.io as sio + + def parse_meta_mat(devkit_root: str) -> Tuple[Dict[int, str], Dict[str, str]]: + metafile = os.path.join(devkit_root, "data", "meta.mat") + meta = sio.loadmat(metafile, squeeze_me=True)['synsets'] + nums_children = list(zip(*meta))[4] + meta = [meta[idx] for idx, num_children in enumerate(nums_children) + if num_children == 0] + idcs, wnids, classes = list(zip(*meta))[:3] + classes = [tuple(clss.split(', ')) for clss in classes] + idx_to_wnid = {idx: wnid for idx, wnid in zip(idcs, wnids)} + wnid_to_classes = {wnid: clss for wnid, clss in zip(wnids, classes)} + return idx_to_wnid, wnid_to_classes + + def parse_val_groundtruth_txt(devkit_root: str) -> List[int]: + file = os.path.join(devkit_root, "data", + "ILSVRC2012_validation_ground_truth.txt") + with open(file, 'r') as txtfh: + val_idcs = txtfh.readlines() + return [int(val_idx) for val_idx in val_idcs] + + @contextmanager + def get_tmp_dir() -> Iterator[str]: + tmp_dir = tempfile.mkdtemp() + try: + yield tmp_dir + finally: + shutil.rmtree(tmp_dir) + + archive_meta = ARCHIVE_META["devkit"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + with get_tmp_dir() as tmp_dir: + extract_archive(os.path.join(root, file), tmp_dir) + + devkit_root = os.path.join(tmp_dir, "ILSVRC2012_devkit_t12") + idx_to_wnid, wnid_to_classes = parse_meta_mat(devkit_root) + val_idcs = parse_val_groundtruth_txt(devkit_root) + val_wnids = [idx_to_wnid[idx] for idx in val_idcs] + + torch.save((wnid_to_classes, val_wnids), os.path.join(root, META_FILE)) + + +def parse_train_archive(root: str, file: Optional[str] = None, folder: str = "train") -> None: + """Parse the train images archive of the ImageNet2012 classification dataset and + prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the train images archive + file (str, optional): Name of train images archive. Defaults to + 'ILSVRC2012_img_train.tar' + folder (str, optional): Optional name for train images folder. Defaults to + 'train' + """ + archive_meta = ARCHIVE_META["train"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + + _verify_archive(root, file, md5) + + train_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), train_root) + + archives = [os.path.join(train_root, archive) for archive in os.listdir(train_root)] + for archive in archives: + extract_archive(archive, os.path.splitext(archive)[0], remove_finished=True) + + +def parse_val_archive( + root: str, file: Optional[str] = None, wnids: Optional[List[str]] = None, folder: str = "val" +) -> None: + """Parse the validation images archive of the ImageNet2012 classification dataset + and prepare it for usage with the ImageNet dataset. + + Args: + root (str): Root directory containing the validation images archive + file (str, optional): Name of validation images archive. Defaults to + 'ILSVRC2012_img_val.tar' + wnids (list, optional): List of WordNet IDs of the validation images. 
If None + is given, the IDs are loaded from the meta file in the root directory + folder (str, optional): Optional name for validation images folder. Defaults to + 'val' + """ + archive_meta = ARCHIVE_META["val"] + if file is None: + file = archive_meta[0] + md5 = archive_meta[1] + if wnids is None: + wnids = load_meta_file(root)[1] + + _verify_archive(root, file, md5) + + val_root = os.path.join(root, folder) + extract_archive(os.path.join(root, file), val_root) + + images = sorted([os.path.join(val_root, image) for image in os.listdir(val_root)]) + + for wnid in set(wnids): + os.mkdir(os.path.join(val_root, wnid)) + + for wnid, img_file in zip(wnids, images): + shutil.move(img_file, os.path.join(val_root, wnid, os.path.basename(img_file))) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kinetics.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kinetics.py new file mode 100644 index 0000000000000000000000000000000000000000..a8986986c17a9960678fd689483dc829bbccd2fb --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kinetics.py @@ -0,0 +1,91 @@ +from .utils import list_dir +from .folder import find_classes, make_dataset +from .video_utils import VideoClips +from .vision import VisionDataset + + +class Kinetics400(VisionDataset): + """ + `Kinetics-400 <https://deepmind.com/research/open-source/open-source-datasets/kinetics/>`_ + dataset. + + Kinetics-400 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the Kinetics-400 Dataset. Should be structured as follows: + + .. code:: + + root/ + ├── class1 + │ ├── clip1.avi + │ ├── clip2.avi + │ └── ... + └── class2 + ├── clipx.avi + └── ... + + frames_per_clip (int): number of frames in a clip + step_between_clips (int): number of frames between each clip + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. 
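For orientation, a minimal usage sketch of the Kinetics400 class defined in this file; the root path, clip length, and worker count are hypothetical, and the import assumes the usual torchvision.datasets re-export:

    from torchvision.datasets import Kinetics400

    # Hypothetical layout: kinetics400/train/<class_name>/<clip>.avi
    dataset = Kinetics400("kinetics400/train", frames_per_clip=16,
                          step_between_clips=16, num_workers=4)
    video, audio, label = dataset[0]   # video is a Tensor[T, H, W, C] clip
    print(len(dataset), video.shape, label)

Indexing returns one clip rather than one video, so len(dataset) counts clips across all videos.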
+ + Returns: + tuple: A 3-tuple with the following entries: + + - video (Tensor[T, H, W, C]): the `T` video frames + - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + - label (int): class of the video clip + """ + + def __init__(self, root, frames_per_clip, step_between_clips=1, frame_rate=None, + extensions=('avi',), transform=None, _precomputed_metadata=None, + num_workers=1, _video_width=0, _video_height=0, + _video_min_dimension=0, _audio_samples=0, _audio_channels=0): + super(Kinetics400, self).__init__(root) + + self.classes, class_to_idx = find_classes(self.root) + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + video_list = [x[0] for x in self.samples] + self.video_clips = VideoClips( + video_list, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + _audio_channels=_audio_channels, + ) + self.transform = transform + + @property + def metadata(self): + return self.video_clips.metadata + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[video_idx][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kitti.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kitti.py new file mode 100644 index 0000000000000000000000000000000000000000..8db2e45b7155bb9e41d7582f400913cc03a16474 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/kitti.py @@ -0,0 +1,161 @@ +import csv +import os +from typing import Any, Callable, List, Optional, Tuple + +from PIL import Image + +from .utils import download_and_extract_archive +from .vision import VisionDataset + + +class Kitti(VisionDataset): + """`KITTI <http://www.cvlibs.net/datasets/kitti>`_ Dataset. + + Args: + root (string): Root directory where images are downloaded to. + Expects the following folder structure if download=False: + + .. code:: + + <root> + └── Kitti + └─ raw + ├── training + | ├── image_2 + | └── label_2 + └── testing + └── image_2 + train (bool, optional): Use ``train`` split if true, else ``test`` split. + Defaults to ``train``. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.ToTensor`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample + and its target as entry and returns a transformed version. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
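A minimal usage sketch of the Kitti class above; the root path is hypothetical, and download=True assumes network access to the listed S3 bucket:

    from torchvision.datasets import Kitti

    # Fetches data_object_image_2.zip / data_object_label_2.zip into <root>/Kitti/raw
    dataset = Kitti("datasets", train=True, download=True)
    image, target = dataset[0]          # target: list of per-object label dicts
    print(len(dataset), target[0]["type"], target[0]["bbox"])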
+ + """ + + data_url = "https://s3.eu-central-1.amazonaws.com/avg-kitti/" + resources = [ + "data_object_image_2.zip", + "data_object_label_2.zip", + ] + image_dir_name = "image_2" + labels_dir_name = "label_2" + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + download: bool = False, + ): + super().__init__( + root, + transform=transform, + target_transform=target_transform, + transforms=transforms, + ) + self.images = [] + self.targets = [] + self.root = root + self.train = train + self._location = "training" if self.train else "testing" + + if download: + self.download() + if not self._check_exists(): + raise RuntimeError( + "Dataset not found. You may use download=True to download it." + ) + + image_dir = os.path.join(self._raw_folder, self._location, self.image_dir_name) + if self.train: + labels_dir = os.path.join(self._raw_folder, self._location, self.labels_dir_name) + for img_file in os.listdir(image_dir): + self.images.append(os.path.join(image_dir, img_file)) + if self.train: + self.targets.append( + os.path.join(labels_dir, f"{img_file.split('.')[0]}.txt") + ) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """Get item at a given index. + + Args: + index (int): Index + Returns: + tuple: (image, target), where + target is a list of dictionaries with the following keys: + + - type: str + - truncated: float + - occluded: int + - alpha: float + - bbox: float[4] + - dimensions: float[3] + - locations: float[3] + - rotation_y: float + + """ + image = Image.open(self.images[index]) + target = self._parse_target(index) if self.train else None + if self.transforms: + image, target = self.transforms(image, target) + return image, target + + def _parse_target(self, index: int) -> List: + target = [] + with open(self.targets[index]) as inp: + content = csv.reader(inp, delimiter=" ") + for line in content: + target.append({ + "type": line[0], + "truncated": float(line[1]), + "occluded": int(line[2]), + "alpha": float(line[3]), + "bbox": [float(x) for x in line[4:8]], + "dimensions": [float(x) for x in line[8:11]], + "location": [float(x) for x in line[11:14]], + "rotation_y": float(line[14]), + }) + return target + + def __len__(self) -> int: + return len(self.images) + + @property + def _raw_folder(self) -> str: + return os.path.join(self.root, self.__class__.__name__, "raw") + + def _check_exists(self) -> bool: + """Check if the data directory exists.""" + folders = [self.image_dir_name] + if self.train: + folders.append(self.labels_dir_name) + return all( + os.path.isdir(os.path.join(self._raw_folder, self._location, fname)) + for fname in folders + ) + + def download(self) -> None: + """Download the KITTI data if it doesn't exist already.""" + + if self._check_exists(): + return + + os.makedirs(self._raw_folder, exist_ok=True) + + # download files + for fname in self.resources: + download_and_extract_archive( + url=f"{self.data_url}{fname}", + download_root=self._raw_folder, + filename=fname, + ) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/lsun.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/lsun.py new file mode 100644 index 0000000000000000000000000000000000000000..75b284b597fe8828720285af2a80b0c41fc0c126 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/lsun.py @@ -0,0 +1,163 @@ +from .vision import VisionDataset +from PIL import Image +import os +import 
os.path +import io +import string +from collections.abc import Iterable +import pickle +from typing import Any, Callable, cast, List, Optional, Tuple, Union +from .utils import verify_str_arg, iterable_to_str + + +class LSUNClass(VisionDataset): + def __init__( + self, root: str, transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None + ) -> None: + import lmdb + super(LSUNClass, self).__init__(root, transform=transform, + target_transform=target_transform) + + self.env = lmdb.open(root, max_readers=1, readonly=True, lock=False, + readahead=False, meminit=False) + with self.env.begin(write=False) as txn: + self.length = txn.stat()['entries'] + cache_file = '_cache_' + ''.join(c for c in root if c in string.ascii_letters) + if os.path.isfile(cache_file): + self.keys = pickle.load(open(cache_file, "rb")) + else: + with self.env.begin(write=False) as txn: + self.keys = [key for key in txn.cursor().iternext(keys=True, values=False)] + pickle.dump(self.keys, open(cache_file, "wb")) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + img, target = None, None + env = self.env + with env.begin(write=False) as txn: + imgbuf = txn.get(self.keys[index]) + + buf = io.BytesIO() + buf.write(imgbuf) + buf.seek(0) + img = Image.open(buf).convert('RGB') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return self.length + + +class LSUN(VisionDataset): + """ + `LSUN <https://www.yf.io/p/lsun>`_ dataset. + + Args: + root (string): Root directory for the database files. + classes (string or list): One of {'train', 'val', 'test'} or a list of + categories to load. e,g. ['bedroom_train', 'church_outdoor_train']. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. 
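A minimal usage sketch of the LSUN class above; it assumes the lmdb package is installed and that the per-category LMDB folders (hypothetical path below) have already been downloaded:

    from torchvision.datasets import LSUN

    # Expects e.g. <root>/bedroom_train_lmdb/ and <root>/church_outdoor_train_lmdb/
    dataset = LSUN("datasets/lsun",
                   classes=["bedroom_train", "church_outdoor_train"])
    img, target = dataset[0]            # target indexes into dataset.classes
    print(len(dataset), dataset.classes[target])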
+ """ + + def __init__( + self, + root: str, + classes: Union[str, List[str]] = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + super(LSUN, self).__init__(root, transform=transform, + target_transform=target_transform) + self.classes = self._verify_classes(classes) + + # for each class, create an LSUNClassDataset + self.dbs = [] + for c in self.classes: + self.dbs.append(LSUNClass( + root=os.path.join(root, f"{c}_lmdb"), + transform=transform)) + + self.indices = [] + count = 0 + for db in self.dbs: + count += len(db) + self.indices.append(count) + + self.length = count + + def _verify_classes(self, classes: Union[str, List[str]]) -> List[str]: + categories = ['bedroom', 'bridge', 'church_outdoor', 'classroom', + 'conference_room', 'dining_room', 'kitchen', + 'living_room', 'restaurant', 'tower'] + dset_opts = ['train', 'val', 'test'] + + try: + classes = cast(str, classes) + verify_str_arg(classes, "classes", dset_opts) + if classes == 'test': + classes = [classes] + else: + classes = [c + '_' + classes for c in categories] + except ValueError: + if not isinstance(classes, Iterable): + msg = ("Expected type str or Iterable for argument classes, " + "but got type {}.") + raise ValueError(msg.format(type(classes))) + + classes = list(classes) + msg_fmtstr_type = ("Expected type str for elements in argument classes, " + "but got type {}.") + for c in classes: + verify_str_arg(c, custom_msg=msg_fmtstr_type.format(type(c))) + c_short = c.split('_') + category, dset_opt = '_'.join(c_short[:-1]), c_short[-1] + + msg_fmtstr = "Unknown value '{}' for {}. Valid values are {{{}}}." + msg = msg_fmtstr.format(category, "LSUN class", + iterable_to_str(categories)) + verify_str_arg(category, valid_values=categories, custom_msg=msg) + + msg = msg_fmtstr.format(dset_opt, "postfix", iterable_to_str(dset_opts)) + verify_str_arg(dset_opt, valid_values=dset_opts, custom_msg=msg) + + return classes + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: Tuple (image, target) where target is the index of the target category. + """ + target = 0 + sub = 0 + for ind in self.indices: + if index < ind: + break + target += 1 + sub = ind + + db = self.dbs[target] + index = index - sub + + if self.target_transform is not None: + target = self.target_transform(target) + + img, _ = db[index] + return img, target + + def __len__(self) -> int: + return self.length + + def extra_repr(self) -> str: + return "Classes: {classes}".format(**self.__dict__) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/mnist.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/mnist.py new file mode 100644 index 0000000000000000000000000000000000000000..e356f17dd1b86c2186392bccd63fc3aabfc733a7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/mnist.py @@ -0,0 +1,512 @@ +from .vision import VisionDataset +import warnings +from PIL import Image +import os +import os.path +import numpy as np +import torch +import codecs +import string +from typing import Any, Callable, Dict, List, Optional, Tuple +from urllib.error import URLError +from .utils import download_and_extract_archive, extract_archive, verify_str_arg, check_integrity +import shutil + + +class MNIST(VisionDataset): + """`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset. 
+ + Args: + root (string): Root directory of dataset where ``MNIST/processed/training.pt`` + and ``MNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + + mirrors = [ + 'http://yann.lecun.com/exdb/mnist/', + 'https://ossci-datasets.s3.amazonaws.com/mnist/', + ] + + resources = [ + ("train-images-idx3-ubyte.gz", "f68b3c2dcbeaaa9fbdd348bbdeb94873"), + ("train-labels-idx1-ubyte.gz", "d53e105ee54ea40749a09fcbcd1e9432"), + ("t10k-images-idx3-ubyte.gz", "9fb629c4189551a2d022fa330f9573f3"), + ("t10k-labels-idx1-ubyte.gz", "ec29112dd5afa0611ce80d1b7f02629c") + ] + + training_file = 'training.pt' + test_file = 'test.pt' + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + @property + def train_labels(self): + warnings.warn("train_labels has been renamed targets") + return self.targets + + @property + def test_labels(self): + warnings.warn("test_labels has been renamed targets") + return self.targets + + @property + def train_data(self): + warnings.warn("train_data has been renamed data") + return self.data + + @property + def test_data(self): + warnings.warn("test_data has been renamed data") + return self.data + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(MNIST, self).__init__(root, transform=transform, + target_transform=target_transform) + self.train = train # training set or test set + + if self._check_legacy_exist(): + self.data, self.targets = self._load_legacy_data() + return + + if download: + self.download() + + if not self._check_exists(): + raise RuntimeError('Dataset not found.' + + ' You can use download=True to download it') + + self.data, self.targets = self._load_data() + + def _check_legacy_exist(self): + processed_folder_exists = os.path.exists(self.processed_folder) + if not processed_folder_exists: + return False + + return all( + check_integrity(os.path.join(self.processed_folder, file)) for file in (self.training_file, self.test_file) + ) + + def _load_legacy_data(self): + # This is for BC only. We no longer cache the data in a custom binary, but simply read from the raw data + # directly. + data_file = self.training_file if self.train else self.test_file + return torch.load(os.path.join(self.processed_folder, data_file)) + + def _load_data(self): + image_file = f"{'train' if self.train else 't10k'}-images-idx3-ubyte" + data = read_image_file(os.path.join(self.raw_folder, image_file)) + + label_file = f"{'train' if self.train else 't10k'}-labels-idx1-ubyte" + targets = read_label_file(os.path.join(self.raw_folder, label_file)) + + return data, targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
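A minimal end-to-end sketch of the MNIST class above; the root path is hypothetical and download=True assumes one of the listed mirrors is reachable:

    from torchvision import transforms
    from torchvision.datasets import MNIST

    dataset = MNIST("datasets", train=True, download=True,
                    transform=transforms.ToTensor())
    img, target = dataset[0]            # img: 1x28x28 float tensor, target: int
    print(len(dataset), img.shape, target)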
+ """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img.numpy(), mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + @property + def raw_folder(self) -> str: + return os.path.join(self.root, self.__class__.__name__, 'raw') + + @property + def processed_folder(self) -> str: + return os.path.join(self.root, self.__class__.__name__, 'processed') + + @property + def class_to_idx(self) -> Dict[str, int]: + return {_class: i for i, _class in enumerate(self.classes)} + + def _check_exists(self) -> bool: + return all( + check_integrity(os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0])) + for url, _ in self.resources + ) + + def download(self) -> None: + """Download the MNIST data if it doesn't exist already.""" + + if self._check_exists(): + return + + os.makedirs(self.raw_folder, exist_ok=True) + + # download files + for filename, md5 in self.resources: + for mirror in self.mirrors: + url = "{}{}".format(mirror, filename) + try: + print("Downloading {}".format(url)) + download_and_extract_archive( + url, download_root=self.raw_folder, + filename=filename, + md5=md5 + ) + except URLError as error: + print( + "Failed to download (trying next):\n{}".format(error) + ) + continue + finally: + print() + break + else: + raise RuntimeError("Error downloading {}".format(filename)) + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test") + + +class FashionMNIST(MNIST): + """`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``FashionMNIST/processed/training.pt`` + and ``FashionMNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + mirrors = [ + "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/" + ] + + resources = [ + ("train-images-idx3-ubyte.gz", "8d4fb7e6c68d591d4c3dfef9ec88bf0d"), + ("train-labels-idx1-ubyte.gz", "25c81989df183df01b3e8a0aad5dffbe"), + ("t10k-images-idx3-ubyte.gz", "bef4ecab320f06d8554ea6380940ec79"), + ("t10k-labels-idx1-ubyte.gz", "bb300cfdad3c16e7a12a480ee83cd310") + ] + classes = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal', + 'Shirt', 'Sneaker', 'Bag', 'Ankle boot'] + + +class KMNIST(MNIST): + """`Kuzushiji-MNIST <https://github.com/rois-codh/kmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``KMNIST/processed/training.pt`` + and ``KMNIST/processed/test.pt`` exist. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
+ transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + mirrors = [ + "http://codh.rois.ac.jp/kmnist/dataset/kmnist/" + ] + + resources = [ + ("train-images-idx3-ubyte.gz", "bdb82020997e1d708af4cf47b453dcf7"), + ("train-labels-idx1-ubyte.gz", "e144d726b3acfaa3e44228e80efcd344"), + ("t10k-images-idx3-ubyte.gz", "5c965bf0a639b31b8f53240b1b52f4d7"), + ("t10k-labels-idx1-ubyte.gz", "7320c461ea6c1c855c0b718fb2a4b134") + ] + classes = ['o', 'ki', 'su', 'tsu', 'na', 'ha', 'ma', 'ya', 're', 'wo'] + + +class EMNIST(MNIST): + """`EMNIST <https://www.westernsydney.edu.au/bens/home/reproducible_research/emnist>`_ Dataset. + + Args: + root (string): Root directory of dataset where ``EMNIST/processed/training.pt`` + and ``EMNIST/processed/test.pt`` exist. + split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``, + ``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies + which one to use. + train (bool, optional): If True, creates dataset from ``training.pt``, + otherwise from ``test.pt``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + """ + url = 'https://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip' + md5 = "58c8d27c78d21e728a6bc7b3cc06412e" + splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist') + # Merged Classes assumes Same structure for both uppercase and lowercase version + _merged_classes = {'c', 'i', 'j', 'k', 'l', 'm', 'o', 'p', 's', 'u', 'v', 'w', 'x', 'y', 'z'} + _all_classes = set(string.digits + string.ascii_letters) + classes_split_dict = { + 'byclass': sorted(list(_all_classes)), + 'bymerge': sorted(list(_all_classes - _merged_classes)), + 'balanced': sorted(list(_all_classes - _merged_classes)), + 'letters': ['N/A'] + list(string.ascii_lowercase), + 'digits': list(string.digits), + 'mnist': list(string.digits), + } + + def __init__(self, root: str, split: str, **kwargs: Any) -> None: + self.split = verify_str_arg(split, "split", self.splits) + self.training_file = self._training_file(split) + self.test_file = self._test_file(split) + super(EMNIST, self).__init__(root, **kwargs) + self.classes = self.classes_split_dict[self.split] + + @staticmethod + def _training_file(split) -> str: + return 'training_{}.pt'.format(split) + + @staticmethod + def _test_file(split) -> str: + return 'test_{}.pt'.format(split) + + @property + def _file_prefix(self) -> str: + return f"emnist-{self.split}-{'train' if self.train else 'test'}" + + @property + def images_file(self) -> str: + return os.path.join(self.raw_folder, f"{self._file_prefix}-images-idx3-ubyte") + + @property + def labels_file(self) -> str: + return os.path.join(self.raw_folder, f"{self._file_prefix}-labels-idx1-ubyte") + + def _load_data(self): + return read_image_file(self.images_file), read_label_file(self.labels_file) + + def _check_exists(self) -> bool: + return all(check_integrity(file) for file in (self.images_file, self.labels_file)) + + def download(self) -> None: + 
"""Download the EMNIST data if it doesn't exist already.""" + + if self._check_exists(): + return + + os.makedirs(self.raw_folder, exist_ok=True) + + download_and_extract_archive(self.url, download_root=self.raw_folder, md5=self.md5) + gzip_folder = os.path.join(self.raw_folder, 'gzip') + for gzip_file in os.listdir(gzip_folder): + if gzip_file.endswith('.gz'): + extract_archive(os.path.join(gzip_folder, gzip_file), self.raw_folder) + shutil.rmtree(gzip_folder) + + +class QMNIST(MNIST): + """`QMNIST <https://github.com/facebookresearch/qmnist>`_ Dataset. + + Args: + root (string): Root directory of dataset whose ``processed`` + subdir contains torch binary files with the datasets. + what (string,optional): Can be 'train', 'test', 'test10k', + 'test50k', or 'nist' for respectively the mnist compatible + training set, the 60k qmnist testing set, the 10k qmnist + examples that match the mnist testing set, the 50k + remaining qmnist testing examples, or all the nist + digits. The default is to select 'train' or 'test' + according to the compatibility argument 'train'. + compat (bool,optional): A boolean that says whether the target + for each example is class number (for compatibility with + the MNIST dataloader) or a torch vector containing the + full qmnist information. Default=True. + download (bool, optional): If true, downloads the dataset from + the internet and puts it in root directory. If dataset is + already downloaded, it is not downloaded again. + transform (callable, optional): A function/transform that + takes in an PIL image and returns a transformed + version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform + that takes in the target and transforms it. + train (bool,optional,compatibility): When argument 'what' is + not specified, this boolean decides whether to load the + training set ot the testing set. Default: True. 
+ """ + + subsets = { + 'train': 'train', + 'test': 'test', + 'test10k': 'test', + 'test50k': 'test', + 'nist': 'nist' + } + resources: Dict[str, List[Tuple[str, str]]] = { # type: ignore[assignment] + 'train': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-images-idx3-ubyte.gz', + 'ed72d4157d28c017586c42bc6afe6370'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-train-labels-idx2-int.gz', + '0058f8dd561b90ffdd0f734c6a30e5e4')], + 'test': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-images-idx3-ubyte.gz', + '1394631089c404de565df7b7aeaf9412'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/qmnist-test-labels-idx2-int.gz', + '5b5b05890a5e13444e108efe57b788aa')], + 'nist': [('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-images-idx3-ubyte.xz', + '7f124b3b8ab81486c9d8c2749c17f834'), + ('https://raw.githubusercontent.com/facebookresearch/qmnist/master/xnist-labels-idx2-int.xz', + '5ed0e788978e45d4a8bd4b7caec3d79d')] + } + classes = ['0 - zero', '1 - one', '2 - two', '3 - three', '4 - four', + '5 - five', '6 - six', '7 - seven', '8 - eight', '9 - nine'] + + def __init__( + self, root: str, what: Optional[str] = None, compat: bool = True, + train: bool = True, **kwargs: Any + ) -> None: + if what is None: + what = 'train' if train else 'test' + self.what = verify_str_arg(what, "what", tuple(self.subsets.keys())) + self.compat = compat + self.data_file = what + '.pt' + self.training_file = self.data_file + self.test_file = self.data_file + super(QMNIST, self).__init__(root, train, **kwargs) + + @property + def images_file(self) -> str: + (url, _), _ = self.resources[self.subsets[self.what]] + return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0]) + + @property + def labels_file(self) -> str: + _, (url, _) = self.resources[self.subsets[self.what]] + return os.path.join(self.raw_folder, os.path.splitext(os.path.basename(url))[0]) + + def _check_exists(self) -> bool: + return all(check_integrity(file) for file in (self.images_file, self.labels_file)) + + def _load_data(self): + data = read_sn3_pascalvincent_tensor(self.images_file) + assert (data.dtype == torch.uint8) + assert (data.ndimension() == 3) + + targets = read_sn3_pascalvincent_tensor(self.labels_file).long() + assert (targets.ndimension() == 2) + + if self.what == 'test10k': + data = data[0:10000, :, :].clone() + targets = targets[0:10000, :].clone() + elif self.what == 'test50k': + data = data[10000:, :, :].clone() + targets = targets[10000:, :].clone() + + return data, targets + + def download(self) -> None: + """Download the QMNIST data if it doesn't exist already. + Note that we only download what has been asked for (argument 'what'). 
+ """ + if self._check_exists(): + return + + os.makedirs(self.raw_folder, exist_ok=True) + split = self.resources[self.subsets[self.what]] + + for url, md5 in split: + filename = url.rpartition('/')[2] + file_path = os.path.join(self.raw_folder, filename) + if not os.path.isfile(file_path): + download_and_extract_archive(url, self.raw_folder, filename=filename, md5=md5) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + # redefined to handle the compat flag + img, target = self.data[index], self.targets[index] + img = Image.fromarray(img.numpy(), mode='L') + if self.transform is not None: + img = self.transform(img) + if self.compat: + target = int(target[0]) + if self.target_transform is not None: + target = self.target_transform(target) + return img, target + + def extra_repr(self) -> str: + return "Split: {}".format(self.what) + + +def get_int(b: bytes) -> int: + return int(codecs.encode(b, 'hex'), 16) + + +SN3_PASCALVINCENT_TYPEMAP = { + 8: (torch.uint8, np.uint8, np.uint8), + 9: (torch.int8, np.int8, np.int8), + 11: (torch.int16, np.dtype('>i2'), 'i2'), + 12: (torch.int32, np.dtype('>i4'), 'i4'), + 13: (torch.float32, np.dtype('>f4'), 'f4'), + 14: (torch.float64, np.dtype('>f8'), 'f8') +} + + +def read_sn3_pascalvincent_tensor(path: str, strict: bool = True) -> torch.Tensor: + """Read a SN3 file in "Pascal Vincent" format (Lush file 'libidx/idx-io.lsh'). + Argument may be a filename, compressed filename, or file object. + """ + # read + with open(path, "rb") as f: + data = f.read() + # parse + magic = get_int(data[0:4]) + nd = magic % 256 + ty = magic // 256 + assert 1 <= nd <= 3 + assert 8 <= ty <= 14 + m = SN3_PASCALVINCENT_TYPEMAP[ty] + s = [get_int(data[4 * (i + 1): 4 * (i + 2)]) for i in range(nd)] + parsed = np.frombuffer(data, dtype=m[1], offset=(4 * (nd + 1))) + assert parsed.shape[0] == np.prod(s) or not strict + return torch.from_numpy(parsed.astype(m[2], copy=False)).view(*s) + + +def read_label_file(path: str) -> torch.Tensor: + x = read_sn3_pascalvincent_tensor(path, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 1) + return x.long() + + +def read_image_file(path: str) -> torch.Tensor: + x = read_sn3_pascalvincent_tensor(path, strict=False) + assert(x.dtype == torch.uint8) + assert(x.ndimension() == 3) + return x diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/omniglot.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/omniglot.py new file mode 100644 index 0000000000000000000000000000000000000000..b78bf86d16f6a8be36c58e33d7e1a76fde221811 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/omniglot.py @@ -0,0 +1,98 @@ +from PIL import Image +from os.path import join +from typing import Any, Callable, List, Optional, Tuple +from .vision import VisionDataset +from .utils import download_and_extract_archive, check_integrity, list_dir, list_files + + +class Omniglot(VisionDataset): + """`Omniglot <https://github.com/brendenlake/omniglot>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``omniglot-py`` exists. + background (bool, optional): If True, creates dataset from the "background" set, otherwise + creates from the "evaluation" set. This terminology is defined by the authors. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. 
+ download (bool, optional): If true, downloads the dataset zip files from the internet and + puts it in root directory. If the zip files are already downloaded, they are not + downloaded again. + """ + folder = 'omniglot-py' + download_url_prefix = 'https://raw.githubusercontent.com/brendenlake/omniglot/master/python' + zips_md5 = { + 'images_background': '68d2efa1b9178cc56df9314c21c6e718', + 'images_evaluation': '6b91aef0f799c5bb55b94e3f2daec811' + } + + def __init__( + self, + root: str, + background: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(Omniglot, self).__init__(join(root, self.folder), transform=transform, + target_transform=target_transform) + self.background = background + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + self.target_folder = join(self.root, self._get_target_folder()) + self._alphabets = list_dir(self.target_folder) + self._characters: List[str] = sum([[join(a, c) for c in list_dir(join(self.target_folder, a))] + for a in self._alphabets], []) + self._character_images = [[(image, idx) for image in list_files(join(self.target_folder, character), '.png')] + for idx, character in enumerate(self._characters)] + self._flat_character_images: List[Tuple[str, int]] = sum(self._character_images, []) + + def __len__(self) -> int: + return len(self._flat_character_images) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target character class. + """ + image_name, character_class = self._flat_character_images[index] + image_path = join(self.target_folder, self._characters[character_class], image_name) + image = Image.open(image_path, mode='r').convert('L') + + if self.transform: + image = self.transform(image) + + if self.target_transform: + character_class = self.target_transform(character_class) + + return image, character_class + + def _check_integrity(self) -> bool: + zip_filename = self._get_target_folder() + if not check_integrity(join(self.root, zip_filename + '.zip'), self.zips_md5[zip_filename]): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + filename = self._get_target_folder() + zip_filename = filename + '.zip' + url = self.download_url_prefix + '/' + zip_filename + download_and_extract_archive(url, self.root, filename=zip_filename, md5=self.zips_md5[filename]) + + def _get_target_folder(self) -> str: + return 'images_background' if self.background else 'images_evaluation' diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/phototour.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/phototour.py new file mode 100644 index 0000000000000000000000000000000000000000..abb89701e1e92744ca6f5f7eb95d05e7178a9abc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/phototour.py @@ -0,0 +1,219 @@ +import os +import numpy as np +from PIL import Image +from typing import Any, Callable, List, Optional, Tuple, Union + +import torch +from .vision import VisionDataset + +from .utils import download_url + + +class PhotoTour(VisionDataset): + """`Multi-view Stereo Correspondence <http://matthewalunbrown.com/patchdata/patchdata.html>`_ Dataset. + + .. 
note:: + + We only provide the newer version of the dataset, since the authors state that it + + is more suitable for training descriptors based on difference of Gaussian, or Harris corners, as the + patches are centred on real interest point detections, rather than being projections of 3D points as is the + case in the old dataset. + + The original dataset is available under http://phototour.cs.washington.edu/patches/default.htm. + + + Args: + root (string): Root directory where images are. + name (string): Name of the dataset to load. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + urls = { + 'notredame_harris': [ + 'http://matthewalunbrown.com/patchdata/notredame_harris.zip', + 'notredame_harris.zip', + '69f8c90f78e171349abdf0307afefe4d' + ], + 'yosemite_harris': [ + 'http://matthewalunbrown.com/patchdata/yosemite_harris.zip', + 'yosemite_harris.zip', + 'a73253d1c6fbd3ba2613c45065c00d46' + ], + 'liberty_harris': [ + 'http://matthewalunbrown.com/patchdata/liberty_harris.zip', + 'liberty_harris.zip', + 'c731fcfb3abb4091110d0ae8c7ba182c' + ], + 'notredame': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/notredame.zip', + 'notredame.zip', + '509eda8535847b8c0a90bbb210c83484' + ], + 'yosemite': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/yosemite.zip', + 'yosemite.zip', + '533b2e8eb7ede31be40abc317b2fd4f0' + ], + 'liberty': [ + 'http://icvl.ee.ic.ac.uk/vbalnt/liberty.zip', + 'liberty.zip', + 'fdd9152f138ea5ef2091746689176414' + ], + } + means = {'notredame': 0.4854, 'yosemite': 0.4844, 'liberty': 0.4437, + 'notredame_harris': 0.4854, 'yosemite_harris': 0.4844, 'liberty_harris': 0.4437} + stds = {'notredame': 0.1864, 'yosemite': 0.1818, 'liberty': 0.2019, + 'notredame_harris': 0.1864, 'yosemite_harris': 0.1818, 'liberty_harris': 0.2019} + lens = {'notredame': 468159, 'yosemite': 633587, 'liberty': 450092, + 'liberty_harris': 379587, 'yosemite_harris': 450912, 'notredame_harris': 325295} + image_ext = 'bmp' + info_file = 'info.txt' + matches_files = 'm50_100000_100000_0.txt' + + def __init__( + self, root: str, name: str, train: bool = True, transform: Optional[Callable] = None, download: bool = False + ) -> None: + super(PhotoTour, self).__init__(root, transform=transform) + self.name = name + self.data_dir = os.path.join(self.root, name) + self.data_down = os.path.join(self.root, '{}.zip'.format(name)) + self.data_file = os.path.join(self.root, '{}.pt'.format(name)) + + self.train = train + self.mean = self.means[name] + self.std = self.stds[name] + + if download: + self.download() + + if not self._check_datafile_exists(): + self.cache() + + # load the serialized data + self.data, self.labels, self.matches = torch.load(self.data_file) + + def __getitem__(self, index: int) -> Union[torch.Tensor, Tuple[Any, Any, torch.Tensor]]: + """ + Args: + index (int): Index + + Returns: + tuple: (data1, data2, matches) + """ + if self.train: + data = self.data[index] + if self.transform is not None: + data = self.transform(data) + return data + m = self.matches[index] + data1, data2 = self.data[m[0]], self.data[m[1]] + if self.transform is not None: + data1 = self.transform(data1) + data2 = self.transform(data2) + return data1, data2, m[2] + + def __len__(self) -> int: + return len(self.data if self.train else self.matches) + + def _check_datafile_exists(self) -> bool: + 
return os.path.exists(self.data_file) + + def _check_downloaded(self) -> bool: + return os.path.exists(self.data_dir) + + def download(self) -> None: + if self._check_datafile_exists(): + print('# Found cached data {}'.format(self.data_file)) + return + + if not self._check_downloaded(): + # download files + url = self.urls[self.name][0] + filename = self.urls[self.name][1] + md5 = self.urls[self.name][2] + fpath = os.path.join(self.root, filename) + + download_url(url, self.root, filename, md5) + + print('# Extracting data {}\n'.format(self.data_down)) + + import zipfile + with zipfile.ZipFile(fpath, 'r') as z: + z.extractall(self.data_dir) + + os.unlink(fpath) + + def cache(self) -> None: + # process and save as torch files + print('# Caching data {}'.format(self.data_file)) + + dataset = ( + read_image_file(self.data_dir, self.image_ext, self.lens[self.name]), + read_info_file(self.data_dir, self.info_file), + read_matches_files(self.data_dir, self.matches_files) + ) + + with open(self.data_file, 'wb') as f: + torch.save(dataset, f) + + def extra_repr(self) -> str: + return "Split: {}".format("Train" if self.train is True else "Test") + + +def read_image_file(data_dir: str, image_ext: str, n: int) -> torch.Tensor: + """Return a Tensor containing the patches + """ + + def PIL2array(_img: Image.Image) -> np.ndarray: + """Convert PIL image type to numpy 2D array + """ + return np.array(_img.getdata(), dtype=np.uint8).reshape(64, 64) + + def find_files(_data_dir: str, _image_ext: str) -> List[str]: + """Return a list with the file names of the images containing the patches + """ + files = [] + # find those files with the specified extension + for file_dir in os.listdir(_data_dir): + if file_dir.endswith(_image_ext): + files.append(os.path.join(_data_dir, file_dir)) + return sorted(files) # sort files in ascend order to keep relations + + patches = [] + list_files = find_files(data_dir, image_ext) + + for fpath in list_files: + img = Image.open(fpath) + for y in range(0, img.height, 64): + for x in range(0, img.width, 64): + patch = img.crop((x, y, x + 64, y + 64)) + patches.append(PIL2array(patch)) + return torch.ByteTensor(np.array(patches[:n])) + + +def read_info_file(data_dir: str, info_file: str) -> torch.Tensor: + """Return a Tensor containing the list of labels + Read the file and keep only the ID of the 3D point. + """ + with open(os.path.join(data_dir, info_file), 'r') as f: + labels = [int(line.split()[0]) for line in f] + return torch.LongTensor(labels) + + +def read_matches_files(data_dir: str, matches_file: str) -> torch.Tensor: + """Return a Tensor containing the ground truth matches + Read the file and keep only 3D point ID. + Matches are represented with a 1, non matches with a 0. 
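A minimal usage sketch of the PhotoTour class defined in this file; the root path is hypothetical and download=True assumes the listed archive URLs are still reachable:

    from torchvision.datasets import PhotoTour

    # 'name' must be a key of PhotoTour.urls; with train=False, indexing yields
    # (patch_1, patch_2, match_flag) triplets of 64x64 byte tensors
    dataset = PhotoTour("datasets/phototour", name="notredame",
                        train=False, download=True)
    patch1, patch2, is_match = dataset[0]
    print(len(dataset), patch1.shape, int(is_match))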
+ """ + matches = [] + with open(os.path.join(data_dir, matches_file), 'r') as f: + for line in f: + line_split = line.split() + matches.append([int(line_split[0]), int(line_split[3]), + int(line_split[1] == line_split[4])]) + return torch.LongTensor(matches) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/places365.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/places365.py new file mode 100644 index 0000000000000000000000000000000000000000..648e0d604ba46e6d608e91489fdcf4992a62124c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/places365.py @@ -0,0 +1,170 @@ +import os +from os import path +from typing import Any, Callable, Dict, List, Optional, Tuple +from urllib.parse import urljoin + +from .folder import default_loader +from .utils import verify_str_arg, check_integrity, download_and_extract_archive +from .vision import VisionDataset + + +class Places365(VisionDataset): + r"""`Places365 <http://places2.csail.mit.edu/index.html>`_ classification dataset. + + Args: + root (string): Root directory of the Places365 dataset. + split (string, optional): The dataset split. Can be one of ``train-standard`` (default), ``train-challenge``, + ``val``. + small (bool, optional): If ``True``, uses the small images, i. e. resized to 256 x 256 pixels, instead of the + high resolution ones. + download (bool, optional): If ``True``, downloads the dataset components and places them in ``root``. Already + downloaded archives are not downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + loader (callable, optional): A function to load an image given its path. + + Attributes: + classes (list): List of the class names. + class_to_idx (dict): Dict with items (class_name, class_index). + imgs (list): List of (image path, class_index) tuples + targets (list): The class_index value for each image in the dataset + + Raises: + RuntimeError: If ``download is False`` and the meta files, i. e. the devkit, are not present or corrupted. + RuntimeError: If ``download is True`` and the image archive is already extracted. 
+ """ + _SPLITS = ("train-standard", "train-challenge", "val") + _BASE_URL = "http://data.csail.mit.edu/places/places365/" + # {variant: (archive, md5)} + _DEVKIT_META = { + "standard": ("filelist_places365-standard.tar", "35a0585fee1fa656440f3ab298f8479c"), + "challenge": ("filelist_places365-challenge.tar", "70a8307e459c3de41690a7c76c931734"), + } + # (file, md5) + _CATEGORIES_META = ("categories_places365.txt", "06c963b85866bd0649f97cb43dd16673") + # {split: (file, md5)} + _FILE_LIST_META = { + "train-standard": ("places365_train_standard.txt", "30f37515461640559006b8329efbed1a"), + "train-challenge": ("places365_train_challenge.txt", "b2931dc997b8c33c27e7329c073a6b57"), + "val": ("places365_val.txt", "e9f2fd57bfd9d07630173f4e8708e4b1"), + } + # {(split, small): (file, md5)} + _IMAGES_META = { + ("train-standard", False): ("train_large_places365standard.tar", "67e186b496a84c929568076ed01a8aa1"), + ("train-challenge", False): ("train_large_places365challenge.tar", "605f18e68e510c82b958664ea134545f"), + ("val", False): ("val_large.tar", "9b71c4993ad89d2d8bcbdc4aef38042f"), + ("train-standard", True): ("train_256_places365standard.tar", "53ca1c756c3d1e7809517cc47c5561c5"), + ("train-challenge", True): ("train_256_places365challenge.tar", "741915038a5e3471ec7332404dfb64ef"), + ("val", True): ("val_256.tar", "e27b17d8d44f4af9a78502beb927f808"), + } + + def __init__( + self, + root: str, + split: str = "train-standard", + small: bool = False, + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + loader: Callable[[str], Any] = default_loader, + ) -> None: + super().__init__(root, transform=transform, target_transform=target_transform) + + self.split = self._verify_split(split) + self.small = small + self.loader = loader + + self.classes, self.class_to_idx = self.load_categories(download) + self.imgs, self.targets = self.load_file_list(download) + + if download: + self.download_images() + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + file, target = self.imgs[index] + image = self.loader(file) + + if self.transforms is not None: + image, target = self.transforms(image, target) + + return image, target + + def __len__(self) -> int: + return len(self.imgs) + + @property + def variant(self) -> str: + return "challenge" if "challenge" in self.split else "standard" + + @property + def images_dir(self) -> str: + size = "256" if self.small else "large" + if self.split.startswith("train"): + dir = f"data_{size}_{self.variant}" + else: + dir = f"{self.split}_{size}" + return path.join(self.root, dir) + + def load_categories(self, download: bool = True) -> Tuple[List[str], Dict[str, int]]: + def process(line: str) -> Tuple[str, int]: + cls, idx = line.split() + return cls, int(idx) + + file, md5 = self._CATEGORIES_META + file = path.join(self.root, file) + if not self._check_integrity(file, md5, download): + self.download_devkit() + + with open(file, "r") as fh: + class_to_idx = dict(process(line) for line in fh) + + return sorted(class_to_idx.keys()), class_to_idx + + def load_file_list(self, download: bool = True) -> Tuple[List[Tuple[str, int]], List[int]]: + def process(line: str, sep="/") -> Tuple[str, int]: + image, idx = line.split() + return path.join(self.images_dir, image.lstrip(sep).replace(sep, os.sep)), int(idx) + + file, md5 = self._FILE_LIST_META[self.split] + file = path.join(self.root, file) + if not self._check_integrity(file, md5, download): + self.download_devkit() + + with open(file, "r") as fh: + images = 
[process(line) for line in fh] + + _, targets = zip(*images) + return images, list(targets) + + def download_devkit(self) -> None: + file, md5 = self._DEVKIT_META[self.variant] + download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5) + + def download_images(self) -> None: + if path.exists(self.images_dir): + raise RuntimeError( + f"The directory {self.images_dir} already exists. If you want to re-download or re-extract the images, " + f"delete the directory." + ) + + file, md5 = self._IMAGES_META[(self.split, self.small)] + download_and_extract_archive(urljoin(self._BASE_URL, file), self.root, md5=md5) + + if self.split.startswith("train"): + os.rename(self.images_dir.rsplit("_", 1)[0], self.images_dir) + + def extra_repr(self) -> str: + return "\n".join(("Split: {split}", "Small: {small}")).format(**self.__dict__) + + def _verify_split(self, split: str) -> str: + return verify_str_arg(split, "split", self._SPLITS) + + def _check_integrity(self, file: str, md5: str, download: bool) -> bool: + integrity = check_integrity(file, md5=md5) + if not integrity and not download: + raise RuntimeError( + f"The file {file} does not exist or is corrupted. You can set download=True to download it." + ) + return integrity diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..870322d39b43c366c2862c942d96acfc81b0668a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/__init__.py @@ -0,0 +1,3 @@ +from .clip_sampler import DistributedSampler, UniformClipSampler, RandomClipSampler + +__all__ = ('DistributedSampler', 'UniformClipSampler', 'RandomClipSampler') diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/clip_sampler.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/clip_sampler.py new file mode 100644 index 0000000000000000000000000000000000000000..0f90e3ad1b0ea6ceb2b9365042f520b407e10574 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/samplers/clip_sampler.py @@ -0,0 +1,181 @@ +import math +import torch +from torch.utils.data import Sampler +import torch.distributed as dist +from torchvision.datasets.video_utils import VideoClips +from typing import Optional, List, Iterator, Sized, Union, cast + + +class DistributedSampler(Sampler): + """ + Extension of DistributedSampler, as discussed in + https://github.com/pytorch/pytorch/issues/23430 + + Example: + dataset: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13] + num_replicas: 4 + shuffle: False + + when group_size = 1 + RANK | shard_dataset + ========================= + rank_0 | [0, 4, 8, 12] + rank_1 | [1, 5, 9, 13] + rank_2 | [2, 6, 10, 0] + rank_3 | [3, 7, 11, 1] + + when group_size = 2 + + RANK | shard_dataset + ========================= + rank_0 | [0, 1, 8, 9] + rank_1 | [2, 3, 10, 11] + rank_2 | [4, 5, 12, 13] + rank_3 | [6, 7, 0, 1] + + """ + + def __init__( + self, + dataset: Sized, + num_replicas: Optional[int] = None, + rank: Optional[int] = None, + shuffle: bool = False, + group_size: int = 1, + ) -> None: + if num_replicas is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + num_replicas = dist.get_world_size() + if rank is None: + if not dist.is_available(): + raise RuntimeError("Requires distributed package to be available") + rank = 
dist.get_rank() + assert len(dataset) % group_size == 0, ( + "dataset length must be a multiplier of group size" + "dataset length: %d, group size: %d" % (len(dataset), group_size) + ) + self.dataset = dataset + self.group_size = group_size + self.num_replicas = num_replicas + self.rank = rank + self.epoch = 0 + dataset_group_length = len(dataset) // group_size + self.num_group_samples = int( + math.ceil(dataset_group_length * 1.0 / self.num_replicas) + ) + self.num_samples = self.num_group_samples * group_size + self.total_size = self.num_samples * self.num_replicas + self.shuffle = shuffle + + def __iter__(self) -> Iterator[int]: + # deterministically shuffle based on epoch + g = torch.Generator() + g.manual_seed(self.epoch) + indices: Union[torch.Tensor, List[int]] + if self.shuffle: + indices = torch.randperm(len(self.dataset), generator=g).tolist() + else: + indices = list(range(len(self.dataset))) + + # add extra samples to make it evenly divisible + indices += indices[:(self.total_size - len(indices))] + assert len(indices) == self.total_size + + total_group_size = self.total_size // self.group_size + indices = torch.reshape( + torch.LongTensor(indices), (total_group_size, self.group_size) + ) + + # subsample + indices = indices[self.rank:total_group_size:self.num_replicas, :] + indices = torch.reshape(indices, (-1,)).tolist() + assert len(indices) == self.num_samples + + if isinstance(self.dataset, Sampler): + orig_indices = list(iter(self.dataset)) + indices = [orig_indices[i] for i in indices] + + return iter(indices) + + def __len__(self) -> int: + return self.num_samples + + def set_epoch(self, epoch: int) -> None: + self.epoch = epoch + + +class UniformClipSampler(Sampler): + """ + Sample `num_video_clips_per_video` clips for each video, equally spaced. 
+ When number of unique clips in the video is fewer than num_video_clips_per_video, + repeat the clips until `num_video_clips_per_video` clips are collected + + Args: + video_clips (VideoClips): video clips to sample from + num_clips_per_video (int): number of clips to be sampled per video + """ + def __init__(self, video_clips: VideoClips, num_clips_per_video: int) -> None: + if not isinstance(video_clips, VideoClips): + raise TypeError("Expected video_clips to be an instance of VideoClips, " + "got {}".format(type(video_clips))) + self.video_clips = video_clips + self.num_clips_per_video = num_clips_per_video + + def __iter__(self) -> Iterator[int]: + idxs = [] + s = 0 + # select num_clips_per_video for each video, uniformly spaced + for c in self.video_clips.clips: + length = len(c) + if length == 0: + # corner case where video decoding fails + continue + + sampled = ( + torch.linspace(s, s + length - 1, steps=self.num_clips_per_video) + .floor() + .to(torch.int64) + ) + s += length + idxs.append(sampled) + return iter(cast(List[int], torch.cat(idxs).tolist())) + + def __len__(self) -> int: + return sum( + self.num_clips_per_video for c in self.video_clips.clips if len(c) > 0 + ) + + +class RandomClipSampler(Sampler): + """ + Samples at most `max_video_clips_per_video` clips for each video randomly + + Args: + video_clips (VideoClips): video clips to sample from + max_clips_per_video (int): maximum number of clips to be sampled per video + """ + def __init__(self, video_clips: VideoClips, max_clips_per_video: int) -> None: + if not isinstance(video_clips, VideoClips): + raise TypeError("Expected video_clips to be an instance of VideoClips, " + "got {}".format(type(video_clips))) + self.video_clips = video_clips + self.max_clips_per_video = max_clips_per_video + + def __iter__(self) -> Iterator[int]: + idxs = [] + s = 0 + # select at most max_clips_per_video for each video, randomly + for c in self.video_clips.clips: + length = len(c) + size = min(length, self.max_clips_per_video) + sampled = torch.randperm(length)[:size] + s + s += length + idxs.append(sampled) + idxs_ = torch.cat(idxs) + # shuffle all clips randomly + perm = torch.randperm(len(idxs_)) + return iter(idxs_[perm].tolist()) + + def __len__(self) -> int: + return sum(min(len(c), self.max_clips_per_video) for c in self.video_clips.clips) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbd.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbd.py new file mode 100644 index 0000000000000000000000000000000000000000..e47c94938580b6a61ac24c7026fbc29a0ed1a758 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbd.py @@ -0,0 +1,126 @@ +import os +import shutil +from .vision import VisionDataset +from typing import Any, Callable, Optional, Tuple + +import numpy as np + +from PIL import Image +from .utils import download_url, verify_str_arg, download_and_extract_archive + + +class SBDataset(VisionDataset): + """`Semantic Boundaries Dataset <http://home.bharathh.info/pubs/codes/SBD/download.html>`_ + + The SBD currently contains annotations from 11355 images taken from the PASCAL VOC 2011 dataset. + + .. note :: + + Please note that the train and val splits included with this dataset are different from + the splits in the PASCAL VOC dataset. In particular some "train" images might be part of + VOC2012 val. + If you are interested in testing on VOC 2012 val, then use `image_set='train_noval'`, + which excludes all val images. + + .. 
warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load target files from `.mat` format. + + Args: + root (string): Root directory of the Semantic Boundaries Dataset + image_set (string, optional): Select the image_set to use, ``train``, ``val`` or ``train_noval``. + Image set ``train_noval`` excludes VOC 2012 val images. + mode (string, optional): Select target type. Possible values 'boundaries' or 'segmentation'. + In case of 'boundaries', the target is an array of shape `[num_classes, H, W]`, + where `num_classes=20`. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. Input sample is PIL image and target is a numpy array + if `mode='boundaries'` or PIL image if `mode='segmentation'`. + """ + + url = "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz" + md5 = "82b4d87ceb2ed10f6038a1cba92111cb" + filename = "benchmark.tgz" + + voc_train_url = "http://home.bharathh.info/pubs/codes/SBD/train_noval.txt" + voc_split_filename = "train_noval.txt" + voc_split_md5 = "79bff800c5f0b1ec6b21080a3c066722" + + def __init__( + self, + root: str, + image_set: str = "train", + mode: str = "boundaries", + download: bool = False, + transforms: Optional[Callable] = None, + ) -> None: + + try: + from scipy.io import loadmat + self._loadmat = loadmat + except ImportError: + raise RuntimeError("Scipy is not found. This dataset needs to have scipy installed: " + "pip install scipy") + + super(SBDataset, self).__init__(root, transforms) + self.image_set = verify_str_arg(image_set, "image_set", + ("train", "val", "train_noval")) + self.mode = verify_str_arg(mode, "mode", ("segmentation", "boundaries")) + self.num_classes = 20 + + sbd_root = self.root + image_dir = os.path.join(sbd_root, 'img') + mask_dir = os.path.join(sbd_root, 'cls') + + if download: + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5) + extracted_ds_root = os.path.join(self.root, "benchmark_RELEASE", "dataset") + for f in ["cls", "img", "inst", "train.txt", "val.txt"]: + old_path = os.path.join(extracted_ds_root, f) + shutil.move(old_path, sbd_root) + download_url(self.voc_train_url, sbd_root, self.voc_split_filename, + self.voc_split_md5) + + if not os.path.isdir(sbd_root): + raise RuntimeError('Dataset not found or corrupted.' 
+ + ' You can use download=True to download it') + + split_f = os.path.join(sbd_root, image_set.rstrip('\n') + '.txt') + + with open(os.path.join(split_f), "r") as fh: + file_names = [x.strip() for x in fh.readlines()] + + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + self.masks = [os.path.join(mask_dir, x + ".mat") for x in file_names] + assert (len(self.images) == len(self.masks)) + + self._get_target = self._get_segmentation_target \ + if self.mode == "segmentation" else self._get_boundaries_target + + def _get_segmentation_target(self, filepath: str) -> Image.Image: + mat = self._loadmat(filepath) + return Image.fromarray(mat['GTcls'][0]['Segmentation'][0]) + + def _get_boundaries_target(self, filepath: str) -> np.ndarray: + mat = self._loadmat(filepath) + return np.concatenate([np.expand_dims(mat['GTcls'][0]['Boundaries'][0][i][0].toarray(), axis=0) + for i in range(self.num_classes)], axis=0) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + img = Image.open(self.images[index]).convert('RGB') + target = self._get_target(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def __len__(self) -> int: + return len(self.images) + + def extra_repr(self) -> str: + lines = ["Image set: {image_set}", "Mode: {mode}"] + return '\n'.join(lines).format(**self.__dict__) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbu.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbu.py new file mode 100644 index 0000000000000000000000000000000000000000..6c8ad15686b6b1928e4288ff597be496228dcbb8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/sbu.py @@ -0,0 +1,114 @@ +from PIL import Image +from .utils import download_url, check_integrity +from typing import Any, Callable, Optional, Tuple + +import os +from .vision import VisionDataset + + +class SBU(VisionDataset): + """`SBU Captioned Photo <http://www.cs.virginia.edu/~vicente/sbucaptions/>`_ Dataset. + + Args: + root (string): Root directory of dataset where tarball + ``SBUCaptionedPhotoDataset.tar.gz`` exists. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If True, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + """ + url = "http://www.cs.virginia.edu/~vicente/sbucaptions/SBUCaptionedPhotoDataset.tar.gz" + filename = "SBUCaptionedPhotoDataset.tar.gz" + md5_checksum = '9aec147b3488753cf758b4d493422285' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = True, + ) -> None: + super(SBU, self).__init__(root, transform=transform, + target_transform=target_transform) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' 
+ + ' You can use download=True to download it') + + # Read the caption for each photo + self.photos = [] + self.captions = [] + + file1 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt') + file2 = os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_captions.txt') + + for line1, line2 in zip(open(file1), open(file2)): + url = line1.rstrip() + photo = os.path.basename(url) + filename = os.path.join(self.root, 'dataset', photo) + if os.path.exists(filename): + caption = line2.rstrip() + self.photos.append(photo) + self.captions.append(caption) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a caption for the photo. + """ + filename = os.path.join(self.root, 'dataset', self.photos[index]) + img = Image.open(filename).convert('RGB') + if self.transform is not None: + img = self.transform(img) + + target = self.captions[index] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + """The number of photos in the dataset.""" + return len(self.photos) + + def _check_integrity(self) -> bool: + """Check the md5 checksum of the downloaded tarball.""" + root = self.root + fpath = os.path.join(root, self.filename) + if not check_integrity(fpath, self.md5_checksum): + return False + return True + + def download(self) -> None: + """Download and extract the tarball, and download each individual photo.""" + import tarfile + + if self._check_integrity(): + print('Files already downloaded and verified') + return + + download_url(self.url, self.root, self.filename, self.md5_checksum) + + # Extract file + with tarfile.open(os.path.join(self.root, self.filename), 'r:gz') as tar: + tar.extractall(path=self.root) + + # Download individual photos + with open(os.path.join(self.root, 'dataset', 'SBU_captioned_photo_dataset_urls.txt')) as fh: + for line in fh: + url = line.rstrip() + try: + download_url(url, os.path.join(self.root, 'dataset')) + except OSError: + # The images point to public images on Flickr. + # Note: Images might be removed by users at anytime. + pass diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/semeion.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/semeion.py new file mode 100644 index 0000000000000000000000000000000000000000..20ce4e5f5d513192def8efe9845cc866f2290a37 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/semeion.py @@ -0,0 +1,92 @@ +from PIL import Image +import os +import os.path +import numpy as np +from typing import Any, Callable, Optional, Tuple +from .vision import VisionDataset +from .utils import download_url, check_integrity + + +class SEMEION(VisionDataset): + r"""`SEMEION <http://archive.ics.uci.edu/ml/datasets/semeion+handwritten+digit>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``semeion.py`` exists. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
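+
+    Example (an illustrative sketch added to this docstring, not upstream text;
+    the ``./data`` root below is a placeholder path)::
+
+        from torchvision import datasets
+
+        dataset = datasets.SEMEION(root='./data', download=True)
+        img, target = dataset[0]   # 16x16 PIL image (mode 'L'), label in [0, 9]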
+ + """ + url = "http://archive.ics.uci.edu/ml/machine-learning-databases/semeion/semeion.data" + filename = "semeion.data" + md5_checksum = 'cb545d371d2ce14ec121470795a77432' + + def __init__( + self, + root: str, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = True, + ) -> None: + super(SEMEION, self).__init__(root, transform=transform, + target_transform=target_transform) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + fp = os.path.join(self.root, self.filename) + data = np.loadtxt(fp) + # convert value to 8 bit unsigned integer + # color (white #255) the pixels + self.data = (data[:, :256] * 255).astype('uint8') + self.data = np.reshape(self.data, (-1, 16, 16)) + self.labels = np.nonzero(data[:, 256:])[1] + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. + """ + img, target = self.data[index], int(self.labels[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img, mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + fpath = os.path.join(root, self.filename) + if not check_integrity(fpath, self.md5_checksum): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + root = self.root + download_url(self.url, root, self.filename, self.md5_checksum) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/stl10.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/stl10.py new file mode 100644 index 0000000000000000000000000000000000000000..50e9af882bc8ecd53eaf1e61dd4f7ca384ed4fd1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/stl10.py @@ -0,0 +1,187 @@ +from PIL import Image +import os +import os.path +import numpy as np +from typing import Any, Callable, Optional, Tuple + +from .vision import VisionDataset +from .utils import check_integrity, download_and_extract_archive, verify_str_arg + + +class STL10(VisionDataset): + """`STL10 <https://cs.stanford.edu/~acoates/stl10/>`_ Dataset. + + Args: + root (string): Root directory of dataset where directory + ``stl10_binary`` exists. + split (string): One of {'train', 'test', 'unlabeled', 'train+unlabeled'}. + Accordingly dataset is selected. + folds (int, optional): One of {0-9} or None. + For training, loads one of the 10 pre-defined folds of 1k samples for the + standard evaluation procedure. If no value is passed, loads the 5k samples. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. 
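+
+    Example (an illustrative sketch added to this docstring, not upstream text;
+    the ``./data`` root below is a placeholder path)::
+
+        from torchvision import datasets
+
+        # samples from the unlabeled part carry the label -1
+        dataset = datasets.STL10(root='./data', split='train+unlabeled',
+                                 download=True)
+        img, target = dataset[0]   # 96x96 RGB PIL image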
+ """ + base_folder = 'stl10_binary' + url = "http://ai.stanford.edu/~acoates/stl10/stl10_binary.tar.gz" + filename = "stl10_binary.tar.gz" + tgz_md5 = '91f7769df0f17e558f3565bffb0c7dfb' + class_names_file = 'class_names.txt' + folds_list_file = 'fold_indices.txt' + train_list = [ + ['train_X.bin', '918c2871b30a85fa023e0c44e0bee87f'], + ['train_y.bin', '5a34089d4802c674881badbb80307741'], + ['unlabeled_X.bin', '5242ba1fed5e4be9e1e742405eb56ca4'] + ] + + test_list = [ + ['test_X.bin', '7f263ba9f9e0b06b93213547f721ac82'], + ['test_y.bin', '36f9794fa4beb8a2c72628de14fa638e'] + ] + splits = ('train', 'train+unlabeled', 'unlabeled', 'test') + + def __init__( + self, + root: str, + split: str = "train", + folds: Optional[int] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(STL10, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", self.splits) + self.folds = self._verify_folds(folds) + + if download: + self.download() + elif not self._check_integrity(): + raise RuntimeError( + 'Dataset not found or corrupted. ' + 'You can use download=True to download it') + + # now load the picked numpy arrays + self.labels: Optional[np.ndarray] + if self.split == 'train': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + + elif self.split == 'train+unlabeled': + self.data, self.labels = self.__loadfile( + self.train_list[0][0], self.train_list[1][0]) + self.__load_folds(folds) + unlabeled_data, _ = self.__loadfile(self.train_list[2][0]) + self.data = np.concatenate((self.data, unlabeled_data)) + self.labels = np.concatenate( + (self.labels, np.asarray([-1] * unlabeled_data.shape[0]))) + + elif self.split == 'unlabeled': + self.data, _ = self.__loadfile(self.train_list[2][0]) + self.labels = np.asarray([-1] * self.data.shape[0]) + else: # self.split == 'test': + self.data, self.labels = self.__loadfile( + self.test_list[0][0], self.test_list[1][0]) + + class_file = os.path.join( + self.root, self.base_folder, self.class_names_file) + if os.path.isfile(class_file): + with open(class_file) as f: + self.classes = f.read().splitlines() + + def _verify_folds(self, folds: Optional[int]) -> Optional[int]: + if folds is None: + return folds + elif isinstance(folds, int): + if folds in range(10): + return folds + msg = ("Value for argument folds should be in the range [0, 10), " + "but got {}.") + raise ValueError(msg.format(folds)) + else: + msg = "Expected type None or int for argument folds, but got type {}." + raise ValueError(msg.format(type(folds))) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + target: Optional[int] + if self.labels is not None: + img, target = self.data[index], int(self.labels[index]) + else: + img, target = self.data[index], None + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return self.data.shape[0] + + def __loadfile(self, data_file: str, labels_file: Optional[str] = None) -> Tuple[np.ndarray, Optional[np.ndarray]]: + labels = None + if labels_file: + path_to_labels = os.path.join( + self.root, self.base_folder, labels_file) + with open(path_to_labels, 'rb') as f: + labels = np.fromfile(f, dtype=np.uint8) - 1 # 0-based + + path_to_data = os.path.join(self.root, self.base_folder, data_file) + with open(path_to_data, 'rb') as f: + # read whole file in uint8 chunks + everything = np.fromfile(f, dtype=np.uint8) + images = np.reshape(everything, (-1, 3, 96, 96)) + images = np.transpose(images, (0, 1, 3, 2)) + + return images, labels + + def _check_integrity(self) -> bool: + root = self.root + for fentry in (self.train_list + self.test_list): + filename, md5 = fentry[0], fentry[1] + fpath = os.path.join(root, self.base_folder, filename) + if not check_integrity(fpath, md5): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.tgz_md5) + self._check_integrity() + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) + + def __load_folds(self, folds: Optional[int]) -> None: + # loads one of the folds if specified + if folds is None: + return + path_to_folds = os.path.join( + self.root, self.base_folder, self.folds_list_file) + with open(path_to_folds, 'r') as f: + str_idx = f.read().splitlines()[folds] + list_idx = np.fromstring(str_idx, dtype=np.int64, sep=' ') + self.data = self.data[list_idx, :, :, :] + if self.labels is not None: + self.labels = self.labels[list_idx] diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/svhn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/svhn.py new file mode 100644 index 0000000000000000000000000000000000000000..f1adee687ebe675f3d835fe280027c205f1cd76b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/svhn.py @@ -0,0 +1,121 @@ +from .vision import VisionDataset +from PIL import Image +import os +import os.path +import numpy as np +from typing import Any, Callable, Optional, Tuple +from .utils import download_url, check_integrity, verify_str_arg + + +class SVHN(VisionDataset): + """`SVHN <http://ufldl.stanford.edu/housenumbers/>`_ Dataset. + Note: The SVHN dataset assigns the label `10` to the digit `0`. However, in this Dataset, + we assign the label `0` to the digit `0` to be compatible with PyTorch loss functions which + expect the class labels to be in the range `[0, C-1]` + + .. warning:: + + This class needs `scipy <https://docs.scipy.org/doc/>`_ to load data from `.mat` format. + + Args: + root (string): Root directory of dataset where directory + ``SVHN`` exists. + split (string): One of {'train', 'test', 'extra'}. + Accordingly dataset is selected. 'extra' is Extra training set. 
+ transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + split_list = { + 'train': ["http://ufldl.stanford.edu/housenumbers/train_32x32.mat", + "train_32x32.mat", "e26dedcc434d2e4c54c9b2d4a06d8373"], + 'test': ["http://ufldl.stanford.edu/housenumbers/test_32x32.mat", + "test_32x32.mat", "eb5a983be6a315427106f1b164d9cef3"], + 'extra': ["http://ufldl.stanford.edu/housenumbers/extra_32x32.mat", + "extra_32x32.mat", "a93ce644f1a588dc4d68dda5feec44a7"]} + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(SVHN, self).__init__(root, transform=transform, + target_transform=target_transform) + self.split = verify_str_arg(split, "split", tuple(self.split_list.keys())) + self.url = self.split_list[split][0] + self.filename = self.split_list[split][1] + self.file_md5 = self.split_list[split][2] + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError('Dataset not found or corrupted.' + + ' You can use download=True to download it') + + # import here rather than at top of file because this is + # an optional dependency for torchvision + import scipy.io as sio + + # reading(loading) mat file as array + loaded_mat = sio.loadmat(os.path.join(self.root, self.filename)) + + self.data = loaded_mat['X'] + # loading from the .mat file gives an np array of type np.uint8 + # converting to np.int64, so that we have a LongTensor after + # the conversion from the numpy array + # the squeeze is needed to obtain a 1D tensor + self.labels = loaded_mat['y'].astype(np.int64).squeeze() + + # the svhn dataset assigns the class label "10" to the digit 0 + # this makes it inconsistent with several loss functions + # which expect the class labels to be in the range [0, C-1] + np.place(self.labels, self.labels == 10, 0) + self.data = np.transpose(self.data, (3, 2, 0, 1)) + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], int(self.labels[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(np.transpose(img, (1, 2, 0))) + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) + + def _check_integrity(self) -> bool: + root = self.root + md5 = self.split_list[self.split][2] + fpath = os.path.join(root, self.filename) + return check_integrity(fpath, md5) + + def download(self) -> None: + md5 = self.split_list[self.split][2] + download_url(self.url, self.root, self.filename, md5) + + def extra_repr(self) -> str: + return "Split: {split}".format(**self.__dict__) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/ucf101.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/ucf101.py new file mode 100644 index 0000000000000000000000000000000000000000..71f62257bcb5d3543abd32079e2b80ee66017493 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/ucf101.py @@ -0,0 +1,108 @@ +import os + +from .folder import find_classes, make_dataset +from .video_utils import VideoClips +from .vision import VisionDataset + + +class UCF101(VisionDataset): + """ + `UCF101 <https://www.crcv.ucf.edu/data/UCF101.php>`_ dataset. + + UCF101 is an action recognition video dataset. + This dataset consider every video as a collection of video clips of fixed size, specified + by ``frames_per_clip``, where the step in frames between each clip is given by + ``step_between_clips``. + + To give an example, for 2 videos with 10 and 15 frames respectively, if ``frames_per_clip=5`` + and ``step_between_clips=5``, the dataset size will be (2 + 3) = 5, where the first two + elements will come from video 1, and the next three elements from video 2. + Note that we drop clips which do not have exactly ``frames_per_clip`` elements, so not all + frames in a video might be present. + + Internally, it uses a VideoClips object to handle clip creation. + + Args: + root (string): Root directory of the UCF101 Dataset. + annotation_path (str): path to the folder containing the split files + frames_per_clip (int): number of frames in a clip. + step_between_clips (int, optional): number of frames between each clip. + fold (int, optional): which fold to use. Should be between 1 and 3. + train (bool, optional): if ``True``, creates a dataset from the train split, + otherwise from the ``test`` split. + transform (callable, optional): A function/transform that takes in a TxHxWxC video + and returns a transformed version. 
+ + Returns: + tuple: A 3-tuple with the following entries: + + - video (Tensor[T, H, W, C]): the `T` video frames + - audio(Tensor[K, L]): the audio frames, where `K` is the number of channels + and `L` is the number of points + - label (int): class of the video clip + """ + + def __init__(self, root, annotation_path, frames_per_clip, step_between_clips=1, + frame_rate=None, fold=1, train=True, transform=None, + _precomputed_metadata=None, num_workers=1, _video_width=0, + _video_height=0, _video_min_dimension=0, _audio_samples=0): + super(UCF101, self).__init__(root) + if not 1 <= fold <= 3: + raise ValueError("fold should be between 1 and 3, got {}".format(fold)) + + extensions = ('avi',) + self.fold = fold + self.train = train + + self.classes, class_to_idx = find_classes(self.root) + self.samples = make_dataset(self.root, class_to_idx, extensions, is_valid_file=None) + video_list = [x[0] for x in self.samples] + video_clips = VideoClips( + video_list, + frames_per_clip, + step_between_clips, + frame_rate, + _precomputed_metadata, + num_workers=num_workers, + _video_width=_video_width, + _video_height=_video_height, + _video_min_dimension=_video_min_dimension, + _audio_samples=_audio_samples, + ) + # we bookkeep the full version of video clips because we want to be able + # to return the meta data of full version rather than the subset version of + # video clips + self.full_video_clips = video_clips + self.indices = self._select_fold(video_list, annotation_path, fold, train) + self.video_clips = video_clips.subset(self.indices) + self.transform = transform + + @property + def metadata(self): + return self.full_video_clips.metadata + + def _select_fold(self, video_list, annotation_path, fold, train): + name = "train" if train else "test" + name = "{}list{:02d}.txt".format(name, fold) + f = os.path.join(annotation_path, name) + selected_files = [] + with open(f, "r") as fid: + data = fid.readlines() + data = [x.strip().split(" ") for x in data] + data = [os.path.join(self.root, x[0]) for x in data] + selected_files.extend(data) + selected_files = set(selected_files) + indices = [i for i in range(len(video_list)) if video_list[i] in selected_files] + return indices + + def __len__(self): + return self.video_clips.num_clips() + + def __getitem__(self, idx): + video, audio, info, video_idx = self.video_clips.get_clip(idx) + label = self.samples[self.indices[video_idx]][1] + + if self.transform is not None: + video = self.transform(video) + + return video, audio, label diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/usps.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/usps.py new file mode 100644 index 0000000000000000000000000000000000000000..c315b8d3111f522dc0ce1d5253afed01878b609d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/usps.py @@ -0,0 +1,91 @@ +from PIL import Image +import os +import numpy as np +from typing import Any, Callable, cast, Optional, Tuple + +from .utils import download_url +from .vision import VisionDataset + + +class USPS(VisionDataset): + """`USPS <https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass.html#usps>`_ Dataset. + The data-format is : [label [index:value ]*256 \\n] * num_lines, where ``label`` lies in ``[1, 10]``. + The value for each pixel lies in ``[-1, 1]``. Here we transform the ``label`` into ``[0, 9]`` + and make pixel values in ``[0, 255]``. + + Args: + root (string): Root directory of dataset to store``USPS`` data files. 
+ train (bool, optional): If True, creates dataset from ``usps.bz2``, + otherwise from ``usps.t.bz2``. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + split_list = { + 'train': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.bz2", + "usps.bz2", 'ec16c51db3855ca6c91edd34d0e9b197' + ], + 'test': [ + "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/multiclass/usps.t.bz2", + "usps.t.bz2", '8ea070ee2aca1ac39742fdd1ef5ed118' + ], + } + + def __init__( + self, + root: str, + train: bool = True, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(USPS, self).__init__(root, transform=transform, + target_transform=target_transform) + split = 'train' if train else 'test' + url, filename, checksum = self.split_list[split] + full_path = os.path.join(self.root, filename) + + if download and not os.path.exists(full_path): + download_url(url, self.root, filename, md5=checksum) + + import bz2 + with bz2.open(full_path) as fp: + raw_data = [line.decode().split() for line in fp.readlines()] + tmp_list = [[x.split(':')[-1] for x in data[1:]] for data in raw_data] + imgs = np.asarray(tmp_list, dtype=np.float32).reshape((-1, 16, 16)) + imgs = ((cast(np.ndarray, imgs) + 1) / 2 * 255).astype(dtype=np.uint8) + targets = [int(d[0]) - 1 for d in raw_data] + + self.data = imgs + self.targets = targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is index of the target class. 
+ """ + img, target = self.data[index], int(self.targets[index]) + + # doing this so that it is consistent with all other datasets + # to return a PIL Image + img = Image.fromarray(img, mode='L') + + if self.transform is not None: + img = self.transform(img) + + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.data) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e6798bd4500827f72744fc7de73dc0032b011288 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/utils.py @@ -0,0 +1,451 @@ +import os +import os.path +import hashlib +import gzip +import re +import tarfile +from typing import Any, Callable, List, Iterable, Optional, TypeVar, Dict, IO, Tuple +from urllib.parse import urlparse +import zipfile +import lzma +import contextlib +import urllib +import urllib.request +import urllib.error +import pathlib + +import torch +from torch.utils.model_zoo import tqdm + +from ._utils import ( + _download_file_from_remote_location, + _is_remote_location_available, +) + + +USER_AGENT = "pytorch/vision" + + +def _urlretrieve(url: str, filename: str, chunk_size: int = 1024) -> None: + with open(filename, "wb") as fh: + with urllib.request.urlopen(urllib.request.Request(url, headers={"User-Agent": USER_AGENT})) as response: + with tqdm(total=response.length) as pbar: + for chunk in iter(lambda: response.read(chunk_size), ""): + if not chunk: + break + pbar.update(chunk_size) + fh.write(chunk) + + +def gen_bar_updater() -> Callable[[int, int, int], None]: + pbar = tqdm(total=None) + + def bar_update(count, block_size, total_size): + if pbar.total is None and total_size: + pbar.total = total_size + progress_bytes = count * block_size + pbar.update(progress_bytes - pbar.n) + + return bar_update + + +def calculate_md5(fpath: str, chunk_size: int = 1024 * 1024) -> str: + md5 = hashlib.md5() + with open(fpath, 'rb') as f: + for chunk in iter(lambda: f.read(chunk_size), b''): + md5.update(chunk) + return md5.hexdigest() + + +def check_md5(fpath: str, md5: str, **kwargs: Any) -> bool: + return md5 == calculate_md5(fpath, **kwargs) + + +def check_integrity(fpath: str, md5: Optional[str] = None) -> bool: + if not os.path.isfile(fpath): + return False + if md5 is None: + return True + return check_md5(fpath, md5) + + +def _get_redirect_url(url: str, max_hops: int = 3) -> str: + initial_url = url + headers = {"Method": "HEAD", "User-Agent": USER_AGENT} + + for _ in range(max_hops + 1): + with urllib.request.urlopen(urllib.request.Request(url, headers=headers)) as response: + if response.url == url or response.url is None: + return url + + url = response.url + else: + raise RecursionError( + f"Request to {initial_url} exceeded {max_hops} redirects. The last redirect points to {url}." + ) + + +def _get_google_drive_file_id(url: str) -> Optional[str]: + parts = urlparse(url) + + if re.match(r"(drive|docs)[.]google[.]com", parts.netloc) is None: + return None + + match = re.match(r"/file/d/(?P<id>[^/]*)", parts.path) + if match is None: + return None + + return match.group("id") + + +def download_url( + url: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None, max_redirect_hops: int = 3 +) -> None: + """Download a file from a url and place it in root. 
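+
+    Example (an illustrative sketch added to this docstring, not upstream text;
+    the URL and checksum are the SBD values used elsewhere in torchvision, and
+    ``./data`` is a placeholder directory)::
+
+        download_url(
+            "https://www2.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz",
+            root="./data",
+            md5="82b4d87ceb2ed10f6038a1cba92111cb",
+        )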
+ + Args: + url (str): URL to download file from + root (str): Directory to place downloaded file in + filename (str, optional): Name to save the file under. If None, use the basename of the URL + md5 (str, optional): MD5 checksum of the download. If None, do not check + max_redirect_hops (int, optional): Maximum number of redirect hops allowed + """ + root = os.path.expanduser(root) + if not filename: + filename = os.path.basename(url) + fpath = os.path.join(root, filename) + + os.makedirs(root, exist_ok=True) + + # check if file is already present locally + if check_integrity(fpath, md5): + print('Using downloaded and verified file: ' + fpath) + return + + if _is_remote_location_available(): + _download_file_from_remote_location(fpath, url) + else: + # expand redirect chain if needed + url = _get_redirect_url(url, max_hops=max_redirect_hops) + + # check if file is located on Google Drive + file_id = _get_google_drive_file_id(url) + if file_id is not None: + return download_file_from_google_drive(file_id, root, filename, md5) + + # download the file + try: + print('Downloading ' + url + ' to ' + fpath) + _urlretrieve(url, fpath) + except (urllib.error.URLError, IOError) as e: # type: ignore[attr-defined] + if url[:5] == 'https': + url = url.replace('https:', 'http:') + print('Failed download. Trying https -> http instead.' + ' Downloading ' + url + ' to ' + fpath) + _urlretrieve(url, fpath) + else: + raise e + + # check integrity of downloaded file + if not check_integrity(fpath, md5): + raise RuntimeError("File not found or corrupted.") + + +def list_dir(root: str, prefix: bool = False) -> List[str]: + """List all directories at a given root + + Args: + root (str): Path to directory whose folders need to be listed + prefix (bool, optional): If true, prepends the path to each result, otherwise + only returns the name of the directories found + """ + root = os.path.expanduser(root) + directories = [p for p in os.listdir(root) if os.path.isdir(os.path.join(root, p))] + if prefix is True: + directories = [os.path.join(root, d) for d in directories] + return directories + + +def list_files(root: str, suffix: str, prefix: bool = False) -> List[str]: + """List all files ending with a suffix at a given root + + Args: + root (str): Path to directory whose folders need to be listed + suffix (str or tuple): Suffix of the files to match, e.g. '.png' or ('.jpg', '.png'). + It uses the Python "str.endswith" method and is passed directly + prefix (bool, optional): If true, prepends the path to each result, otherwise + only returns the name of the files found + """ + root = os.path.expanduser(root) + files = [p for p in os.listdir(root) if os.path.isfile(os.path.join(root, p)) and p.endswith(suffix)] + if prefix is True: + files = [os.path.join(root, d) for d in files] + return files + + +def _quota_exceeded(response: "requests.models.Response") -> bool: # type: ignore[name-defined] + try: + start = next(response.iter_content(chunk_size=128, decode_unicode=True)) + return isinstance(start, str) and "Google Drive - Quota exceeded" in start + except StopIteration: + return False + + +def download_file_from_google_drive(file_id: str, root: str, filename: Optional[str] = None, md5: Optional[str] = None): + """Download a Google Drive file from and place it in root. + + Args: + file_id (str): id of file to be downloaded + root (str): Directory to place downloaded file in + filename (str, optional): Name to save the file under. If None, use the id of the file. 
+ md5 (str, optional): MD5 checksum of the download. If None, do not check + """ + # Based on https://stackoverflow.com/questions/38511444/python-download-files-from-google-drive-using-url + import requests + url = "https://docs.google.com/uc?export=download" + + root = os.path.expanduser(root) + if not filename: + filename = file_id + fpath = os.path.join(root, filename) + + os.makedirs(root, exist_ok=True) + + if os.path.isfile(fpath) and check_integrity(fpath, md5): + print('Using downloaded and verified file: ' + fpath) + else: + session = requests.Session() + + response = session.get(url, params={'id': file_id}, stream=True) + token = _get_confirm_token(response) + + if token: + params = {'id': file_id, 'confirm': token} + response = session.get(url, params=params, stream=True) + + if _quota_exceeded(response): + msg = ( + f"The daily quota of the file {filename} is exceeded and it " + f"can't be downloaded. This is a limitation of Google Drive " + f"and can only be overcome by trying again later." + ) + raise RuntimeError(msg) + + _save_response_content(response, fpath) + + +def _get_confirm_token(response: "requests.models.Response") -> Optional[str]: # type: ignore[name-defined] + for key, value in response.cookies.items(): + if key.startswith('download_warning'): + return value + + return None + + +def _save_response_content( + response: "requests.models.Response", destination: str, chunk_size: int = 32768, # type: ignore[name-defined] +) -> None: + with open(destination, "wb") as f: + pbar = tqdm(total=None) + progress = 0 + for chunk in response.iter_content(chunk_size): + if chunk: # filter out keep-alive new chunks + f.write(chunk) + progress += len(chunk) + pbar.update(progress - pbar.n) + pbar.close() + + +def _extract_tar(from_path: str, to_path: str, compression: Optional[str]) -> None: + with tarfile.open(from_path, f"r:{compression[1:]}" if compression else "r") as tar: + tar.extractall(to_path) + + +_ZIP_COMPRESSION_MAP: Dict[str, int] = { + ".xz": zipfile.ZIP_LZMA, +} + + +def _extract_zip(from_path: str, to_path: str, compression: Optional[str]) -> None: + with zipfile.ZipFile( + from_path, "r", compression=_ZIP_COMPRESSION_MAP[compression] if compression else zipfile.ZIP_STORED + ) as zip: + zip.extractall(to_path) + + +_ARCHIVE_EXTRACTORS: Dict[str, Callable[[str, str, Optional[str]], None]] = { + ".tar": _extract_tar, + ".zip": _extract_zip, +} +_COMPRESSED_FILE_OPENERS: Dict[str, Callable[..., IO]] = {".gz": gzip.open, ".xz": lzma.open} +_FILE_TYPE_ALIASES: Dict[str, Tuple[Optional[str], Optional[str]]] = {".tgz": (".tar", ".gz")} + + +def _verify_archive_type(archive_type: str) -> None: + if archive_type not in _ARCHIVE_EXTRACTORS.keys(): + valid_types = "', '".join(_ARCHIVE_EXTRACTORS.keys()) + raise RuntimeError(f"Unknown archive type '{archive_type}'. Known archive types are '{valid_types}'.") + + +def _verify_compression(compression: str) -> None: + if compression not in _COMPRESSED_FILE_OPENERS.keys(): + valid_types = "', '".join(_COMPRESSED_FILE_OPENERS.keys()) + raise RuntimeError(f"Unknown compression '{compression}'. Known compressions are '{valid_types}'.") + + +def _detect_file_type(file: str) -> Tuple[str, Optional[str], Optional[str]]: + path = pathlib.Path(file) + suffix = path.suffix + suffixes = pathlib.Path(file).suffixes + if not suffixes: + raise RuntimeError( + f"File '{file}' has no suffixes that could be used to detect the archive type and compression." 
+ ) + elif len(suffixes) > 2: + raise RuntimeError( + "Archive type and compression detection only works for 1 or 2 suffixes. " f"Got {len(suffixes)} instead." + ) + elif len(suffixes) == 2: + # if we have exactly two suffixes we assume the first one is the archive type and the second on is the + # compression + archive_type, compression = suffixes + _verify_archive_type(archive_type) + _verify_compression(compression) + return "".join(suffixes), archive_type, compression + + # check if the suffix is a known alias + with contextlib.suppress(KeyError): + return (suffix, *_FILE_TYPE_ALIASES[suffix]) + + # check if the suffix is an archive type + with contextlib.suppress(RuntimeError): + _verify_archive_type(suffix) + return suffix, suffix, None + + # check if the suffix is a compression + with contextlib.suppress(RuntimeError): + _verify_compression(suffix) + return suffix, None, suffix + + raise RuntimeError(f"Suffix '{suffix}' is neither recognized as archive type nor as compression.") + + +def _decompress(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str: + r"""Decompress a file. + + The compression is automatically detected from the file name. + + Args: + from_path (str): Path to the file to be decompressed. + to_path (str): Path to the decompressed file. If omitted, ``from_path`` without compression extension is used. + remove_finished (bool): If ``True``, remove the file after the extraction. + + Returns: + (str): Path to the decompressed file. + """ + suffix, archive_type, compression = _detect_file_type(from_path) + if not compression: + raise RuntimeError(f"Couldn't detect a compression from suffix {suffix}.") + + if to_path is None: + to_path = from_path.replace(suffix, archive_type if archive_type is not None else "") + + # We don't need to check for a missing key here, since this was already done in _detect_file_type() + compressed_file_opener = _COMPRESSED_FILE_OPENERS[compression] + + with compressed_file_opener(from_path, "rb") as rfh, open(to_path, "wb") as wfh: + wfh.write(rfh.read()) + + if remove_finished: + os.remove(from_path) + + return to_path + + +def extract_archive(from_path: str, to_path: Optional[str] = None, remove_finished: bool = False) -> str: + """Extract an archive. + + The archive type and a possible compression is automatically detected from the file name. If the file is compressed + but not an archive the call is dispatched to :func:`decompress`. + + Args: + from_path (str): Path to the file to be extracted. + to_path (str): Path to the directory the file will be extracted to. If omitted, the directory of the file is + used. + remove_finished (bool): If ``True``, remove the file after the extraction. + + Returns: + (str): Path to the directory the file was extracted to. 
+ """ + if to_path is None: + to_path = os.path.dirname(from_path) + + suffix, archive_type, compression = _detect_file_type(from_path) + if not archive_type: + return _decompress( + from_path, + os.path.join(to_path, os.path.basename(from_path).replace(suffix, "")), + remove_finished=remove_finished, + ) + + # We don't need to check for a missing key here, since this was already done in _detect_file_type() + extractor = _ARCHIVE_EXTRACTORS[archive_type] + + extractor(from_path, to_path, compression) + + return to_path + + +def download_and_extract_archive( + url: str, + download_root: str, + extract_root: Optional[str] = None, + filename: Optional[str] = None, + md5: Optional[str] = None, + remove_finished: bool = False, +) -> None: + download_root = os.path.expanduser(download_root) + if extract_root is None: + extract_root = download_root + if not filename: + filename = os.path.basename(url) + + download_url(url, download_root, filename, md5) + + archive = os.path.join(download_root, filename) + print("Extracting {} to {}".format(archive, extract_root)) + extract_archive(archive, extract_root, remove_finished) + + +def iterable_to_str(iterable: Iterable) -> str: + return "'" + "', '".join([str(item) for item in iterable]) + "'" + + +T = TypeVar("T", str, bytes) + + +def verify_str_arg( + value: T, arg: Optional[str] = None, valid_values: Iterable[T] = None, custom_msg: Optional[str] = None, +) -> T: + if not isinstance(value, torch._six.string_classes): + if arg is None: + msg = "Expected type str, but got type {type}." + else: + msg = "Expected type str for argument {arg}, but got type {type}." + msg = msg.format(type=type(value), arg=arg) + raise ValueError(msg) + + if valid_values is None: + return value + + if value not in valid_values: + if custom_msg is not None: + msg = custom_msg + else: + msg = ("Unknown value '{value}' for argument {arg}. " + "Valid values are {{{valid_values}}}.") + msg = msg.format(value=value, arg=arg, + valid_values=iterable_to_str(valid_values)) + raise ValueError(msg) + + return value diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/video_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/video_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..987270c4cd403b5ae8683b6967b12863e950f8d9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/video_utils.py @@ -0,0 +1,426 @@ +import bisect +import math +import warnings +from fractions import Fraction +from typing import List + +import torch +from torchvision.io import ( + _probe_video_from_file, + _read_video_from_file, + read_video, + read_video_timestamps, +) + +from .utils import tqdm + + +def pts_convert(pts, timebase_from, timebase_to, round_func=math.floor): + """convert pts between different time bases + Args: + pts: presentation timestamp, float + timebase_from: original timebase. Fraction + timebase_to: new timebase. Fraction + round_func: rounding function. + """ + new_pts = Fraction(pts, 1) * timebase_from / timebase_to + return round_func(new_pts) + + +def unfold(tensor, size, step, dilation=1): + """ + similar to tensor.unfold, but with the dilation + and specialized for 1d tensors + + Returns all consecutive windows of `size` elements, with + `step` between windows. The distance between each element + in a window is given by `dilation`. 
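+
+    Example (illustrative, added for clarity and not upstream text; the values
+    follow from the size/step/dilation definition above)::
+
+        >>> unfold(torch.arange(10), size=3, step=2)
+        tensor([[0, 1, 2],
+                [2, 3, 4],
+                [4, 5, 6],
+                [6, 7, 8]])
+        >>> unfold(torch.arange(10), size=3, step=2, dilation=2)
+        tensor([[0, 2, 4],
+                [2, 4, 6],
+                [4, 6, 8]])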
+ """ + assert tensor.dim() == 1 + o_stride = tensor.stride(0) + numel = tensor.numel() + new_stride = (step * o_stride, dilation * o_stride) + new_size = ((numel - (dilation * (size - 1) + 1)) // step + 1, size) + if new_size[0] < 1: + new_size = (0, size) + return torch.as_strided(tensor, new_size, new_stride) + + +class _VideoTimestampsDataset(object): + """ + Dataset used to parallelize the reading of the timestamps + of a list of videos, given their paths in the filesystem. + + Used in VideoClips and defined at top level so it can be + pickled when forking. + """ + + def __init__(self, video_paths: List[str]): + self.video_paths = video_paths + + def __len__(self): + return len(self.video_paths) + + def __getitem__(self, idx): + return read_video_timestamps(self.video_paths[idx]) + + +def _collate_fn(x): + """ + Dummy collate function to be used with _VideoTimestampsDataset + """ + return x + + +class VideoClips(object): + """ + Given a list of video files, computes all consecutive subvideos of size + `clip_length_in_frames`, where the distance between each subvideo in the + same video is defined by `frames_between_clips`. + If `frame_rate` is specified, it will also resample all the videos to have + the same frame rate, and the clips will refer to this frame rate. + + Creating this instance the first time is time-consuming, as it needs to + decode all the videos in `video_paths`. It is recommended that you + cache the results after instantiation of the class. + + Recreating the clips for different clip lengths is fast, and can be done + with the `compute_clips` method. + + Args: + video_paths (List[str]): paths to the video files + clip_length_in_frames (int): size of a clip in number of frames + frames_between_clips (int): step (in frames) between each clip + frame_rate (int, optional): if specified, it will resample the video + so that it has `frame_rate`, and then the clips will be defined + on the resampled video + num_workers (int): how many subprocesses to use for data loading. + 0 means that the data will be loaded in the main process. 
(default: 0) + """ + + def __init__( + self, + video_paths, + clip_length_in_frames=16, + frames_between_clips=1, + frame_rate=None, + _precomputed_metadata=None, + num_workers=0, + _video_width=0, + _video_height=0, + _video_min_dimension=0, + _video_max_dimension=0, + _audio_samples=0, + _audio_channels=0, + ): + + self.video_paths = video_paths + self.num_workers = num_workers + + # these options are not valid for pyav backend + self._video_width = _video_width + self._video_height = _video_height + self._video_min_dimension = _video_min_dimension + self._video_max_dimension = _video_max_dimension + self._audio_samples = _audio_samples + self._audio_channels = _audio_channels + + if _precomputed_metadata is None: + self._compute_frame_pts() + else: + self._init_from_metadata(_precomputed_metadata) + self.compute_clips(clip_length_in_frames, frames_between_clips, frame_rate) + + def _compute_frame_pts(self): + self.video_pts = [] + self.video_fps = [] + + # strategy: use a DataLoader to parallelize read_video_timestamps + # so need to create a dummy dataset first + import torch.utils.data + + dl = torch.utils.data.DataLoader( + _VideoTimestampsDataset(self.video_paths), + batch_size=16, + num_workers=self.num_workers, + collate_fn=_collate_fn, + ) + + with tqdm(total=len(dl)) as pbar: + for batch in dl: + pbar.update(1) + clips, fps = list(zip(*batch)) + # we need to specify dtype=torch.long because for empty list, + # torch.as_tensor will use torch.float as default dtype. This + # happens when decoding fails and no pts is returned in the list. + clips = [torch.as_tensor(c, dtype=torch.long) for c in clips] + self.video_pts.extend(clips) + self.video_fps.extend(fps) + + def _init_from_metadata(self, metadata): + self.video_paths = metadata["video_paths"] + assert len(self.video_paths) == len(metadata["video_pts"]) + self.video_pts = metadata["video_pts"] + assert len(self.video_paths) == len(metadata["video_fps"]) + self.video_fps = metadata["video_fps"] + + @property + def metadata(self): + _metadata = { + "video_paths": self.video_paths, + "video_pts": self.video_pts, + "video_fps": self.video_fps, + } + return _metadata + + def subset(self, indices): + video_paths = [self.video_paths[i] for i in indices] + video_pts = [self.video_pts[i] for i in indices] + video_fps = [self.video_fps[i] for i in indices] + metadata = { + "video_paths": video_paths, + "video_pts": video_pts, + "video_fps": video_fps, + } + return type(self)( + video_paths, + self.num_frames, + self.step, + self.frame_rate, + _precomputed_metadata=metadata, + num_workers=self.num_workers, + _video_width=self._video_width, + _video_height=self._video_height, + _video_min_dimension=self._video_min_dimension, + _video_max_dimension=self._video_max_dimension, + _audio_samples=self._audio_samples, + _audio_channels=self._audio_channels, + ) + + @staticmethod + def compute_clips_for_video(video_pts, num_frames, step, fps, frame_rate): + if fps is None: + # if for some reason the video doesn't have fps (because doesn't have a video stream) + # set the fps to 1. 
The value doesn't matter, because video_pts is empty anyway + fps = 1 + if frame_rate is None: + frame_rate = fps + total_frames = len(video_pts) * (float(frame_rate) / fps) + idxs = VideoClips._resample_video_idx( + int(math.floor(total_frames)), fps, frame_rate + ) + video_pts = video_pts[idxs] + clips = unfold(video_pts, num_frames, step) + if not clips.numel(): + warnings.warn("There aren't enough frames in the current video to get a clip for the given clip length and " + "frames between clips. The video (and potentially others) will be skipped.") + if isinstance(idxs, slice): + idxs = [idxs] * len(clips) + else: + idxs = unfold(idxs, num_frames, step) + return clips, idxs + + def compute_clips(self, num_frames, step, frame_rate=None): + """ + Compute all consecutive sequences of clips from video_pts. + Always returns clips of size `num_frames`, meaning that the + last few frames in a video can potentially be dropped. + + Args: + num_frames (int): number of frames for the clip + step (int): distance between two clips + frame_rate (int, optional): The frame rate + """ + self.num_frames = num_frames + self.step = step + self.frame_rate = frame_rate + self.clips = [] + self.resampling_idxs = [] + for video_pts, fps in zip(self.video_pts, self.video_fps): + clips, idxs = self.compute_clips_for_video( + video_pts, num_frames, step, fps, frame_rate + ) + self.clips.append(clips) + self.resampling_idxs.append(idxs) + clip_lengths = torch.as_tensor([len(v) for v in self.clips]) + self.cumulative_sizes = clip_lengths.cumsum(0).tolist() + + def __len__(self): + return self.num_clips() + + def num_videos(self): + return len(self.video_paths) + + def num_clips(self): + """ + Number of subclips that are available in the video list. + """ + return self.cumulative_sizes[-1] + + def get_clip_location(self, idx): + """ + Converts a flattened representation of the indices into a video_idx, clip_idx + representation. + """ + video_idx = bisect.bisect_right(self.cumulative_sizes, idx) + if video_idx == 0: + clip_idx = idx + else: + clip_idx = idx - self.cumulative_sizes[video_idx - 1] + return video_idx, clip_idx + + @staticmethod + def _resample_video_idx(num_frames, original_fps, new_fps): + step = float(original_fps) / new_fps + if step.is_integer(): + # optimization: if step is integer, don't need to perform + # advanced indexing + step = int(step) + return slice(None, None, step) + idxs = torch.arange(num_frames, dtype=torch.float32) * step + idxs = idxs.floor().to(torch.int64) + return idxs + + def get_clip(self, idx): + """ + Gets a subclip from a list of videos. + + Args: + idx (int): index of the subclip. Must be between 0 and num_clips(). 
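+                For example (an illustrative note, not upstream text): with two
+                videos holding 4 and 6 clips respectively,
+                ``self.cumulative_sizes == [4, 10]``, and ``idx=5`` is resolved
+                by ``get_clip_location`` to ``video_idx=1, clip_idx=1``.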
+ + Returns: + video (Tensor) + audio (Tensor) + info (Dict) + video_idx (int): index of the video in `video_paths` + """ + if idx >= self.num_clips(): + raise IndexError( + "Index {} out of range " + "({} number of clips)".format(idx, self.num_clips()) + ) + video_idx, clip_idx = self.get_clip_location(idx) + video_path = self.video_paths[video_idx] + clip_pts = self.clips[video_idx][clip_idx] + + from torchvision import get_video_backend + + backend = get_video_backend() + + if backend == "pyav": + # check for invalid options + if self._video_width != 0: + raise ValueError("pyav backend doesn't support _video_width != 0") + if self._video_height != 0: + raise ValueError("pyav backend doesn't support _video_height != 0") + if self._video_min_dimension != 0: + raise ValueError( + "pyav backend doesn't support _video_min_dimension != 0" + ) + if self._video_max_dimension != 0: + raise ValueError( + "pyav backend doesn't support _video_max_dimension != 0" + ) + if self._audio_samples != 0: + raise ValueError("pyav backend doesn't support _audio_samples != 0") + + if backend == "pyav": + start_pts = clip_pts[0].item() + end_pts = clip_pts[-1].item() + video, audio, info = read_video(video_path, start_pts, end_pts) + else: + info = _probe_video_from_file(video_path) + video_fps = info.video_fps + audio_fps = None + + video_start_pts = clip_pts[0].item() + video_end_pts = clip_pts[-1].item() + + audio_start_pts, audio_end_pts = 0, -1 + audio_timebase = Fraction(0, 1) + video_timebase = Fraction( + info.video_timebase.numerator, info.video_timebase.denominator + ) + if info.has_audio: + audio_timebase = Fraction( + info.audio_timebase.numerator, info.audio_timebase.denominator + ) + audio_start_pts = pts_convert( + video_start_pts, video_timebase, audio_timebase, math.floor + ) + audio_end_pts = pts_convert( + video_end_pts, video_timebase, audio_timebase, math.ceil + ) + audio_fps = info.audio_sample_rate + video, audio, info = _read_video_from_file( + video_path, + video_width=self._video_width, + video_height=self._video_height, + video_min_dimension=self._video_min_dimension, + video_max_dimension=self._video_max_dimension, + video_pts_range=(video_start_pts, video_end_pts), + video_timebase=video_timebase, + audio_samples=self._audio_samples, + audio_channels=self._audio_channels, + audio_pts_range=(audio_start_pts, audio_end_pts), + audio_timebase=audio_timebase, + ) + + info = {"video_fps": video_fps} + if audio_fps is not None: + info["audio_fps"] = audio_fps + + if self.frame_rate is not None: + resampling_idx = self.resampling_idxs[video_idx][clip_idx] + if isinstance(resampling_idx, torch.Tensor): + resampling_idx = resampling_idx - resampling_idx[0] + video = video[resampling_idx] + info["video_fps"] = self.frame_rate + assert len(video) == self.num_frames, "{} x {}".format( + video.shape, self.num_frames + ) + return video, audio, info, video_idx + + def __getstate__(self): + video_pts_sizes = [len(v) for v in self.video_pts] + # To be back-compatible, we convert data to dtype torch.long as needed + # because for empty list, in legacy implementation, torch.as_tensor will + # use torch.float as default dtype. This happens when decoding fails and + # no pts is returned in the list. + video_pts = [x.to(torch.int64) for x in self.video_pts] + # video_pts can be an empty list if no frames have been decoded + if video_pts: + video_pts = torch.cat(video_pts) + # avoid bug in https://github.com/pytorch/pytorch/issues/32351 + # TODO: Revert it once the bug is fixed. 
+ video_pts = video_pts.numpy() + + # make a copy of the fields of self + d = self.__dict__.copy() + d["video_pts_sizes"] = video_pts_sizes + d["video_pts"] = video_pts + # delete the following attributes to reduce the size of dictionary. They + # will be re-computed in "__setstate__()" + del d["clips"] + del d["resampling_idxs"] + del d["cumulative_sizes"] + + # for backwards-compatibility + d["_version"] = 2 + return d + + def __setstate__(self, d): + # for backwards-compatibility + if "_version" not in d: + self.__dict__ = d + return + + video_pts = torch.as_tensor(d["video_pts"], dtype=torch.int64) + video_pts = torch.split(video_pts, d["video_pts_sizes"], dim=0) + # don't need this info anymore + del d["video_pts_sizes"] + + d["video_pts"] = video_pts + self.__dict__ = d + # recompute attributes "clips", "resampling_idxs" and other derivative ones + self.compute_clips(self.num_frames, self.step, self.frame_rate) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/vision.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/vision.py new file mode 100644 index 0000000000000000000000000000000000000000..7aa25f79eb8559008d0cfc7851f63efc73df212a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/vision.py @@ -0,0 +1,87 @@ +import os +import torch +import torch.utils.data as data +from typing import Any, Callable, List, Optional, Tuple + + +class VisionDataset(data.Dataset): + _repr_indent = 4 + + def __init__( + self, + root: str, + transforms: Optional[Callable] = None, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + ) -> None: + if isinstance(root, torch._six.string_classes): + root = os.path.expanduser(root) + self.root = root + + has_transforms = transforms is not None + has_separate_transform = transform is not None or target_transform is not None + if has_transforms and has_separate_transform: + raise ValueError("Only transforms or transform/target_transform can " + "be passed as argument") + + # for backwards-compatibility + self.transform = transform + self.target_transform = target_transform + + if has_separate_transform: + transforms = StandardTransform(transform, target_transform) + self.transforms = transforms + + def __getitem__(self, index: int) -> Any: + raise NotImplementedError + + def __len__(self) -> int: + raise NotImplementedError + + def __repr__(self) -> str: + head = "Dataset " + self.__class__.__name__ + body = ["Number of datapoints: {}".format(self.__len__())] + if self.root is not None: + body.append("Root location: {}".format(self.root)) + body += self.extra_repr().splitlines() + if hasattr(self, "transforms") and self.transforms is not None: + body += [repr(self.transforms)] + lines = [head] + [" " * self._repr_indent + line for line in body] + return '\n'.join(lines) + + def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: + lines = transform.__repr__().splitlines() + return (["{}{}".format(head, lines[0])] + + ["{}{}".format(" " * len(head), line) for line in lines[1:]]) + + def extra_repr(self) -> str: + return "" + + +class StandardTransform(object): + def __init__(self, transform: Optional[Callable] = None, target_transform: Optional[Callable] = None) -> None: + self.transform = transform + self.target_transform = target_transform + + def __call__(self, input: Any, target: Any) -> Tuple[Any, Any]: + if self.transform is not None: + input = self.transform(input) + if self.target_transform is not None: + target = 
self.target_transform(target) + return input, target + + def _format_transform_repr(self, transform: Callable, head: str) -> List[str]: + lines = transform.__repr__().splitlines() + return (["{}{}".format(head, lines[0])] + + ["{}{}".format(" " * len(head), line) for line in lines[1:]]) + + def __repr__(self) -> str: + body = [self.__class__.__name__] + if self.transform is not None: + body += self._format_transform_repr(self.transform, + "Transform: ") + if self.target_transform is not None: + body += self._format_transform_repr(self.target_transform, + "Target transform: ") + + return '\n'.join(body) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/voc.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/voc.py new file mode 100644 index 0000000000000000000000000000000000000000..56bd92c797255533db357d342a968580aa58befc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/voc.py @@ -0,0 +1,231 @@ +import os +import collections +from .vision import VisionDataset +from xml.etree.ElementTree import Element as ET_Element +try: + from defusedxml.ElementTree import parse as ET_parse +except ImportError: + from xml.etree.ElementTree import parse as ET_parse +from PIL import Image +from typing import Any, Callable, Dict, Optional, Tuple, List +from .utils import download_and_extract_archive, verify_str_arg +import warnings + +DATASET_YEAR_DICT = { + '2012': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar', + 'filename': 'VOCtrainval_11-May-2012.tar', + 'md5': '6cd6e144f989b92b3379bac3b3de84fd', + 'base_dir': os.path.join('VOCdevkit', 'VOC2012') + }, + '2011': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2011/VOCtrainval_25-May-2011.tar', + 'filename': 'VOCtrainval_25-May-2011.tar', + 'md5': '6c3384ef61512963050cb5d687e5bf1e', + 'base_dir': os.path.join('TrainVal', 'VOCdevkit', 'VOC2011') + }, + '2010': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2010/VOCtrainval_03-May-2010.tar', + 'filename': 'VOCtrainval_03-May-2010.tar', + 'md5': 'da459979d0c395079b5c75ee67908abb', + 'base_dir': os.path.join('VOCdevkit', 'VOC2010') + }, + '2009': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2009/VOCtrainval_11-May-2009.tar', + 'filename': 'VOCtrainval_11-May-2009.tar', + 'md5': '59065e4b188729180974ef6572f6a212', + 'base_dir': os.path.join('VOCdevkit', 'VOC2009') + }, + '2008': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2008/VOCtrainval_14-Jul-2008.tar', + 'filename': 'VOCtrainval_11-May-2012.tar', + 'md5': '2629fa636546599198acfcfbfcf1904a', + 'base_dir': os.path.join('VOCdevkit', 'VOC2008') + }, + '2007': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtrainval_06-Nov-2007.tar', + 'filename': 'VOCtrainval_06-Nov-2007.tar', + 'md5': 'c52e279531787c972589f7e41ab4ae64', + 'base_dir': os.path.join('VOCdevkit', 'VOC2007') + }, + '2007-test': { + 'url': 'http://host.robots.ox.ac.uk/pascal/VOC/voc2007/VOCtest_06-Nov-2007.tar', + 'filename': 'VOCtest_06-Nov-2007.tar', + 'md5': 'b6e924de25625d8de591ea690078ad9f', + 'base_dir': os.path.join('VOCdevkit', 'VOC2007') + } +} + + +class _VOCBase(VisionDataset): + _SPLITS_DIR: str + _TARGET_DIR: str + _TARGET_FILE_EXT: str + + def __init__( + self, + root: str, + year: str = "2012", + image_set: str = "train", + download: bool = False, + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + transforms: Optional[Callable] = None, + ): + super().__init__(root, transforms, transform, 
target_transform) + if year == "2007-test": + if image_set == "test": + warnings.warn( + "Acessing the test image set of the year 2007 with year='2007-test' is deprecated. " + "Please use the combination year='2007' and image_set='test' instead." + ) + year = "2007" + else: + raise ValueError( + "In the test image set of the year 2007 only image_set='test' is allowed. " + "For all other image sets use year='2007' instead." + ) + self.year = year + + valid_image_sets = ["train", "trainval", "val"] + if year == "2007": + valid_image_sets.append("test") + self.image_set = verify_str_arg(image_set, "image_set", valid_image_sets) + + key = "2007-test" if year == "2007" and image_set == "test" else year + dataset_year_dict = DATASET_YEAR_DICT[key] + + self.url = dataset_year_dict["url"] + self.filename = dataset_year_dict["filename"] + self.md5 = dataset_year_dict["md5"] + + base_dir = dataset_year_dict["base_dir"] + voc_root = os.path.join(self.root, base_dir) + + if download: + download_and_extract_archive(self.url, self.root, filename=self.filename, md5=self.md5) + + if not os.path.isdir(voc_root): + raise RuntimeError("Dataset not found or corrupted. You can use download=True to download it") + + splits_dir = os.path.join(voc_root, "ImageSets", self._SPLITS_DIR) + split_f = os.path.join(splits_dir, image_set.rstrip("\n") + ".txt") + with open(os.path.join(split_f), "r") as f: + file_names = [x.strip() for x in f.readlines()] + + image_dir = os.path.join(voc_root, "JPEGImages") + self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names] + + target_dir = os.path.join(voc_root, self._TARGET_DIR) + self.targets = [os.path.join(target_dir, x + self._TARGET_FILE_EXT) for x in file_names] + + assert len(self.images) == len(self.targets) + + def __len__(self) -> int: + return len(self.images) + + +class VOCSegmentation(_VOCBase): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Segmentation Dataset. + + Args: + root (string): Root directory of the VOC Dataset. + year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``. + image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If + ``year=="2007"``, can also be ``"test"``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + _SPLITS_DIR = "Segmentation" + _TARGET_DIR = "SegmentationClass" + _TARGET_FILE_EXT = ".png" + + @property + def masks(self) -> List[str]: + return self.targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is the image segmentation. + """ + img = Image.open(self.images[index]).convert("RGB") + target = Image.open(self.masks[index]) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + +class VOCDetection(_VOCBase): + """`Pascal VOC <http://host.robots.ox.ac.uk/pascal/VOC/>`_ Detection Dataset. + + Args: + root (string): Root directory of the VOC Dataset. 
+ year (string, optional): The dataset year, supports years ``"2007"`` to ``"2012"``. + image_set (string, optional): Select the image_set to use, ``"train"``, ``"trainval"`` or ``"val"``. If + ``year=="2007"``, can also be ``"test"``. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + (default: alphabetic indexing of VOC's 20 classes). + transform (callable, optional): A function/transform that takes in an PIL image + and returns a transformed version. E.g, ``transforms.RandomCrop`` + target_transform (callable, required): A function/transform that takes in the + target and transforms it. + transforms (callable, optional): A function/transform that takes input sample and its target as entry + and returns a transformed version. + """ + + _SPLITS_DIR = "Main" + _TARGET_DIR = "Annotations" + _TARGET_FILE_EXT = ".xml" + + @property + def annotations(self) -> List[str]: + return self.targets + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dictionary of the XML tree. + """ + img = Image.open(self.images[index]).convert("RGB") + target = self.parse_voc_xml(ET_parse(self.annotations[index]).getroot()) + + if self.transforms is not None: + img, target = self.transforms(img, target) + + return img, target + + def parse_voc_xml(self, node: ET_Element) -> Dict[str, Any]: + voc_dict: Dict[str, Any] = {} + children = list(node) + if children: + def_dic: Dict[str, Any] = collections.defaultdict(list) + for dc in map(self.parse_voc_xml, children): + for ind, v in dc.items(): + def_dic[ind].append(v) + if node.tag == "annotation": + def_dic["object"] = [def_dic["object"]] + voc_dict = {node.tag: {ind: v[0] if len(v) == 1 else v for ind, v in def_dic.items()}} + if node.text: + text = node.text.strip() + if not children: + voc_dict[node.tag] = text + return voc_dict diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/widerface.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/widerface.py new file mode 100644 index 0000000000000000000000000000000000000000..c1775309b294cbb8ef51e01dbc070b413c6e6f7e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/datasets/widerface.py @@ -0,0 +1,187 @@ +from PIL import Image +import os +from os.path import abspath, expanduser +import torch +from typing import Any, Callable, List, Dict, Optional, Tuple, Union +from .utils import check_integrity, download_file_from_google_drive, \ + download_and_extract_archive, extract_archive, verify_str_arg +from .vision import VisionDataset + + +class WIDERFace(VisionDataset): + """`WIDERFace <http://shuoyang1213.me/WIDERFACE/>`_ Dataset. + + Args: + root (string): Root directory where images and annotations are downloaded to. + Expects the following folder structure if download=False: + + .. code:: + + <root> + └── widerface + ├── wider_face_split ('wider_face_split.zip' if compressed) + ├── WIDER_train ('WIDER_train.zip' if compressed) + ├── WIDER_val ('WIDER_val.zip' if compressed) + └── WIDER_test ('WIDER_test.zip' if compressed) + split (string): The dataset split to use. One of {``train``, ``val``, ``test``}. + Defaults to ``train``. + transform (callable, optional): A function/transform that takes in a PIL image + and returns a transformed version. 
E.g, ``transforms.RandomCrop`` + target_transform (callable, optional): A function/transform that takes in the + target and transforms it. + download (bool, optional): If true, downloads the dataset from the internet and + puts it in root directory. If dataset is already downloaded, it is not + downloaded again. + + """ + + BASE_FOLDER = "widerface" + FILE_LIST = [ + # File ID MD5 Hash Filename + ("0B6eKvaijfFUDQUUwd21EckhUbWs", "3fedf70df600953d25982bcd13d91ba2", "WIDER_train.zip"), + ("0B6eKvaijfFUDd3dIRmpvSk8tLUk", "dfa7d7e790efa35df3788964cf0bbaea", "WIDER_val.zip"), + ("0B6eKvaijfFUDbW4tdGpaYjgzZkU", "e5d8f4248ed24c334bbd12f49c29dd40", "WIDER_test.zip") + ] + ANNOTATIONS_FILE = ( + "http://mmlab.ie.cuhk.edu.hk/projects/WIDERFace/support/bbx_annotation/wider_face_split.zip", + "0e3767bcf0e326556d407bf5bff5d27c", + "wider_face_split.zip" + ) + + def __init__( + self, + root: str, + split: str = "train", + transform: Optional[Callable] = None, + target_transform: Optional[Callable] = None, + download: bool = False, + ) -> None: + super(WIDERFace, self).__init__(root=os.path.join(root, self.BASE_FOLDER), + transform=transform, + target_transform=target_transform) + # check arguments + self.split = verify_str_arg(split, "split", ("train", "val", "test")) + + if download: + self.download() + + if not self._check_integrity(): + raise RuntimeError("Dataset not found or corrupted. " + + "You can use download=True to download and prepare it") + + self.img_info: List[Dict[str, Union[str, Dict[str, torch.Tensor]]]] = [] + if self.split in ("train", "val"): + self.parse_train_val_annotations_file() + else: + self.parse_test_annotations_file() + + def __getitem__(self, index: int) -> Tuple[Any, Any]: + """ + Args: + index (int): Index + + Returns: + tuple: (image, target) where target is a dict of annotations for all faces in the image. + target=None for the test split. 
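A minimal usage sketch for the dataset above (assuming the archives have already been extracted under ``./data/widerface`` and that the class is exposed as ``torchvision.datasets.WIDERFace``):

    from torchvision.datasets import WIDERFace

    ds = WIDERFace(root="./data", split="train", download=False)
    img, target = ds[0]                # PIL image, dict of per-face tensors
    boxes = target["bbox"]             # shape [num_faces, 4] in (x, y, w, h)
    keep = target["invalid"] == 0      # drop boxes flagged as invalid
    print(boxes[keep].shape)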
+ """ + + # stay consistent with other datasets and return a PIL Image + img = Image.open(self.img_info[index]["img_path"]) + + if self.transform is not None: + img = self.transform(img) + + target = None if self.split == "test" else self.img_info[index]["annotations"] + if self.target_transform is not None: + target = self.target_transform(target) + + return img, target + + def __len__(self) -> int: + return len(self.img_info) + + def extra_repr(self) -> str: + lines = ["Split: {split}"] + return '\n'.join(lines).format(**self.__dict__) + + def parse_train_val_annotations_file(self) -> None: + filename = "wider_face_train_bbx_gt.txt" if self.split == "train" else "wider_face_val_bbx_gt.txt" + filepath = os.path.join(self.root, "wider_face_split", filename) + + with open(filepath, "r") as f: + lines = f.readlines() + file_name_line, num_boxes_line, box_annotation_line = True, False, False + num_boxes, box_counter = 0, 0 + labels = [] + for line in lines: + line = line.rstrip() + if file_name_line: + img_path = os.path.join(self.root, "WIDER_" + self.split, "images", line) + img_path = abspath(expanduser(img_path)) + file_name_line = False + num_boxes_line = True + elif num_boxes_line: + num_boxes = int(line) + num_boxes_line = False + box_annotation_line = True + elif box_annotation_line: + box_counter += 1 + line_split = line.split(" ") + line_values = [int(x) for x in line_split] + labels.append(line_values) + if box_counter >= num_boxes: + box_annotation_line = False + file_name_line = True + labels_tensor = torch.tensor(labels) + self.img_info.append({ + "img_path": img_path, + "annotations": {"bbox": labels_tensor[:, 0:4], # x, y, width, height + "blur": labels_tensor[:, 4], + "expression": labels_tensor[:, 5], + "illumination": labels_tensor[:, 6], + "occlusion": labels_tensor[:, 7], + "pose": labels_tensor[:, 8], + "invalid": labels_tensor[:, 9]} + }) + box_counter = 0 + labels.clear() + else: + raise RuntimeError("Error parsing annotation file {}".format(filepath)) + + def parse_test_annotations_file(self) -> None: + filepath = os.path.join(self.root, "wider_face_split", "wider_face_test_filelist.txt") + filepath = abspath(expanduser(filepath)) + with open(filepath, "r") as f: + lines = f.readlines() + for line in lines: + line = line.rstrip() + img_path = os.path.join(self.root, "WIDER_test", "images", line) + img_path = abspath(expanduser(img_path)) + self.img_info.append({"img_path": img_path}) + + def _check_integrity(self) -> bool: + # Allow original archive to be deleted (zip). 
Only need the extracted images + all_files = self.FILE_LIST.copy() + all_files.append(self.ANNOTATIONS_FILE) + for (_, md5, filename) in all_files: + file, ext = os.path.splitext(filename) + extracted_dir = os.path.join(self.root, file) + if not os.path.exists(extracted_dir): + return False + return True + + def download(self) -> None: + if self._check_integrity(): + print('Files already downloaded and verified') + return + + # download and extract image data + for (file_id, md5, filename) in self.FILE_LIST: + download_file_from_google_drive(file_id, self.root, filename, md5) + filepath = os.path.join(self.root, filename) + extract_archive(filepath) + + # download and extract annotation files + download_and_extract_archive(url=self.ANNOTATIONS_FILE[0], + download_root=self.root, + md5=self.ANNOTATIONS_FILE[1]) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/extension.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/extension.py new file mode 100644 index 0000000000000000000000000000000000000000..265c989a8cea8e98dc06c1110d73474f6d061303 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/extension.py @@ -0,0 +1,101 @@ +_HAS_OPS = False + + +def _has_ops(): + return False + + +def _register_extensions(): + import os + import importlib + import torch + + # load the custom_op_library and register the custom ops + lib_dir = os.path.dirname(__file__) + if os.name == 'nt': + # Register the main torchvision library location on the default DLL path + import ctypes + import sys + + kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True) + with_load_library_flags = hasattr(kernel32, 'AddDllDirectory') + prev_error_mode = kernel32.SetErrorMode(0x0001) + + if with_load_library_flags: + kernel32.AddDllDirectory.restype = ctypes.c_void_p + + if sys.version_info >= (3, 8): + os.add_dll_directory(lib_dir) + elif with_load_library_flags: + res = kernel32.AddDllDirectory(lib_dir) + if res is None: + err = ctypes.WinError(ctypes.get_last_error()) + err.strerror += f' Error adding "{lib_dir}" to the DLL directories.' + raise err + + kernel32.SetErrorMode(prev_error_mode) + + loader_details = ( + importlib.machinery.ExtensionFileLoader, + importlib.machinery.EXTENSION_SUFFIXES + ) + + extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) + ext_specs = extfinder.find_spec("_C") + if ext_specs is None: + raise ImportError + torch.ops.load_library(ext_specs.origin) + + +try: + _register_extensions() + _HAS_OPS = True + + def _has_ops(): # noqa: F811 + return True +except (ImportError, OSError): + pass + + +def _assert_has_ops(): + if not _has_ops(): + raise RuntimeError( + "Couldn't load custom C++ ops. This can happen if your PyTorch and " + "torchvision versions are incompatible, or if you had errors while compiling " + "torchvision from source. For further information on the compatible versions, check " + "https://github.com/pytorch/vision#installation for the compatibility matrix. " + "Please check your PyTorch version with torch.__version__ and your torchvision " + "version with torchvision.__version__ and verify if they are compatible, and if not " + "please reinstall torchvision so that it matches your PyTorch install." 
+ ) + + +def _check_cuda_version(): + """ + Make sure that CUDA versions match between the pytorch install and torchvision install + """ + if not _HAS_OPS: + return -1 + import torch + _version = torch.ops.torchvision._cuda_version() + if _version != -1 and torch.version.cuda is not None: + tv_version = str(_version) + if int(tv_version) < 10000: + tv_major = int(tv_version[0]) + tv_minor = int(tv_version[2]) + else: + tv_major = int(tv_version[0:2]) + tv_minor = int(tv_version[3]) + t_version = torch.version.cuda + t_version = t_version.split('.') + t_major = int(t_version[0]) + t_minor = int(t_version[1]) + if t_major != tv_major or t_minor != tv_minor: + raise RuntimeError("Detected that PyTorch and torchvision were compiled with different CUDA versions. " + "PyTorch has CUDA Version={}.{} and torchvision has CUDA Version={}.{}. " + "Please reinstall the torchvision that matches your PyTorch install." + .format(t_major, t_minor, tv_major, tv_minor)) + return _version + + +_check_cuda_version() diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..742344e6b0f9a13ffd53fb09d2fd414bf68a60d3 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/__init__.py @@ -0,0 +1,202 @@ +import torch + +from ._video_opt import ( + Timebase, + VideoMetaData, + _HAS_VIDEO_OPT, + _probe_video_from_file, + _probe_video_from_memory, + _read_video_from_file, + _read_video_from_memory, + _read_video_timestamps_from_file, + _read_video_timestamps_from_memory, +) +from .video import ( + read_video, + read_video_timestamps, + write_video, +) +from .image import ( + ImageReadMode, + decode_image, + decode_jpeg, + decode_png, + encode_jpeg, + encode_png, + read_file, + read_image, + write_file, + write_jpeg, + write_png, +) + + +if _HAS_VIDEO_OPT: + + def _has_video_opt(): + return True + + +else: + + def _has_video_opt(): + return False + + +class VideoReader: + """ + Fine-grained video-reading API. + Supports frame-by-frame reading of various streams from a single video + container. + + Example: + The following examples creates a :mod:`VideoReader` object, seeks into 2s + point, and returns a single frame:: + + import torchvision + video_path = "path_to_a_test_video" + reader = torchvision.io.VideoReader(video_path, "video") + reader.seek(2.0) + frame = next(reader) + + :mod:`VideoReader` implements the iterable API, which makes it suitable to + using it in conjunction with :mod:`itertools` for more advanced reading. + As such, we can use a :mod:`VideoReader` instance inside for loops:: + + reader.seek(2) + for frame in reader: + frames.append(frame['data']) + # additionally, `seek` implements a fluent API, so we can do + for frame in reader.seek(2): + frames.append(frame['data']) + + With :mod:`itertools`, we can read all frames between 2 and 5 seconds with the + following code:: + + for frame in itertools.takewhile(lambda x: x['pts'] <= 5, reader.seek(2)): + frames.append(frame['data']) + + and similarly, reading 10 frames after the 2s timestamp can be achieved + as follows:: + + for frame in itertools.islice(reader.seek(2), 10): + frames.append(frame['data']) + + .. note:: + + Each stream descriptor consists of two parts: stream type (e.g. 'video') and + a unique stream id (which are determined by the video encoding). 
+ In this way, if the video container contains multiple + streams of the same type, users can access the one they want. + If only the stream type is passed, the decoder auto-detects the first stream of that type. + + Args: + + path (string): Path to the video file in supported format + + stream (string, optional): descriptor of the required stream, followed by the stream id, + in the format ``{stream_type}:{stream_id}``. Defaults to ``"video:0"``. + Currently available options include ``['video', 'audio']`` + """ + + def __init__(self, path, stream="video"): + if not _has_video_opt(): + raise RuntimeError( + "Not compiled with video_reader support, " + + "to enable video_reader support, please install " + + "ffmpeg (version 4.2 is currently supported) and " + + "build torchvision from source." + ) + self._c = torch.classes.torchvision.Video(path, stream) + + def __next__(self): + """Decodes and returns the next frame of the current stream. + Frames are encoded as a dict with mandatory + data and pts fields, where data is a tensor, and pts is a + presentation timestamp of the frame expressed in seconds + as a float. + + Returns: + (dict): a dictionary containing the decoded frame (``data``) + and the corresponding timestamp (``pts``) in seconds + + """ + frame, pts = self._c.next() + if frame.numel() == 0: + raise StopIteration + return {"data": frame, "pts": pts} + + def __iter__(self): + return self + + def seek(self, time_s: float): + """Seek within current stream. + + Args: + time_s (float): seek time in seconds + + .. note:: + The current implementation is a so-called precise seek. This + means that, following a seek, the call to :mod:`next()` will return the + frame with the exact timestamp if it exists, or + the first frame with a timestamp larger than ``time_s``. + """ + self._c.seek(time_s) + return self + + def get_metadata(self): + """Returns video metadata + + Returns: + (dict): dictionary containing duration and frame rate for every stream + """ + return self._c.get_metadata() + + def set_current_stream(self, stream: str): + """Set current stream. + Explicitly define the stream we are operating on. + + Args: + stream (string): descriptor of the required stream. Defaults to ``"video:0"`` + Currently available stream types include ``['video', 'audio']``. + Each descriptor consists of two parts: stream type (e.g. 'video') and + a unique stream id (which are determined by video encoding). + In this way, if the video container contains multiple + streams of the same type, users can access the one they want. + If only the stream type is passed, the decoder auto-detects the first stream + of that type and returns it.
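For example, switching the reader to its audio stream might look like the following sketch (it assumes the container actually has an audio stream, that the video_reader backend is available, and that ``get_metadata()`` keys streams by type, e.g. ``'audio'``):

    import torchvision

    reader = torchvision.io.VideoReader("path_to_a_test_video", "video")
    if "audio" in reader.get_metadata():
        reader.set_current_stream("audio:0")
        # seek(0.0) returns the reader itself, so it can be iterated directly
        audio_frames = [frame["data"] for frame in reader.seek(0.0)]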
+ + Returns: + (bool): True on succes, False otherwise + """ + return self._c.set_current_stream(stream) + + +__all__ = [ + "write_video", + "read_video", + "read_video_timestamps", + "_read_video_from_file", + "_read_video_timestamps_from_file", + "_probe_video_from_file", + "_read_video_from_memory", + "_read_video_timestamps_from_memory", + "_probe_video_from_memory", + "_HAS_VIDEO_OPT", + "_read_video_clip_from_memory", + "_read_video_meta_data", + "VideoMetaData", + "Timebase", + "ImageReadMode", + "decode_image", + "decode_jpeg", + "decode_png", + "encode_jpeg", + "encode_png", + "read_file", + "read_image", + "write_file", + "write_jpeg", + "write_png", + "Video", +] diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/_video_opt.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/_video_opt.py new file mode 100644 index 0000000000000000000000000000000000000000..a34b023bc6cbc80c07bd8a750d9f06c9e3b112fe --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/_video_opt.py @@ -0,0 +1,570 @@ + +import importlib +import math +import os +import warnings +from fractions import Fraction +from typing import List, Tuple + +import numpy as np +import torch + + +_HAS_VIDEO_OPT = False + +try: + lib_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), "..")) + + loader_details = ( + importlib.machinery.ExtensionFileLoader, + importlib.machinery.EXTENSION_SUFFIXES + ) + + extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) + ext_specs = extfinder.find_spec("video_reader") + + if os.name == 'nt': + # Load the video_reader extension using LoadLibraryExW + import ctypes + + kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True) + with_load_library_flags = hasattr(kernel32, 'AddDllDirectory') + prev_error_mode = kernel32.SetErrorMode(0x0001) + + if with_load_library_flags: + kernel32.LoadLibraryExW.restype = ctypes.c_void_p + + if ext_specs is not None: + res = kernel32.LoadLibraryExW(ext_specs.origin, None, 0x00001100) + if res is None: + err = ctypes.WinError(ctypes.get_last_error()) + err.strerror += (f' Error loading "{ext_specs.origin}" or any or ' + 'its dependencies.') + raise err + + kernel32.SetErrorMode(prev_error_mode) + + if ext_specs is not None: + torch.ops.load_library(ext_specs.origin) + _HAS_VIDEO_OPT = True +except (ImportError, OSError): + pass + + +default_timebase = Fraction(0, 1) + + +# simple class for torch scripting +# the complex Fraction class from fractions module is not scriptable +class Timebase(object): + __annotations__ = {"numerator": int, "denominator": int} + __slots__ = ["numerator", "denominator"] + + def __init__( + self, + numerator, # type: int + denominator, # type: int + ): + # type: (...) 
-> None + self.numerator = numerator + self.denominator = denominator + + +class VideoMetaData(object): + __annotations__ = { + "has_video": bool, + "video_timebase": Timebase, + "video_duration": float, + "video_fps": float, + "has_audio": bool, + "audio_timebase": Timebase, + "audio_duration": float, + "audio_sample_rate": float, + } + __slots__ = [ + "has_video", + "video_timebase", + "video_duration", + "video_fps", + "has_audio", + "audio_timebase", + "audio_duration", + "audio_sample_rate", + ] + + def __init__(self): + self.has_video = False + self.video_timebase = Timebase(0, 1) + self.video_duration = 0.0 + self.video_fps = 0.0 + self.has_audio = False + self.audio_timebase = Timebase(0, 1) + self.audio_duration = 0.0 + self.audio_sample_rate = 0.0 + + +def _validate_pts(pts_range): + # type: (List[int]) -> None + + if pts_range[1] > 0: + assert ( + pts_range[0] <= pts_range[1] + ), """Start pts should not be smaller than end pts, got + start pts: {0:d} and end pts: {1:d}""".format( + pts_range[0], + pts_range[1], + ) + + +def _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration): + # type: (torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor,torch.Tensor) -> VideoMetaData + """ + Build update VideoMetaData struct with info about the video + """ + meta = VideoMetaData() + if vtimebase.numel() > 0: + meta.video_timebase = Timebase( + int(vtimebase[0].item()), int(vtimebase[1].item()) + ) + timebase = vtimebase[0].item() / float(vtimebase[1].item()) + if vduration.numel() > 0: + meta.has_video = True + meta.video_duration = float(vduration.item()) * timebase + if vfps.numel() > 0: + meta.video_fps = float(vfps.item()) + if atimebase.numel() > 0: + meta.audio_timebase = Timebase( + int(atimebase[0].item()), int(atimebase[1].item()) + ) + timebase = atimebase[0].item() / float(atimebase[1].item()) + if aduration.numel() > 0: + meta.has_audio = True + meta.audio_duration = float(aduration.item()) * timebase + if asample_rate.numel() > 0: + meta.audio_sample_rate = float(asample_rate.item()) + + return meta + + +def _align_audio_frames(aframes, aframe_pts, audio_pts_range): + # type: (torch.Tensor, torch.Tensor, List[int]) -> torch.Tensor + start, end = aframe_pts[0], aframe_pts[-1] + num_samples = aframes.size(0) + step_per_aframe = float(end - start + 1) / float(num_samples) + s_idx = 0 + e_idx = num_samples + if start < audio_pts_range[0]: + s_idx = int((audio_pts_range[0] - start) / step_per_aframe) + if end > audio_pts_range[1]: + e_idx = int((audio_pts_range[1] - end) / step_per_aframe) + return aframes[s_idx:e_idx, :] + + +def _read_video_from_file( + filename, + seek_frame_margin=0.25, + read_video_stream=True, + video_width=0, + video_height=0, + video_min_dimension=0, + video_max_dimension=0, + video_pts_range=(0, -1), + video_timebase=default_timebase, + read_audio_stream=True, + audio_samples=0, + audio_channels=0, + audio_pts_range=(0, -1), + audio_timebase=default_timebase, +): + """ + Reads a video from a file, returning both the video frames as well as + the audio frames + + Args: + filename (str): path to the video file + seek_frame_margin (double, optional): seeking frame in the stream is imprecise. Thus, + when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds + read_video_stream (int, optional): whether read video stream. If yes, set to 1. 
Otherwise, 0 + video_width/video_height/video_min_dimension/video_max_dimension (int): together decide + the size of decoded frames: + + - When video_width = 0, video_height = 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the original frame resolution + - When video_width = 0, video_height = 0, video_min_dimension != 0, + and video_max_dimension = 0, keep the aspect ratio and resize the + frame so that shorter edge size is video_min_dimension + - When video_width = 0, video_height = 0, video_min_dimension = 0, + and video_max_dimension != 0, keep the aspect ratio and resize + the frame so that longer edge size is video_max_dimension + - When video_width = 0, video_height = 0, video_min_dimension != 0, + and video_max_dimension != 0, resize the frame so that shorter + edge size is video_min_dimension, and longer edge size is + video_max_dimension. The aspect ratio may not be preserved + - When video_width = 0, video_height != 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the aspect ratio and resize + the frame so that frame video_height is $video_height + - When video_width != 0, video_height == 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the aspect ratio and resize + the frame so that frame video_width is $video_width + - When video_width != 0, video_height != 0, video_min_dimension = 0, + and video_max_dimension = 0, resize the frame so that frame + video_width and video_height are set to $video_width and + $video_height, respectively + video_pts_range (list(int), optional): the start and end presentation timestamp of video stream + video_timebase (Fraction, optional): a Fraction rational number which denotes timebase in video stream + read_audio_stream (int, optional): whether read audio stream. If yes, set to 1. Otherwise, 0 + audio_samples (int, optional): audio sampling rate + audio_channels (int optional): audio channels + audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream + audio_timebase (Fraction, optional): a Fraction rational number which denotes time base in audio stream + + Returns + vframes (Tensor[T, H, W, C]): the `T` video frames + aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and + `K` is the number of audio_channels + info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) + and audio_fps (int) + """ + _validate_pts(video_pts_range) + _validate_pts(audio_pts_range) + + result = torch.ops.video_reader.read_video_from_file( + filename, + seek_frame_margin, + 0, # getPtsOnly + read_video_stream, + video_width, + video_height, + video_min_dimension, + video_max_dimension, + video_pts_range[0], + video_pts_range[1], + video_timebase.numerator, + video_timebase.denominator, + read_audio_stream, + audio_samples, + audio_channels, + audio_pts_range[0], + audio_pts_range[1], + audio_timebase.numerator, + audio_timebase.denominator, + ) + vframes, _vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + result + ) + info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration) + if aframes.numel() > 0: + # when audio stream is found + aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range) + return vframes, aframes, info + + +def _read_video_timestamps_from_file(filename): + """ + Decode all video- and audio frames in the video. Only pts + (presentation timestamp) is returned. The actual frame pixel data is not + copied. 
Thus, it is much faster than read_video(...) + """ + result = torch.ops.video_reader.read_video_from_file( + filename, + 0, # seek_frame_margin + 1, # getPtsOnly + 1, # read_video_stream + 0, # video_width + 0, # video_height + 0, # video_min_dimension + 0, # video_max_dimension + 0, # video_start_pts + -1, # video_end_pts + 0, # video_timebase_num + 1, # video_timebase_den + 1, # read_audio_stream + 0, # audio_samples + 0, # audio_channels + 0, # audio_start_pts + -1, # audio_end_pts + 0, # audio_timebase_num + 1, # audio_timebase_den + ) + _vframes, vframe_pts, vtimebase, vfps, vduration, \ + _aframes, aframe_pts, atimebase, asample_rate, aduration = result + info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration) + + vframe_pts = vframe_pts.numpy().tolist() + aframe_pts = aframe_pts.numpy().tolist() + return vframe_pts, aframe_pts, info + + +def _probe_video_from_file(filename): + """ + Probe a video file and return VideoMetaData with info about the video + """ + result = torch.ops.video_reader.probe_video_from_file(filename) + vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result + info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration) + return info + + +def _read_video_from_memory( + video_data, # type: torch.Tensor + seek_frame_margin=0.25, # type: float + read_video_stream=1, # type: int + video_width=0, # type: int + video_height=0, # type: int + video_min_dimension=0, # type: int + video_max_dimension=0, # type: int + video_pts_range=(0, -1), # type: List[int] + video_timebase_numerator=0, # type: int + video_timebase_denominator=1, # type: int + read_audio_stream=1, # type: int + audio_samples=0, # type: int + audio_channels=0, # type: int + audio_pts_range=(0, -1), # type: List[int] + audio_timebase_numerator=0, # type: int + audio_timebase_denominator=1, # type: int +): + # type: (...) -> Tuple[torch.Tensor, torch.Tensor] + """ + Reads a video from memory, returning both the video frames as well as + the audio frames + This function is torchscriptable. + + Args: + video_data (data type could be 1) torch.Tensor, dtype=torch.int8 or 2) python bytes): + compressed video content stored in either 1) torch.Tensor 2) python bytes + seek_frame_margin (double, optional): seeking frame in the stream is imprecise. + Thus, when video_start_pts is specified, we seek the pts earlier by seek_frame_margin seconds + read_video_stream (int, optional): whether read video stream. If yes, set to 1. Otherwise, 0 + video_width/video_height/video_min_dimension/video_max_dimension (int): together decide + the size of decoded frames: + + - When video_width = 0, video_height = 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the original frame resolution + - When video_width = 0, video_height = 0, video_min_dimension != 0, + and video_max_dimension = 0, keep the aspect ratio and resize the + frame so that shorter edge size is video_min_dimension + - When video_width = 0, video_height = 0, video_min_dimension = 0, + and video_max_dimension != 0, keep the aspect ratio and resize + the frame so that longer edge size is video_max_dimension + - When video_width = 0, video_height = 0, video_min_dimension != 0, + and video_max_dimension != 0, resize the frame so that shorter + edge size is video_min_dimension, and longer edge size is + video_max_dimension. 
The aspect ratio may not be preserved + - When video_width = 0, video_height != 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the aspect ratio and resize + the frame so that frame video_height is $video_height + - When video_width != 0, video_height == 0, video_min_dimension = 0, + and video_max_dimension = 0, keep the aspect ratio and resize + the frame so that frame video_width is $video_width + - When video_width != 0, video_height != 0, video_min_dimension = 0, + and video_max_dimension = 0, resize the frame so that frame + video_width and video_height are set to $video_width and + $video_height, respectively + video_pts_range (list(int), optional): the start and end presentation timestamp of video stream + video_timebase_numerator / video_timebase_denominator (float, optional): a rational + number which denotes timebase in video stream + read_audio_stream (int, optional): whether read audio stream. If yes, set to 1. Otherwise, 0 + audio_samples (int, optional): audio sampling rate + audio_channels (int optional): audio audio_channels + audio_pts_range (list(int), optional): the start and end presentation timestamp of audio stream + audio_timebase_numerator / audio_timebase_denominator (float, optional): + a rational number which denotes time base in audio stream + + Returns: + vframes (Tensor[T, H, W, C]): the `T` video frames + aframes (Tensor[L, K]): the audio frames, where `L` is the number of points and + `K` is the number of channels + """ + + _validate_pts(video_pts_range) + _validate_pts(audio_pts_range) + + if not isinstance(video_data, torch.Tensor): + video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8)) + + result = torch.ops.video_reader.read_video_from_memory( + video_data, + seek_frame_margin, + 0, # getPtsOnly + read_video_stream, + video_width, + video_height, + video_min_dimension, + video_max_dimension, + video_pts_range[0], + video_pts_range[1], + video_timebase_numerator, + video_timebase_denominator, + read_audio_stream, + audio_samples, + audio_channels, + audio_pts_range[0], + audio_pts_range[1], + audio_timebase_numerator, + audio_timebase_denominator, + ) + + vframes, _vframe_pts, vtimebase, vfps, vduration, \ + aframes, aframe_pts, atimebase, asample_rate, aduration = ( + result + ) + + if aframes.numel() > 0: + # when audio stream is found + aframes = _align_audio_frames(aframes, aframe_pts, audio_pts_range) + + return vframes, aframes + + +def _read_video_timestamps_from_memory(video_data): + """ + Decode all frames in the video. Only pts (presentation timestamp) is returned. + The actual frame pixel data is not copied. Thus, read_video_timestamps(...) + is much faster than read_video(...) 
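A short sketch of the in-memory decoding helpers above (it assumes the video_reader extension was compiled in; ``video.mp4`` is a placeholder path):

    from torchvision.io import _read_video_from_memory  # private helper re-exported in io/__init__.py

    with open("video.mp4", "rb") as f:
        data = f.read()                               # raw bytes are accepted directly

    # decode only the video stream; aframes comes back empty when audio is skipped
    vframes, aframes = _read_video_from_memory(data, read_audio_stream=0)
    print(vframes.shape)                              # [T, H, W, C], uint8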
+ """ + if not isinstance(video_data, torch.Tensor): + video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8)) + result = torch.ops.video_reader.read_video_from_memory( + video_data, + 0, # seek_frame_margin + 1, # getPtsOnly + 1, # read_video_stream + 0, # video_width + 0, # video_height + 0, # video_min_dimension + 0, # video_max_dimension + 0, # video_start_pts + -1, # video_end_pts + 0, # video_timebase_num + 1, # video_timebase_den + 1, # read_audio_stream + 0, # audio_samples + 0, # audio_channels + 0, # audio_start_pts + -1, # audio_end_pts + 0, # audio_timebase_num + 1, # audio_timebase_den + ) + _vframes, vframe_pts, vtimebase, vfps, vduration, \ + _aframes, aframe_pts, atimebase, asample_rate, aduration = ( + result + ) + info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration) + + vframe_pts = vframe_pts.numpy().tolist() + aframe_pts = aframe_pts.numpy().tolist() + return vframe_pts, aframe_pts, info + + +def _probe_video_from_memory(video_data): + # type: (torch.Tensor) -> VideoMetaData + """ + Probe a video in memory and return VideoMetaData with info about the video + This function is torchscriptable + """ + if not isinstance(video_data, torch.Tensor): + video_data = torch.from_numpy(np.frombuffer(video_data, dtype=np.uint8)) + result = torch.ops.video_reader.probe_video_from_memory(video_data) + vtimebase, vfps, vduration, atimebase, asample_rate, aduration = result + info = _fill_info(vtimebase, vfps, vduration, atimebase, asample_rate, aduration) + return info + + +def _convert_to_sec(start_pts, end_pts, pts_unit, time_base): + if pts_unit == 'pts': + start_pts = float(start_pts * time_base) + end_pts = float(end_pts * time_base) + pts_unit = 'sec' + return start_pts, end_pts, pts_unit + + +def _read_video(filename, start_pts=0, end_pts=None, pts_unit="pts"): + if end_pts is None: + end_pts = float("inf") + + if pts_unit == "pts": + warnings.warn( + "The pts_unit 'pts' gives wrong results and will be removed in a " + + "follow-up version. Please use pts_unit 'sec'." 
+ ) + + info = _probe_video_from_file(filename) + + has_video = info.has_video + has_audio = info.has_audio + video_pts_range = (0, -1) + video_timebase = default_timebase + audio_pts_range = (0, -1) + audio_timebase = default_timebase + time_base = default_timebase + + if has_video: + video_timebase = Fraction( + info.video_timebase.numerator, info.video_timebase.denominator + ) + time_base = video_timebase + + if has_audio: + audio_timebase = Fraction( + info.audio_timebase.numerator, info.audio_timebase.denominator + ) + time_base = time_base if time_base else audio_timebase + + # video_timebase is the default time_base + start_pts_sec, end_pts_sec, pts_unit = _convert_to_sec( + start_pts, end_pts, pts_unit, time_base) + + def get_pts(time_base): + start_offset = start_pts_sec + end_offset = end_pts_sec + if pts_unit == "sec": + start_offset = int(math.floor(start_pts_sec * (1 / time_base))) + if end_offset != float("inf"): + end_offset = int(math.ceil(end_pts_sec * (1 / time_base))) + if end_offset == float("inf"): + end_offset = -1 + return start_offset, end_offset + + if has_video: + video_pts_range = get_pts(video_timebase) + + if has_audio: + audio_pts_range = get_pts(audio_timebase) + + vframes, aframes, info = _read_video_from_file( + filename, + read_video_stream=True, + video_pts_range=video_pts_range, + video_timebase=video_timebase, + read_audio_stream=True, + audio_pts_range=audio_pts_range, + audio_timebase=audio_timebase, + ) + _info = {} + if has_video: + _info["video_fps"] = info.video_fps + if has_audio: + _info["audio_fps"] = info.audio_sample_rate + + return vframes, aframes, _info + + +def _read_video_timestamps(filename, pts_unit="pts"): + if pts_unit == "pts": + warnings.warn( + "The pts_unit 'pts' gives wrong results and will be removed in a " + + "follow-up version. Please use pts_unit 'sec'." 
+ ) + + pts, _, info = _read_video_timestamps_from_file(filename) + + if pts_unit == "sec": + video_time_base = Fraction( + info.video_timebase.numerator, info.video_timebase.denominator + ) + pts = [x * video_time_base for x in pts] + + video_fps = info.video_fps if info.has_video else None + + return pts, video_fps diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/image.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/image.py new file mode 100644 index 0000000000000000000000000000000000000000..4f824abad60e1c8dbf2dc6f91131e37d322d191c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/image.py @@ -0,0 +1,260 @@ +import torch + +import os +import os.path as osp +import importlib.machinery + +from enum import Enum + +_HAS_IMAGE_OPT = False + +try: + lib_dir = osp.abspath(osp.join(osp.dirname(__file__), "..")) + + loader_details = ( + importlib.machinery.ExtensionFileLoader, + importlib.machinery.EXTENSION_SUFFIXES + ) + + extfinder = importlib.machinery.FileFinder(lib_dir, loader_details) # type: ignore[arg-type] + ext_specs = extfinder.find_spec("image") + + if os.name == 'nt': + # Load the image extension using LoadLibraryExW + import ctypes + + kernel32 = ctypes.WinDLL('kernel32.dll', use_last_error=True) + with_load_library_flags = hasattr(kernel32, 'AddDllDirectory') + prev_error_mode = kernel32.SetErrorMode(0x0001) + + kernel32.LoadLibraryW.restype = ctypes.c_void_p + if with_load_library_flags: + kernel32.LoadLibraryExW.restype = ctypes.c_void_p + + if ext_specs is not None: + res = kernel32.LoadLibraryExW(ext_specs.origin, None, 0x00001100) + if res is None: + err = ctypes.WinError(ctypes.get_last_error()) + err.strerror += (f' Error loading "{ext_specs.origin}" or any or ' + 'its dependencies.') + raise err + + kernel32.SetErrorMode(prev_error_mode) + + if ext_specs is not None: + torch.ops.load_library(ext_specs.origin) + _HAS_IMAGE_OPT = True +except (ImportError, OSError): + pass + + +class ImageReadMode(Enum): + """ + Support for various modes while reading images. + + Use ``ImageReadMode.UNCHANGED`` for loading the image as-is, + ``ImageReadMode.GRAY`` for converting to grayscale, + ``ImageReadMode.GRAY_ALPHA`` for grayscale with transparency, + ``ImageReadMode.RGB`` for RGB and ``ImageReadMode.RGB_ALPHA`` for + RGB with transparency. + """ + UNCHANGED = 0 + GRAY = 1 + GRAY_ALPHA = 2 + RGB = 3 + RGB_ALPHA = 4 + + +def read_file(path: str) -> torch.Tensor: + """ + Reads and outputs the bytes contents of a file as a uint8 Tensor + with one dimension. + + Args: + path (str): the path to the file to be read + + Returns: + data (Tensor) + """ + data = torch.ops.image.read_file(path) + return data + + +def write_file(filename: str, data: torch.Tensor) -> None: + """ + Writes the contents of a uint8 tensor with one dimension to a + file. + + Args: + filename (str): the path to the file to be written + data (Tensor): the contents to be written to the output file + """ + torch.ops.image.write_file(filename, data) + + +def decode_png(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: + """ + Decodes a PNG image into a 3 dimensional RGB Tensor. + Optionally converts the image to the desired format. + The values of the output tensor are uint8 between 0 and 255. + + Args: + input (Tensor[1]): a one dimensional uint8 tensor containing + the raw bytes of the PNG image. + mode (ImageReadMode): the read mode used for optionally + converting the image. Default: ``ImageReadMode.UNCHANGED``. 
+ See `ImageReadMode` class for more information on various + available modes. + + Returns: + output (Tensor[image_channels, image_height, image_width]) + """ + output = torch.ops.image.decode_png(input, mode.value) + return output + + +def encode_png(input: torch.Tensor, compression_level: int = 6) -> torch.Tensor: + """ + Takes an input tensor in CHW layout and returns a buffer with the contents + of its corresponding PNG file. + + Args: + input (Tensor[channels, image_height, image_width]): int8 image tensor of + ``c`` channels, where ``c`` must 3 or 1. + compression_level (int): Compression factor for the resulting file, it must be a number + between 0 and 9. Default: 6 + + Returns: + Tensor[1]: A one dimensional int8 tensor that contains the raw bytes of the + PNG file. + """ + output = torch.ops.image.encode_png(input, compression_level) + return output + + +def write_png(input: torch.Tensor, filename: str, compression_level: int = 6): + """ + Takes an input tensor in CHW layout (or HW in the case of grayscale images) + and saves it in a PNG file. + + Args: + input (Tensor[channels, image_height, image_width]): int8 image tensor of + ``c`` channels, where ``c`` must be 1 or 3. + filename (str): Path to save the image. + compression_level (int): Compression factor for the resulting file, it must be a number + between 0 and 9. Default: 6 + """ + output = encode_png(input, compression_level) + write_file(filename, output) + + +def decode_jpeg(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED, + device: str = 'cpu') -> torch.Tensor: + """ + Decodes a JPEG image into a 3 dimensional RGB Tensor. + Optionally converts the image to the desired format. + The values of the output tensor are uint8 between 0 and 255. + + Args: + input (Tensor[1]): a one dimensional uint8 tensor containing + the raw bytes of the JPEG image. This tensor must be on CPU, + regardless of the ``device`` parameter. + mode (ImageReadMode): the read mode used for optionally + converting the image. Default: ``ImageReadMode.UNCHANGED``. + See ``ImageReadMode`` class for more information on various + available modes. + device (str or torch.device): The device on which the decoded image will + be stored. If a cuda device is specified, the image will be decoded + with `nvjpeg <https://developer.nvidia.com/nvjpeg>`_. This is only + supported for CUDA version >= 10.1 + + Returns: + output (Tensor[image_channels, image_height, image_width]) + """ + device = torch.device(device) + if device.type == 'cuda': + output = torch.ops.image.decode_jpeg_cuda(input, mode.value, device) + else: + output = torch.ops.image.decode_jpeg(input, mode.value) + return output + + +def encode_jpeg(input: torch.Tensor, quality: int = 75) -> torch.Tensor: + """ + Takes an input tensor in CHW layout and returns a buffer with the contents + of its corresponding JPEG file. + + Args: + input (Tensor[channels, image_height, image_width])): int8 image tensor of + ``c`` channels, where ``c`` must be 1 or 3. + quality (int): Quality of the resulting JPEG file, it must be a number between + 1 and 100. Default: 75 + + Returns: + output (Tensor[1]): A one dimensional int8 tensor that contains the raw bytes of the + JPEG file. 
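Putting the image helpers in this file together, a small re-encoding sketch (``input.png`` and ``output.jpg`` are placeholder paths):

    from torchvision.io import ImageReadMode, read_image, write_jpeg

    img = read_image("input.png", mode=ImageReadMode.RGB)   # uint8 tensor of shape [3, H, W]
    write_jpeg(img, "output.jpg", quality=90)                # encode_jpeg + write_file under the hood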
+ """ + if quality < 1 or quality > 100: + raise ValueError('Image quality should be a positive number ' + 'between 1 and 100') + + output = torch.ops.image.encode_jpeg(input, quality) + return output + + +def write_jpeg(input: torch.Tensor, filename: str, quality: int = 75): + """ + Takes an input tensor in CHW layout and saves it in a JPEG file. + + Args: + input (Tensor[channels, image_height, image_width]): int8 image tensor of ``c`` + channels, where ``c`` must be 1 or 3. + filename (str): Path to save the image. + quality (int): Quality of the resulting JPEG file, it must be a number + between 1 and 100. Default: 75 + """ + output = encode_jpeg(input, quality) + write_file(filename, output) + + +def decode_image(input: torch.Tensor, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: + """ + Detects whether an image is a JPEG or PNG and performs the appropriate + operation to decode the image into a 3 dimensional RGB Tensor. + + Optionally converts the image to the desired format. + The values of the output tensor are uint8 between 0 and 255. + + Args: + input (Tensor): a one dimensional uint8 tensor containing the raw bytes of the + PNG or JPEG image. + mode (ImageReadMode): the read mode used for optionally converting the image. + Default: ``ImageReadMode.UNCHANGED``. + See ``ImageReadMode`` class for more information on various + available modes. + + Returns: + output (Tensor[image_channels, image_height, image_width]) + """ + output = torch.ops.image.decode_image(input, mode.value) + return output + + +def read_image(path: str, mode: ImageReadMode = ImageReadMode.UNCHANGED) -> torch.Tensor: + """ + Reads a JPEG or PNG image into a 3 dimensional RGB Tensor. + Optionally converts the image to the desired format. + The values of the output tensor are uint8 between 0 and 255. + + Args: + path (str): path of the JPEG or PNG image. + mode (ImageReadMode): the read mode used for optionally converting the image. + Default: ``ImageReadMode.UNCHANGED``. + See ``ImageReadMode`` class for more information on various + available modes. + + Returns: + output (Tensor[image_channels, image_height, image_width]) + """ + data = read_file(path) + return decode_image(data, mode) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/video.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/video.py new file mode 100644 index 0000000000000000000000000000000000000000..e16e8906d9755312c40d6fbf5244fd8412902995 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/io/video.py @@ -0,0 +1,399 @@ +import gc +import math +import re +import warnings +from typing import Any, Dict, List, Optional, Tuple, Union + +import numpy as np +import torch + +from . import _video_opt + + +try: + import av + + av.logging.set_level(av.logging.ERROR) + if not hasattr(av.video.frame.VideoFrame, "pict_type"): + av = ImportError( + """\ +Your version of PyAV is too old for the necessary video operations in torchvision. +If you are on Python 3.5, you will have to build from source (the conda-forge +packages are not up-to-date). See +https://github.com/mikeboers/PyAV#installation for instructions on how to +install PyAV on your system. +""" + ) +except ImportError: + av = ImportError( + """\ +PyAV is not installed, and is necessary for the video operations in torchvision. +See https://github.com/mikeboers/PyAV#installation for instructions on how to +install PyAV on your system. 
+""" + ) + + +def _check_av_available() -> None: + if isinstance(av, Exception): + raise av + + +def _av_available() -> bool: + return not isinstance(av, Exception) + + +# PyAV has some reference cycles +_CALLED_TIMES = 0 +_GC_COLLECTION_INTERVAL = 10 + + +def write_video( + filename: str, + video_array: torch.Tensor, + fps: float, + video_codec: str = "libx264", + options: Optional[Dict[str, Any]] = None, + audio_array: Optional[torch.Tensor] = None, + audio_fps: Optional[float] = None, + audio_codec: Optional[str] = None, + audio_options: Optional[Dict[str, Any]] = None, +) -> None: + """ + Writes a 4d tensor in [T, H, W, C] format in a video file + + Args: + filename (str): path where the video will be saved + video_array (Tensor[T, H, W, C]): tensor containing the individual frames, + as a uint8 tensor in [T, H, W, C] format + fps (Number): video frames per second + video_codec (str): the name of the video codec, i.e. "libx264", "h264", etc. + options (Dict): dictionary containing options to be passed into the PyAV video stream + audio_array (Tensor[C, N]): tensor containing the audio, where C is the number of channels + and N is the number of samples + audio_fps (Number): audio sample rate, typically 44100 or 48000 + audio_codec (str): the name of the audio codec, i.e. "mp3", "aac", etc. + audio_options (Dict): dictionary containing options to be passed into the PyAV audio stream + """ + _check_av_available() + video_array = torch.as_tensor(video_array, dtype=torch.uint8).numpy() + + # PyAV does not support floating point numbers with decimal point + # and will throw OverflowException in case this is not the case + if isinstance(fps, float): + fps = np.round(fps) + + with av.open(filename, mode="w") as container: + stream = container.add_stream(video_codec, rate=fps) + stream.width = video_array.shape[2] + stream.height = video_array.shape[1] + stream.pix_fmt = "yuv420p" if video_codec != "libx264rgb" else "rgb24" + stream.options = options or {} + + if audio_array is not None: + audio_format_dtypes = { + 'dbl': '<f8', + 'dblp': '<f8', + 'flt': '<f4', + 'fltp': '<f4', + 's16': '<i2', + 's16p': '<i2', + 's32': '<i4', + 's32p': '<i4', + 'u8': 'u1', + 'u8p': 'u1', + } + a_stream = container.add_stream(audio_codec, rate=audio_fps) + a_stream.options = audio_options or {} + + num_channels = audio_array.shape[0] + audio_layout = "stereo" if num_channels > 1 else "mono" + audio_sample_fmt = container.streams.audio[0].format.name + + format_dtype = np.dtype(audio_format_dtypes[audio_sample_fmt]) + audio_array = torch.as_tensor(audio_array).numpy().astype(format_dtype) + + frame = av.AudioFrame.from_ndarray( + audio_array, format=audio_sample_fmt, layout=audio_layout + ) + + frame.sample_rate = audio_fps + + for packet in a_stream.encode(frame): + container.mux(packet) + + for packet in a_stream.encode(): + container.mux(packet) + + for img in video_array: + frame = av.VideoFrame.from_ndarray(img, format="rgb24") + frame.pict_type = "NONE" + for packet in stream.encode(frame): + container.mux(packet) + + # Flush stream + for packet in stream.encode(): + container.mux(packet) + + +def _read_from_stream( + container: "av.container.Container", + start_offset: float, + end_offset: float, + pts_unit: str, + stream: "av.stream.Stream", + stream_name: Dict[str, Optional[Union[int, Tuple[int, ...], List[int]]]], +) -> List["av.frame.Frame"]: + global _CALLED_TIMES, _GC_COLLECTION_INTERVAL + _CALLED_TIMES += 1 + if _CALLED_TIMES % _GC_COLLECTION_INTERVAL == _GC_COLLECTION_INTERVAL - 1: + gc.collect() + 
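+    # When pts_unit == "sec", the offsets below are converted to the stream's native
+    # pts scale by dividing by stream.time_base (a fractions.Fraction), so that
+    # container.seek() and the frame.pts comparisons operate in the same unit.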
+ if pts_unit == "sec": + start_offset = int(math.floor(start_offset * (1 / stream.time_base))) + if end_offset != float("inf"): + end_offset = int(math.ceil(end_offset * (1 / stream.time_base))) + else: + warnings.warn( + "The pts_unit 'pts' gives wrong results and will be removed in a " + + "follow-up version. Please use pts_unit 'sec'." + ) + + frames = {} + should_buffer = True + max_buffer_size = 5 + if stream.type == "video": + # DivX-style packed B-frames can have out-of-order pts (2 frames in a single pkt) + # so need to buffer some extra frames to sort everything + # properly + extradata = stream.codec_context.extradata + # overly complicated way of finding if `divx_packed` is set, following + # https://github.com/FFmpeg/FFmpeg/commit/d5a21172283572af587b3d939eba0091484d3263 + if extradata and b"DivX" in extradata: + # can't use regex directly because of some weird characters sometimes... + pos = extradata.find(b"DivX") + d = extradata[pos:] + o = re.search(br"DivX(\d+)Build(\d+)(\w)", d) + if o is None: + o = re.search(br"DivX(\d+)b(\d+)(\w)", d) + if o is not None: + should_buffer = o.group(3) == b"p" + seek_offset = start_offset + # some files don't seek to the right location, so better be safe here + seek_offset = max(seek_offset - 1, 0) + if should_buffer: + # FIXME this is kind of a hack, but we will jump to the previous keyframe + # so this will be safe + seek_offset = max(seek_offset - max_buffer_size, 0) + try: + # TODO check if stream needs to always be the video stream here or not + container.seek(seek_offset, any_frame=False, backward=True, stream=stream) + except av.AVError: + # TODO add some warnings in this case + # print("Corrupted file?", container.name) + return [] + buffer_count = 0 + try: + for _idx, frame in enumerate(container.decode(**stream_name)): + frames[frame.pts] = frame + if frame.pts >= end_offset: + if should_buffer and buffer_count < max_buffer_size: + buffer_count += 1 + continue + break + except av.AVError: + # TODO add a warning + pass + # ensure that the results are sorted wrt the pts + result = [ + frames[i] for i in sorted(frames) if start_offset <= frames[i].pts <= end_offset + ] + if len(frames) > 0 and start_offset > 0 and start_offset not in frames: + # if there is no frame that exactly matches the pts of start_offset + # add the last frame smaller than start_offset, to guarantee that + # we will have all the necessary data. 
This is most useful for audio + preceding_frames = [i for i in frames if i < start_offset] + if len(preceding_frames) > 0: + first_frame_pts = max(preceding_frames) + result.insert(0, frames[first_frame_pts]) + return result + + +def _align_audio_frames( + aframes: torch.Tensor, audio_frames: List["av.frame.Frame"], ref_start: int, ref_end: float +) -> torch.Tensor: + start, end = audio_frames[0].pts, audio_frames[-1].pts + total_aframes = aframes.shape[1] + step_per_aframe = (end - start + 1) / total_aframes + s_idx = 0 + e_idx = total_aframes + if start < ref_start: + s_idx = int((ref_start - start) / step_per_aframe) + if end > ref_end: + e_idx = int((ref_end - end) / step_per_aframe) + return aframes[:, s_idx:e_idx] + + +def read_video( + filename: str, start_pts: int = 0, end_pts: Optional[float] = None, pts_unit: str = "pts" +) -> Tuple[torch.Tensor, torch.Tensor, Dict[str, Any]]: + """ + Reads a video from a file, returning both the video frames as well as + the audio frames + + Args: + filename (str): path to the video file + start_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): + The start presentation time of the video + end_pts (int if pts_unit = 'pts', float / Fraction if pts_unit = 'sec', optional): + The end presentation time + pts_unit (str, optional): unit in which start_pts and end_pts values will be interpreted, + either 'pts' or 'sec'. Defaults to 'pts'. + + Returns: + vframes (Tensor[T, H, W, C]): the `T` video frames + aframes (Tensor[K, L]): the audio frames, where `K` is the number of channels and `L` is the number of points + info (Dict): metadata for the video and audio. Can contain the fields video_fps (float) and audio_fps (int) + """ + + from torchvision import get_video_backend + + if get_video_backend() != "pyav": + return _video_opt._read_video(filename, start_pts, end_pts, pts_unit) + + _check_av_available() + + if end_pts is None: + end_pts = float("inf") + + if end_pts < start_pts: + raise ValueError( + "end_pts should be larger than start_pts, got " + "start_pts={} and end_pts={}".format(start_pts, end_pts) + ) + + info = {} + video_frames = [] + audio_frames = [] + + try: + with av.open(filename, metadata_errors="ignore") as container: + time_base = _video_opt.default_timebase + if container.streams.video: + time_base = container.streams.video[0].time_base + elif container.streams.audio: + time_base = container.streams.audio[0].time_base + # video_timebase is the default time_base + start_pts_sec, end_pts_sec, pts_unit = _video_opt._convert_to_sec( + start_pts, end_pts, pts_unit, time_base) + if container.streams.video: + video_frames = _read_from_stream( + container, + start_pts_sec, + end_pts_sec, + pts_unit, + container.streams.video[0], + {"video": 0}, + ) + video_fps = container.streams.video[0].average_rate + # guard against potentially corrupted files + if video_fps is not None: + info["video_fps"] = float(video_fps) + + if container.streams.audio: + audio_frames = _read_from_stream( + container, + start_pts_sec, + end_pts_sec, + pts_unit, + container.streams.audio[0], + {"audio": 0}, + ) + info["audio_fps"] = container.streams.audio[0].rate + + except av.AVError: + # TODO raise a warning? 
+ pass + + vframes_list = [frame.to_rgb().to_ndarray() for frame in video_frames] + aframes_list = [frame.to_ndarray() for frame in audio_frames] + + if vframes_list: + vframes = torch.as_tensor(np.stack(vframes_list)) + else: + vframes = torch.empty((0, 1, 1, 3), dtype=torch.uint8) + + if aframes_list: + aframes = np.concatenate(aframes_list, 1) + aframes = torch.as_tensor(aframes) + aframes = _align_audio_frames(aframes, audio_frames, start_pts, end_pts) + else: + aframes = torch.empty((1, 0), dtype=torch.float32) + + return vframes, aframes, info + + +def _can_read_timestamps_from_packets(container: "av.container.Container") -> bool: + extradata = container.streams[0].codec_context.extradata + if extradata is None: + return False + if b"Lavc" in extradata: + return True + return False + + +def _decode_video_timestamps(container: "av.container.Container") -> List[int]: + if _can_read_timestamps_from_packets(container): + # fast path + return [x.pts for x in container.demux(video=0) if x.pts is not None] + else: + return [x.pts for x in container.decode(video=0) if x.pts is not None] + + +def read_video_timestamps(filename: str, pts_unit: str = "pts") -> Tuple[List[int], Optional[float]]: + """ + List the video frames timestamps. + + Note that the function decodes the whole video frame-by-frame. + + Args: + filename (str): path to the video file + pts_unit (str, optional): unit in which timestamp values will be returned + either 'pts' or 'sec'. Defaults to 'pts'. + + Returns: + pts (List[int] if pts_unit = 'pts', List[Fraction] if pts_unit = 'sec'): + presentation timestamps for each one of the frames in the video. + video_fps (float, optional): the frame rate for the video + + """ + from torchvision import get_video_backend + + if get_video_backend() != "pyav": + return _video_opt._read_video_timestamps(filename, pts_unit) + + _check_av_available() + + video_fps = None + pts = [] + + try: + with av.open(filename, metadata_errors="ignore") as container: + if container.streams.video: + video_stream = container.streams.video[0] + video_time_base = video_stream.time_base + try: + pts = _decode_video_timestamps(container) + except av.AVError: + warnings.warn(f"Failed decoding frames for file {filename}") + video_fps = float(video_stream.average_rate) + except av.AVError: + # TODO add a warning + pass + + pts.sort() + + if pts_unit == "sec": + pts = [x * video_time_base for x in pts] + + return pts, video_fps diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..283e544e98eea4c25c0c3b29d1f81ef2f3614073 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/__init__.py @@ -0,0 +1,14 @@ +from .alexnet import * +from .resnet import * +from .vgg import * +from .squeezenet import * +from .inception import * +from .densenet import * +from .googlenet import * +from .mobilenet import * +from .mnasnet import * +from .shufflenetv2 import * +from . import segmentation +from . import detection +from . import video +from . 
import quantization diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..df5ab9a044c4a3863f0129774ba212ed6e246ae1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/_utils.py @@ -0,0 +1,66 @@ +from collections import OrderedDict + +from torch import nn +from typing import Dict + + +class IntermediateLayerGetter(nn.ModuleDict): + """ + Module wrapper that returns intermediate layers from a model + + It has a strong assumption that the modules have been registered + into the model in the same order as they are used. + This means that one should **not** reuse the same nn.Module + twice in the forward if you want this to work. + + Additionally, it is only able to query submodules that are directly + assigned to the model. So if `model` is passed, `model.feature1` can + be returned, but not `model.feature1.layer2`. + + Args: + model (nn.Module): model on which we will extract the features + return_layers (Dict[name, new_name]): a dict containing the names + of the modules for which the activations will be returned as + the key of the dict, and the value of the dict is the name + of the returned activation (which the user can specify). + + Examples:: + + >>> m = torchvision.models.resnet18(pretrained=True) + >>> # extract layer1 and layer3, giving as names `feat1` and feat2` + >>> new_m = torchvision.models._utils.IntermediateLayerGetter(m, + >>> {'layer1': 'feat1', 'layer3': 'feat2'}) + >>> out = new_m(torch.rand(1, 3, 224, 224)) + >>> print([(k, v.shape) for k, v in out.items()]) + >>> [('feat1', torch.Size([1, 64, 56, 56])), + >>> ('feat2', torch.Size([1, 256, 14, 14]))] + """ + _version = 2 + __annotations__ = { + "return_layers": Dict[str, str], + } + + def __init__(self, model: nn.Module, return_layers: Dict[str, str]) -> None: + if not set(return_layers).issubset([name for name, _ in model.named_children()]): + raise ValueError("return_layers are not present in model") + orig_return_layers = return_layers + return_layers = {str(k): str(v) for k, v in return_layers.items()} + layers = OrderedDict() + for name, module in model.named_children(): + layers[name] = module + if name in return_layers: + del return_layers[name] + if not return_layers: + break + + super(IntermediateLayerGetter, self).__init__(layers) + self.return_layers = orig_return_layers + + def forward(self, x): + out = OrderedDict() + for name, module in self.items(): + x = module(x) + if name in self.return_layers: + out_name = self.return_layers[name] + out[out_name] = x + return out diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/alexnet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/alexnet.py new file mode 100644 index 0000000000000000000000000000000000000000..b9e8206d552863bab88c619a44ce8cc7021f5341 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/alexnet.py @@ -0,0 +1,66 @@ +import torch +import torch.nn as nn +from .utils import load_state_dict_from_url +from typing import Any + + +__all__ = ['AlexNet', 'alexnet'] + + +model_urls = { + 'alexnet': 'https://download.pytorch.org/models/alexnet-owt-7be5be79.pth', +} + + +class AlexNet(nn.Module): + + def __init__(self, num_classes: int = 1000) -> None: + super(AlexNet, self).__init__() + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2), + nn.ReLU(inplace=True), + 
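+            # the 11x11 / stride-4 conv above maps a 224x224 input to 55x55 activations;
+            # the 3x3 / stride-2 max pool below further reduces them to 27x27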
nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(64, 192, kernel_size=5, padding=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + nn.Conv2d(192, 384, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(384, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 256, kernel_size=3, padding=1), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2), + ) + self.avgpool = nn.AdaptiveAvgPool2d((6, 6)) + self.classifier = nn.Sequential( + nn.Dropout(), + nn.Linear(256 * 6 * 6, 4096), + nn.ReLU(inplace=True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(inplace=True), + nn.Linear(4096, num_classes), + ) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + + +def alexnet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> AlexNet: + r"""AlexNet model architecture from the + `"One weird trick..." <https://arxiv.org/abs/1404.5997>`_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = AlexNet(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['alexnet'], + progress=progress) + model.load_state_dict(state_dict) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/densenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/densenet.py new file mode 100644 index 0000000000000000000000000000000000000000..02d18c1e22b8d664e70925bbf473547a113e3a89 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/densenet.py @@ -0,0 +1,310 @@ +import re +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.utils.checkpoint as cp +from collections import OrderedDict +from .utils import load_state_dict_from_url +from torch import Tensor +from typing import Any, List, Tuple + + +__all__ = ['DenseNet', 'densenet121', 'densenet169', 'densenet201', 'densenet161'] + +model_urls = { + 'densenet121': 'https://download.pytorch.org/models/densenet121-a639ec97.pth', + 'densenet169': 'https://download.pytorch.org/models/densenet169-b2777c0a.pth', + 'densenet201': 'https://download.pytorch.org/models/densenet201-c1103571.pth', + 'densenet161': 'https://download.pytorch.org/models/densenet161-8d451a50.pth', +} + + +class _DenseLayer(nn.Module): + def __init__( + self, + num_input_features: int, + growth_rate: int, + bn_size: int, + drop_rate: float, + memory_efficient: bool = False + ) -> None: + super(_DenseLayer, self).__init__() + self.norm1: nn.BatchNorm2d + self.add_module('norm1', nn.BatchNorm2d(num_input_features)) + self.relu1: nn.ReLU + self.add_module('relu1', nn.ReLU(inplace=True)) + self.conv1: nn.Conv2d + self.add_module('conv1', nn.Conv2d(num_input_features, bn_size * + growth_rate, kernel_size=1, stride=1, + bias=False)) + self.norm2: nn.BatchNorm2d + self.add_module('norm2', nn.BatchNorm2d(bn_size * growth_rate)) + self.relu2: nn.ReLU + self.add_module('relu2', nn.ReLU(inplace=True)) + self.conv2: nn.Conv2d + self.add_module('conv2', nn.Conv2d(bn_size * growth_rate, growth_rate, + kernel_size=3, stride=1, padding=1, + bias=False)) + self.drop_rate = float(drop_rate) + self.memory_efficient = memory_efficient + + def bn_function(self, inputs: List[Tensor]) -> Tensor: + concated_features = torch.cat(inputs, 1) + bottleneck_output = 
self.conv1(self.relu1(self.norm1(concated_features))) # noqa: T484 + return bottleneck_output + + # todo: rewrite when torchscript supports any + def any_requires_grad(self, input: List[Tensor]) -> bool: + for tensor in input: + if tensor.requires_grad: + return True + return False + + @torch.jit.unused # noqa: T484 + def call_checkpoint_bottleneck(self, input: List[Tensor]) -> Tensor: + def closure(*inputs): + return self.bn_function(inputs) + + return cp.checkpoint(closure, *input) + + @torch.jit._overload_method # noqa: F811 + def forward(self, input: List[Tensor]) -> Tensor: + pass + + @torch.jit._overload_method # noqa: F811 + def forward(self, input: Tensor) -> Tensor: + pass + + # torchscript does not yet support *args, so we overload method + # allowing it to take either a List[Tensor] or single Tensor + def forward(self, input: Tensor) -> Tensor: # noqa: F811 + if isinstance(input, Tensor): + prev_features = [input] + else: + prev_features = input + + if self.memory_efficient and self.any_requires_grad(prev_features): + if torch.jit.is_scripting(): + raise Exception("Memory Efficient not supported in JIT") + + bottleneck_output = self.call_checkpoint_bottleneck(prev_features) + else: + bottleneck_output = self.bn_function(prev_features) + + new_features = self.conv2(self.relu2(self.norm2(bottleneck_output))) + if self.drop_rate > 0: + new_features = F.dropout(new_features, p=self.drop_rate, + training=self.training) + return new_features + + +class _DenseBlock(nn.ModuleDict): + _version = 2 + + def __init__( + self, + num_layers: int, + num_input_features: int, + bn_size: int, + growth_rate: int, + drop_rate: float, + memory_efficient: bool = False + ) -> None: + super(_DenseBlock, self).__init__() + for i in range(num_layers): + layer = _DenseLayer( + num_input_features + i * growth_rate, + growth_rate=growth_rate, + bn_size=bn_size, + drop_rate=drop_rate, + memory_efficient=memory_efficient, + ) + self.add_module('denselayer%d' % (i + 1), layer) + + def forward(self, init_features: Tensor) -> Tensor: + features = [init_features] + for name, layer in self.items(): + new_features = layer(features) + features.append(new_features) + return torch.cat(features, 1) + + +class _Transition(nn.Sequential): + def __init__(self, num_input_features: int, num_output_features: int) -> None: + super(_Transition, self).__init__() + self.add_module('norm', nn.BatchNorm2d(num_input_features)) + self.add_module('relu', nn.ReLU(inplace=True)) + self.add_module('conv', nn.Conv2d(num_input_features, num_output_features, + kernel_size=1, stride=1, bias=False)) + self.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2)) + + +class DenseNet(nn.Module): + r"""Densenet-BC model class, based on + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_. + + Args: + growth_rate (int) - how many filters to add each layer (`k` in paper) + block_config (list of 4 ints) - how many layers in each pooling block + num_init_features (int) - the number of filters to learn in the first convolution layer + bn_size (int) - multiplicative factor for number of bottle neck layers + (i.e. bn_size * k features in the bottleneck layer) + drop_rate (float) - dropout rate after each dense layer + num_classes (int) - number of classification classes + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_. 
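+
+    Example (a minimal sketch using the densenet121-style configuration)::
+
+        >>> model = DenseNet(growth_rate=32, block_config=(6, 12, 24, 16), num_init_features=64)
+        >>> logits = model(torch.rand(1, 3, 224, 224))
+        >>> logits.shape
+        torch.Size([1, 1000])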
+ """ + + def __init__( + self, + growth_rate: int = 32, + block_config: Tuple[int, int, int, int] = (6, 12, 24, 16), + num_init_features: int = 64, + bn_size: int = 4, + drop_rate: float = 0, + num_classes: int = 1000, + memory_efficient: bool = False + ) -> None: + + super(DenseNet, self).__init__() + + # First convolution + self.features = nn.Sequential(OrderedDict([ + ('conv0', nn.Conv2d(3, num_init_features, kernel_size=7, stride=2, + padding=3, bias=False)), + ('norm0', nn.BatchNorm2d(num_init_features)), + ('relu0', nn.ReLU(inplace=True)), + ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)), + ])) + + # Each denseblock + num_features = num_init_features + for i, num_layers in enumerate(block_config): + block = _DenseBlock( + num_layers=num_layers, + num_input_features=num_features, + bn_size=bn_size, + growth_rate=growth_rate, + drop_rate=drop_rate, + memory_efficient=memory_efficient + ) + self.features.add_module('denseblock%d' % (i + 1), block) + num_features = num_features + num_layers * growth_rate + if i != len(block_config) - 1: + trans = _Transition(num_input_features=num_features, + num_output_features=num_features // 2) + self.features.add_module('transition%d' % (i + 1), trans) + num_features = num_features // 2 + + # Final batch norm + self.features.add_module('norm5', nn.BatchNorm2d(num_features)) + + # Linear layer + self.classifier = nn.Linear(num_features, num_classes) + + # Official init from torch repo. + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.constant_(m.bias, 0) + + def forward(self, x: Tensor) -> Tensor: + features = self.features(x) + out = F.relu(features, inplace=True) + out = F.adaptive_avg_pool2d(out, (1, 1)) + out = torch.flatten(out, 1) + out = self.classifier(out) + return out + + +def _load_state_dict(model: nn.Module, model_url: str, progress: bool) -> None: + # '.'s are no longer allowed in module names, but previous _DenseLayer + # has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'. + # They are also in the checkpoints in model_urls. This pattern is used + # to find such keys. + pattern = re.compile( + r'^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$') + + state_dict = load_state_dict_from_url(model_url, progress=progress) + for key in list(state_dict.keys()): + res = pattern.match(key) + if res: + new_key = res.group(1) + res.group(2) + state_dict[new_key] = state_dict[key] + del state_dict[key] + model.load_state_dict(state_dict) + + +def _densenet( + arch: str, + growth_rate: int, + block_config: Tuple[int, int, int, int], + num_init_features: int, + pretrained: bool, + progress: bool, + **kwargs: Any +) -> DenseNet: + model = DenseNet(growth_rate, block_config, num_init_features, **kwargs) + if pretrained: + _load_state_dict(model, model_urls[arch], progress) + return model + + +def densenet121(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet: + r"""Densenet-121 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. 
See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_. + """ + return _densenet('densenet121', 32, (6, 12, 24, 16), 64, pretrained, progress, + **kwargs) + + +def densenet161(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet: + r"""Densenet-161 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_. + """ + return _densenet('densenet161', 48, (6, 12, 36, 24), 96, pretrained, progress, + **kwargs) + + +def densenet169(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet: + r"""Densenet-169 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_. + """ + return _densenet('densenet169', 32, (6, 12, 32, 32), 64, pretrained, progress, + **kwargs) + + +def densenet201(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> DenseNet: + r"""Densenet-201 model from + `"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient, + but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_. 
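+
+    Example (a minimal sketch; pass ``pretrained=True`` to download the ImageNet weights)::
+
+        >>> model = densenet201().eval()
+        >>> with torch.no_grad():
+        ...     logits = model(torch.rand(1, 3, 224, 224))
+        >>> logits.shape
+        torch.Size([1, 1000])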
+ """ + return _densenet('densenet201', 32, (6, 12, 48, 32), 64, pretrained, progress, + **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4772415b3b1e3581e953b0c9c391aa8a397816a5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/__init__.py @@ -0,0 +1,6 @@ +from .faster_rcnn import * +from .mask_rcnn import * +from .keypoint_rcnn import * +from .retinanet import * +from .ssd import * +from .ssdlite import * diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..40281b39b6baabaa711bfc1435e1d55297929081 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/_utils.py @@ -0,0 +1,410 @@ +import math +import torch + +from collections import OrderedDict +from torch import Tensor +from typing import List, Tuple + +from torchvision.ops.misc import FrozenBatchNorm2d + + +class BalancedPositiveNegativeSampler(object): + """ + This class samples batches, ensuring that they contain a fixed proportion of positives + """ + + def __init__(self, batch_size_per_image, positive_fraction): + # type: (int, float) -> None + """ + Args: + batch_size_per_image (int): number of elements to be selected per image + positive_fraction (float): percentace of positive elements per batch + """ + self.batch_size_per_image = batch_size_per_image + self.positive_fraction = positive_fraction + + def __call__(self, matched_idxs): + # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + """ + Args: + matched idxs: list of tensors containing -1, 0 or positive values. + Each tensor corresponds to a specific image. + -1 values are ignored, 0 are considered as negatives and > 0 as + positives. + + Returns: + pos_idx (list[tensor]) + neg_idx (list[tensor]) + + Returns two lists of binary masks for each image. + The first list contains the positive elements that were selected, + and the second list the negative example. 
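+
+        Example (a minimal sketch with a single image and six candidate matches)::
+
+            >>> sampler = BalancedPositiveNegativeSampler(batch_size_per_image=4, positive_fraction=0.5)
+            >>> matched_idxs = [torch.tensor([-1, 0, 0, 1, 2, 0])]
+            >>> pos_masks, neg_masks = sampler(matched_idxs)
+            >>> int(pos_masks[0].sum()), int(neg_masks[0].sum())
+            (2, 2)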
+ """ + pos_idx = [] + neg_idx = [] + for matched_idxs_per_image in matched_idxs: + positive = torch.where(matched_idxs_per_image >= 1)[0] + negative = torch.where(matched_idxs_per_image == 0)[0] + + num_pos = int(self.batch_size_per_image * self.positive_fraction) + # protect against not enough positive examples + num_pos = min(positive.numel(), num_pos) + num_neg = self.batch_size_per_image - num_pos + # protect against not enough negative examples + num_neg = min(negative.numel(), num_neg) + + # randomly select positive and negative examples + perm1 = torch.randperm(positive.numel(), device=positive.device)[:num_pos] + perm2 = torch.randperm(negative.numel(), device=negative.device)[:num_neg] + + pos_idx_per_image = positive[perm1] + neg_idx_per_image = negative[perm2] + + # create binary mask from indices + pos_idx_per_image_mask = torch.zeros_like( + matched_idxs_per_image, dtype=torch.uint8 + ) + neg_idx_per_image_mask = torch.zeros_like( + matched_idxs_per_image, dtype=torch.uint8 + ) + + pos_idx_per_image_mask[pos_idx_per_image] = 1 + neg_idx_per_image_mask[neg_idx_per_image] = 1 + + pos_idx.append(pos_idx_per_image_mask) + neg_idx.append(neg_idx_per_image_mask) + + return pos_idx, neg_idx + + +@torch.jit._script_if_tracing +def encode_boxes(reference_boxes, proposals, weights): + # type: (torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor + """ + Encode a set of proposals with respect to some + reference boxes + + Args: + reference_boxes (Tensor): reference boxes + proposals (Tensor): boxes to be encoded + weights (Tensor[4]): the weights for ``(x, y, w, h)`` + """ + + # perform some unpacking to make it JIT-fusion friendly + wx = weights[0] + wy = weights[1] + ww = weights[2] + wh = weights[3] + + proposals_x1 = proposals[:, 0].unsqueeze(1) + proposals_y1 = proposals[:, 1].unsqueeze(1) + proposals_x2 = proposals[:, 2].unsqueeze(1) + proposals_y2 = proposals[:, 3].unsqueeze(1) + + reference_boxes_x1 = reference_boxes[:, 0].unsqueeze(1) + reference_boxes_y1 = reference_boxes[:, 1].unsqueeze(1) + reference_boxes_x2 = reference_boxes[:, 2].unsqueeze(1) + reference_boxes_y2 = reference_boxes[:, 3].unsqueeze(1) + + # implementation starts here + ex_widths = proposals_x2 - proposals_x1 + ex_heights = proposals_y2 - proposals_y1 + ex_ctr_x = proposals_x1 + 0.5 * ex_widths + ex_ctr_y = proposals_y1 + 0.5 * ex_heights + + gt_widths = reference_boxes_x2 - reference_boxes_x1 + gt_heights = reference_boxes_y2 - reference_boxes_y1 + gt_ctr_x = reference_boxes_x1 + 0.5 * gt_widths + gt_ctr_y = reference_boxes_y1 + 0.5 * gt_heights + + targets_dx = wx * (gt_ctr_x - ex_ctr_x) / ex_widths + targets_dy = wy * (gt_ctr_y - ex_ctr_y) / ex_heights + targets_dw = ww * torch.log(gt_widths / ex_widths) + targets_dh = wh * torch.log(gt_heights / ex_heights) + + targets = torch.cat((targets_dx, targets_dy, targets_dw, targets_dh), dim=1) + return targets + + +class BoxCoder(object): + """ + This class encodes and decodes a set of bounding boxes into + the representation used for training the regressors. + """ + + def __init__(self, weights, bbox_xform_clip=math.log(1000. 
/ 16)): + # type: (Tuple[float, float, float, float], float) -> None + """ + Args: + weights (4-element tuple) + bbox_xform_clip (float) + """ + self.weights = weights + self.bbox_xform_clip = bbox_xform_clip + + def encode(self, reference_boxes, proposals): + # type: (List[Tensor], List[Tensor]) -> List[Tensor] + boxes_per_image = [len(b) for b in reference_boxes] + reference_boxes = torch.cat(reference_boxes, dim=0) + proposals = torch.cat(proposals, dim=0) + targets = self.encode_single(reference_boxes, proposals) + return targets.split(boxes_per_image, 0) + + def encode_single(self, reference_boxes, proposals): + """ + Encode a set of proposals with respect to some + reference boxes + + Args: + reference_boxes (Tensor): reference boxes + proposals (Tensor): boxes to be encoded + """ + dtype = reference_boxes.dtype + device = reference_boxes.device + weights = torch.as_tensor(self.weights, dtype=dtype, device=device) + targets = encode_boxes(reference_boxes, proposals, weights) + + return targets + + def decode(self, rel_codes, boxes): + # type: (Tensor, List[Tensor]) -> Tensor + assert isinstance(boxes, (list, tuple)) + assert isinstance(rel_codes, torch.Tensor) + boxes_per_image = [b.size(0) for b in boxes] + concat_boxes = torch.cat(boxes, dim=0) + box_sum = 0 + for val in boxes_per_image: + box_sum += val + if box_sum > 0: + rel_codes = rel_codes.reshape(box_sum, -1) + pred_boxes = self.decode_single( + rel_codes, concat_boxes + ) + if box_sum > 0: + pred_boxes = pred_boxes.reshape(box_sum, -1, 4) + return pred_boxes + + def decode_single(self, rel_codes, boxes): + """ + From a set of original boxes and encoded relative box offsets, + get the decoded boxes. + + Args: + rel_codes (Tensor): encoded boxes + boxes (Tensor): reference boxes. + """ + + boxes = boxes.to(rel_codes.dtype) + + widths = boxes[:, 2] - boxes[:, 0] + heights = boxes[:, 3] - boxes[:, 1] + ctr_x = boxes[:, 0] + 0.5 * widths + ctr_y = boxes[:, 1] + 0.5 * heights + + wx, wy, ww, wh = self.weights + dx = rel_codes[:, 0::4] / wx + dy = rel_codes[:, 1::4] / wy + dw = rel_codes[:, 2::4] / ww + dh = rel_codes[:, 3::4] / wh + + # Prevent sending too large values into torch.exp() + dw = torch.clamp(dw, max=self.bbox_xform_clip) + dh = torch.clamp(dh, max=self.bbox_xform_clip) + + pred_ctr_x = dx * widths[:, None] + ctr_x[:, None] + pred_ctr_y = dy * heights[:, None] + ctr_y[:, None] + pred_w = torch.exp(dw) * widths[:, None] + pred_h = torch.exp(dh) * heights[:, None] + + pred_boxes1 = pred_ctr_x - torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w + pred_boxes2 = pred_ctr_y - torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h + pred_boxes3 = pred_ctr_x + torch.tensor(0.5, dtype=pred_ctr_x.dtype, device=pred_w.device) * pred_w + pred_boxes4 = pred_ctr_y + torch.tensor(0.5, dtype=pred_ctr_y.dtype, device=pred_h.device) * pred_h + pred_boxes = torch.stack((pred_boxes1, pred_boxes2, pred_boxes3, pred_boxes4), dim=2).flatten(1) + return pred_boxes + + +class Matcher(object): + """ + This class assigns to each predicted "element" (e.g., a box) a ground-truth + element. Each predicted element will have exactly zero or one matches; each + ground-truth element may be assigned to zero or more predicted elements. + + Matching is based on the MxN match_quality_matrix, that characterizes how well + each (ground-truth, predicted)-pair match. For example, if the elements are + boxes, the matrix may contain box IoU overlap values. 
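+    For example, entry ``[m, n]`` would then hold the IoU between ground-truth box ``m``
+    and predicted box ``n``.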
+ + The matcher returns a tensor of size N containing the index of the ground-truth + element m that matches to prediction n. If there is no match, a negative value + is returned. + """ + + BELOW_LOW_THRESHOLD = -1 + BETWEEN_THRESHOLDS = -2 + + __annotations__ = { + 'BELOW_LOW_THRESHOLD': int, + 'BETWEEN_THRESHOLDS': int, + } + + def __init__(self, high_threshold, low_threshold, allow_low_quality_matches=False): + # type: (float, float, bool) -> None + """ + Args: + high_threshold (float): quality values greater than or equal to + this value are candidate matches. + low_threshold (float): a lower quality threshold used to stratify + matches into three levels: + 1) matches >= high_threshold + 2) BETWEEN_THRESHOLDS matches in [low_threshold, high_threshold) + 3) BELOW_LOW_THRESHOLD matches in [0, low_threshold) + allow_low_quality_matches (bool): if True, produce additional matches + for predictions that have only low-quality match candidates. See + set_low_quality_matches_ for more details. + """ + self.BELOW_LOW_THRESHOLD = -1 + self.BETWEEN_THRESHOLDS = -2 + assert low_threshold <= high_threshold + self.high_threshold = high_threshold + self.low_threshold = low_threshold + self.allow_low_quality_matches = allow_low_quality_matches + + def __call__(self, match_quality_matrix): + """ + Args: + match_quality_matrix (Tensor[float]): an MxN tensor, containing the + pairwise quality between M ground-truth elements and N predicted elements. + + Returns: + matches (Tensor[int64]): an N tensor where N[i] is a matched gt in + [0, M - 1] or a negative value indicating that prediction i could not + be matched. + """ + if match_quality_matrix.numel() == 0: + # empty targets or proposals not supported during training + if match_quality_matrix.shape[0] == 0: + raise ValueError( + "No ground-truth boxes available for one of the images " + "during training") + else: + raise ValueError( + "No proposal boxes available for one of the images " + "during training") + + # match_quality_matrix is M (gt) x N (predicted) + # Max over gt elements (dim 0) to find best gt candidate for each prediction + matched_vals, matches = match_quality_matrix.max(dim=0) + if self.allow_low_quality_matches: + all_matches = matches.clone() + else: + all_matches = None + + # Assign candidate matches with low quality to negative (unassigned) values + below_low_threshold = matched_vals < self.low_threshold + between_thresholds = (matched_vals >= self.low_threshold) & ( + matched_vals < self.high_threshold + ) + matches[below_low_threshold] = self.BELOW_LOW_THRESHOLD + matches[between_thresholds] = self.BETWEEN_THRESHOLDS + + if self.allow_low_quality_matches: + assert all_matches is not None + self.set_low_quality_matches_(matches, all_matches, match_quality_matrix) + + return matches + + def set_low_quality_matches_(self, matches, all_matches, match_quality_matrix): + """ + Produce additional matches for predictions that have only low-quality matches. + Specifically, for each ground-truth find the set of predictions that have + maximum overlap with it (including ties); for each prediction in that set, if + it is unmatched, then match it to the ground-truth with which it has the highest + quality value. 
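+
+        For example, if a ground-truth box's best IoU with any prediction is only 0.3 and
+        two predictions tie at that value, both of those predictions are re-assigned to
+        their highest-quality ground-truth instead of remaining unmatched.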
+ """ + # For each gt, find the prediction with which it has highest quality + highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1) + # Find highest quality match available, even if it is low, including ties + gt_pred_pairs_of_highest_quality = torch.where( + match_quality_matrix == highest_quality_foreach_gt[:, None] + ) + # Example gt_pred_pairs_of_highest_quality: + # tensor([[ 0, 39796], + # [ 1, 32055], + # [ 1, 32070], + # [ 2, 39190], + # [ 2, 40255], + # [ 3, 40390], + # [ 3, 41455], + # [ 4, 45470], + # [ 5, 45325], + # [ 5, 46390]]) + # Each row is a (gt index, prediction index) + # Note how gt items 1, 2, 3, and 5 each have two ties + + pred_inds_to_update = gt_pred_pairs_of_highest_quality[1] + matches[pred_inds_to_update] = all_matches[pred_inds_to_update] + + +class SSDMatcher(Matcher): + + def __init__(self, threshold): + super().__init__(threshold, threshold, allow_low_quality_matches=False) + + def __call__(self, match_quality_matrix): + matches = super().__call__(match_quality_matrix) + + # For each gt, find the prediction with which it has the highest quality + _, highest_quality_pred_foreach_gt = match_quality_matrix.max(dim=1) + matches[highest_quality_pred_foreach_gt] = torch.arange(highest_quality_pred_foreach_gt.size(0), + dtype=torch.int64, + device=highest_quality_pred_foreach_gt.device) + + return matches + + +def overwrite_eps(model, eps): + """ + This method overwrites the default eps values of all the + FrozenBatchNorm2d layers of the model with the provided value. + This is necessary to address the BC-breaking change introduced + by the bug-fix at pytorch/vision#2933. The overwrite is applied + only when the pretrained weights are loaded to maintain compatibility + with previous versions. + + Args: + model (nn.Module): The model on which we perform the overwrite. + eps (float): The new value of eps. + """ + for module in model.modules(): + if isinstance(module, FrozenBatchNorm2d): + module.eps = eps + + +def retrieve_out_channels(model, size): + """ + This method retrieves the number of output channels of a specific model. + + Args: + model (nn.Module): The model for which we estimate the out_channels. + It should return a single Tensor or an OrderedDict[Tensor]. + size (Tuple[int, int]): The size (wxh) of the input. + + Returns: + out_channels (List[int]): A list of the output channels of the model. + """ + in_training = model.training + model.eval() + + with torch.no_grad(): + # Use dummy data to retrieve the feature map sizes to avoid hard-coding their values + device = next(model.parameters()).device + tmp_img = torch.zeros((1, 3, size[1], size[0]), device=device) + features = model(tmp_img) + if isinstance(features, torch.Tensor): + features = OrderedDict([('0', features)]) + out_channels = [x.size(1) for x in features.values()] + + if in_training: + model.train() + + return out_channels diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/anchor_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/anchor_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..06ecc551442a704796c31a881d965810381b8855 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/anchor_utils.py @@ -0,0 +1,250 @@ +import math +import torch +from torch import nn, Tensor + +from typing import List, Optional +from .image_list import ImageList + + +class AnchorGenerator(nn.Module): + """ + Module that generates anchors for a set of feature maps and + image sizes. 
+ + The module support computing anchors at multiple sizes and aspect ratios + per feature map. This module assumes aspect ratio = height / width for + each anchor. + + sizes and aspect_ratios should have the same number of elements, and it should + correspond to the number of feature maps. + + sizes[i] and aspect_ratios[i] can have an arbitrary number of elements, + and AnchorGenerator will output a set of sizes[i] * aspect_ratios[i] anchors + per spatial location for feature map i. + + Args: + sizes (Tuple[Tuple[int]]): + aspect_ratios (Tuple[Tuple[float]]): + """ + + __annotations__ = { + "cell_anchors": List[torch.Tensor], + } + + def __init__( + self, + sizes=((128, 256, 512),), + aspect_ratios=((0.5, 1.0, 2.0),), + ): + super(AnchorGenerator, self).__init__() + + if not isinstance(sizes[0], (list, tuple)): + # TODO change this + sizes = tuple((s,) for s in sizes) + if not isinstance(aspect_ratios[0], (list, tuple)): + aspect_ratios = (aspect_ratios,) * len(sizes) + + assert len(sizes) == len(aspect_ratios) + + self.sizes = sizes + self.aspect_ratios = aspect_ratios + self.cell_anchors = [self.generate_anchors(size, aspect_ratio) + for size, aspect_ratio in zip(sizes, aspect_ratios)] + + # TODO: https://github.com/pytorch/pytorch/issues/26792 + # For every (aspect_ratios, scales) combination, output a zero-centered anchor with those values. + # (scales, aspect_ratios) are usually an element of zip(self.scales, self.aspect_ratios) + # This method assumes aspect ratio = height / width for an anchor. + def generate_anchors(self, scales: List[int], aspect_ratios: List[float], dtype: torch.dtype = torch.float32, + device: torch.device = torch.device("cpu")): + scales = torch.as_tensor(scales, dtype=dtype, device=device) + aspect_ratios = torch.as_tensor(aspect_ratios, dtype=dtype, device=device) + h_ratios = torch.sqrt(aspect_ratios) + w_ratios = 1 / h_ratios + + ws = (w_ratios[:, None] * scales[None, :]).view(-1) + hs = (h_ratios[:, None] * scales[None, :]).view(-1) + + base_anchors = torch.stack([-ws, -hs, ws, hs], dim=1) / 2 + return base_anchors.round() + + def set_cell_anchors(self, dtype: torch.dtype, device: torch.device): + self.cell_anchors = [cell_anchor.to(dtype=dtype, device=device) + for cell_anchor in self.cell_anchors] + + def num_anchors_per_location(self): + return [len(s) * len(a) for s, a in zip(self.sizes, self.aspect_ratios)] + + # For every combination of (a, (g, s), i) in (self.cell_anchors, zip(grid_sizes, strides), 0:2), + # output g[i] anchors that are s[i] distance apart in direction i, with the same dimensions as a. + def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]: + anchors = [] + cell_anchors = self.cell_anchors + assert cell_anchors is not None + + if not (len(grid_sizes) == len(strides) == len(cell_anchors)): + raise ValueError("Anchors should be Tuple[Tuple[int]] because each feature " + "map could potentially have different sizes and aspect ratios. 
" + "There needs to be a match between the number of " + "feature maps passed and the number of sizes / aspect ratios specified.") + + for size, stride, base_anchors in zip( + grid_sizes, strides, cell_anchors + ): + grid_height, grid_width = size + stride_height, stride_width = stride + device = base_anchors.device + + # For output anchor, compute [x_center, y_center, x_center, y_center] + shifts_x = torch.arange( + 0, grid_width, dtype=torch.float32, device=device + ) * stride_width + shifts_y = torch.arange( + 0, grid_height, dtype=torch.float32, device=device + ) * stride_height + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1) + + # For every (base anchor, output anchor) pair, + # offset each zero-centered base anchor by the center of the output anchor. + anchors.append( + (shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4) + ) + + return anchors + + def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]: + grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] + image_size = image_list.tensors.shape[-2:] + dtype, device = feature_maps[0].dtype, feature_maps[0].device + strides = [[torch.tensor(image_size[0] // g[0], dtype=torch.int64, device=device), + torch.tensor(image_size[1] // g[1], dtype=torch.int64, device=device)] for g in grid_sizes] + self.set_cell_anchors(dtype, device) + anchors_over_all_feature_maps = self.grid_anchors(grid_sizes, strides) + anchors: List[List[torch.Tensor]] = [] + for _ in range(len(image_list.image_sizes)): + anchors_in_image = [anchors_per_feature_map for anchors_per_feature_map in anchors_over_all_feature_maps] + anchors.append(anchors_in_image) + anchors = [torch.cat(anchors_per_image) for anchors_per_image in anchors] + return anchors + + +class DefaultBoxGenerator(nn.Module): + """ + This module generates the default boxes of SSD for a set of feature maps and image sizes. + + Args: + aspect_ratios (List[List[int]]): A list with all the aspect ratios used in each feature map. + min_ratio (float): The minimum scale :math:`\text{s}_{\text{min}}` of the default boxes used in the estimation + of the scales of each feature map. It is used only if the ``scales`` parameter is not provided. + max_ratio (float): The maximum scale :math:`\text{s}_{\text{max}}` of the default boxes used in the estimation + of the scales of each feature map. It is used only if the ``scales`` parameter is not provided. + scales (List[float]], optional): The scales of the default boxes. If not provided it will be estimated using + the ``min_ratio`` and ``max_ratio`` parameters. + steps (List[int]], optional): It's a hyper-parameter that affects the tiling of defalt boxes. If not provided + it will be estimated from the data. + clip (bool): Whether the standardized values of default boxes should be clipped between 0 and 1. The clipping + is applied while the boxes are encoded in format ``(cx, cy, w, h)``. 
+ """ + + def __init__(self, aspect_ratios: List[List[int]], min_ratio: float = 0.15, max_ratio: float = 0.9, + scales: Optional[List[float]] = None, steps: Optional[List[int]] = None, clip: bool = True): + super().__init__() + if steps is not None: + assert len(aspect_ratios) == len(steps) + self.aspect_ratios = aspect_ratios + self.steps = steps + self.clip = clip + num_outputs = len(aspect_ratios) + + # Estimation of default boxes scales + if scales is None: + if num_outputs > 1: + range_ratio = max_ratio - min_ratio + self.scales = [min_ratio + range_ratio * k / (num_outputs - 1.0) for k in range(num_outputs)] + self.scales.append(1.0) + else: + self.scales = [min_ratio, max_ratio] + else: + self.scales = scales + + self._wh_pairs = self._generate_wh_pairs(num_outputs) + + def _generate_wh_pairs(self, num_outputs: int, dtype: torch.dtype = torch.float32, + device: torch.device = torch.device("cpu")) -> List[Tensor]: + _wh_pairs: List[Tensor] = [] + for k in range(num_outputs): + # Adding the 2 default width-height pairs for aspect ratio 1 and scale s'k + s_k = self.scales[k] + s_prime_k = math.sqrt(self.scales[k] * self.scales[k + 1]) + wh_pairs = [[s_k, s_k], [s_prime_k, s_prime_k]] + + # Adding 2 pairs for each aspect ratio of the feature map k + for ar in self.aspect_ratios[k]: + sq_ar = math.sqrt(ar) + w = self.scales[k] * sq_ar + h = self.scales[k] / sq_ar + wh_pairs.extend([[w, h], [h, w]]) + + _wh_pairs.append(torch.as_tensor(wh_pairs, dtype=dtype, device=device)) + return _wh_pairs + + def num_anchors_per_location(self): + # Estimate num of anchors based on aspect ratios: 2 default boxes + 2 * ratios of feaure map. + return [2 + 2 * len(r) for r in self.aspect_ratios] + + # Default Boxes calculation based on page 6 of SSD paper + def _grid_default_boxes(self, grid_sizes: List[List[int]], image_size: List[int], + dtype: torch.dtype = torch.float32) -> Tensor: + default_boxes = [] + for k, f_k in enumerate(grid_sizes): + # Now add the default boxes for each width-height pair + if self.steps is not None: + x_f_k, y_f_k = [img_shape / self.steps[k] for img_shape in image_size] + else: + y_f_k, x_f_k = f_k + + shifts_x = ((torch.arange(0, f_k[1]) + 0.5) / x_f_k).to(dtype=dtype) + shifts_y = ((torch.arange(0, f_k[0]) + 0.5) / y_f_k).to(dtype=dtype) + shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x) + shift_x = shift_x.reshape(-1) + shift_y = shift_y.reshape(-1) + + shifts = torch.stack((shift_x, shift_y) * len(self._wh_pairs[k]), dim=-1).reshape(-1, 2) + # Clipping the default boxes while the boxes are encoded in format (cx, cy, w, h) + _wh_pair = self._wh_pairs[k].clamp(min=0, max=1) if self.clip else self._wh_pairs[k] + wh_pairs = _wh_pair.repeat((f_k[0] * f_k[1]), 1) + + default_box = torch.cat((shifts, wh_pairs), dim=1) + + default_boxes.append(default_box) + + return torch.cat(default_boxes, dim=0) + + def __repr__(self) -> str: + s = self.__class__.__name__ + '(' + s += 'aspect_ratios={aspect_ratios}' + s += ', clip={clip}' + s += ', scales={scales}' + s += ', steps={steps}' + s += ')' + return s.format(**self.__dict__) + + def forward(self, image_list: ImageList, feature_maps: List[Tensor]) -> List[Tensor]: + grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps] + image_size = image_list.tensors.shape[-2:] + dtype, device = feature_maps[0].dtype, feature_maps[0].device + default_boxes = self._grid_default_boxes(grid_sizes, image_size, dtype=dtype) + default_boxes = default_boxes.to(device) + + dboxes = [] + for _ in image_list.image_sizes: + 
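+            # the same (cx, cy, w, h) default boxes are shared by every image in the batch;
+            # below they are converted to (x1, y1, x2, y2) corners and scaled to absolute
+            # pixel coordinates using the batched image size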
dboxes_in_image = default_boxes + dboxes_in_image = torch.cat([dboxes_in_image[:, :2] - 0.5 * dboxes_in_image[:, 2:], + dboxes_in_image[:, :2] + 0.5 * dboxes_in_image[:, 2:]], -1) + dboxes_in_image[:, 0::2] *= image_size[1] + dboxes_in_image[:, 1::2] *= image_size[0] + dboxes.append(dboxes_in_image) + return dboxes diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/backbone_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/backbone_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..3178a81b52ce2d4538cb239fb9979054ab534e4d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/backbone_utils.py @@ -0,0 +1,179 @@ +import warnings +from torch import nn +from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool + +from torchvision.ops import misc as misc_nn_ops +from .._utils import IntermediateLayerGetter +from .. import mobilenet +from .. import resnet + + +class BackboneWithFPN(nn.Module): + """ + Adds a FPN on top of a model. + Internally, it uses torchvision.models._utils.IntermediateLayerGetter to + extract a submodel that returns the feature maps specified in return_layers. + The same limitations of IntermediatLayerGetter apply here. + Args: + backbone (nn.Module) + return_layers (Dict[name, new_name]): a dict containing the names + of the modules for which the activations will be returned as + the key of the dict, and the value of the dict is the name + of the returned activation (which the user can specify). + in_channels_list (List[int]): number of channels for each feature map + that is returned, in the order they are present in the OrderedDict + out_channels (int): number of channels in the FPN. + Attributes: + out_channels (int): the number of channels in the FPN + """ + def __init__(self, backbone, return_layers, in_channels_list, out_channels, extra_blocks=None): + super(BackboneWithFPN, self).__init__() + + if extra_blocks is None: + extra_blocks = LastLevelMaxPool() + + self.body = IntermediateLayerGetter(backbone, return_layers=return_layers) + self.fpn = FeaturePyramidNetwork( + in_channels_list=in_channels_list, + out_channels=out_channels, + extra_blocks=extra_blocks, + ) + self.out_channels = out_channels + + def forward(self, x): + x = self.body(x) + x = self.fpn(x) + return x + + +def resnet_fpn_backbone( + backbone_name, + pretrained, + norm_layer=misc_nn_ops.FrozenBatchNorm2d, + trainable_layers=3, + returned_layers=None, + extra_blocks=None +): + """ + Constructs a specified ResNet backbone with FPN on top. Freezes the specified number of layers in the backbone. + + Examples:: + + >>> from torchvision.models.detection.backbone_utils import resnet_fpn_backbone + >>> backbone = resnet_fpn_backbone('resnet50', pretrained=True, trainable_layers=3) + >>> # get some dummy image + >>> x = torch.rand(1,3,64,64) + >>> # compute the output + >>> output = backbone(x) + >>> print([(k, v.shape) for k, v in output.items()]) + >>> # returns + >>> [('0', torch.Size([1, 256, 16, 16])), + >>> ('1', torch.Size([1, 256, 8, 8])), + >>> ('2', torch.Size([1, 256, 4, 4])), + >>> ('3', torch.Size([1, 256, 2, 2])), + >>> ('pool', torch.Size([1, 256, 1, 1]))] + + Args: + backbone_name (string): resnet architecture. 
Possible values are 'ResNet', 'resnet18', 'resnet34', 'resnet50', + 'resnet101', 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', 'wide_resnet50_2', 'wide_resnet101_2' + pretrained (bool): If True, returns a model with backbone pre-trained on Imagenet + norm_layer (torchvision.ops): it is recommended to use the default value. For details visit: + (https://github.com/facebookresearch/maskrcnn-benchmark/issues/267) + trainable_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. + returned_layers (list of int): The layers of the network to return. Each entry must be in ``[1, 4]``. + By default all layers are returned. + extra_blocks (ExtraFPNBlock or None): if provided, extra operations will + be performed. It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names. By + default a ``LastLevelMaxPool`` is used. + """ + backbone = resnet.__dict__[backbone_name]( + pretrained=pretrained, + norm_layer=norm_layer) + + # select layers that wont be frozen + assert 0 <= trainable_layers <= 5 + layers_to_train = ['layer4', 'layer3', 'layer2', 'layer1', 'conv1'][:trainable_layers] + if trainable_layers == 5: + layers_to_train.append('bn1') + for name, parameter in backbone.named_parameters(): + if all([not name.startswith(layer) for layer in layers_to_train]): + parameter.requires_grad_(False) + + if extra_blocks is None: + extra_blocks = LastLevelMaxPool() + + if returned_layers is None: + returned_layers = [1, 2, 3, 4] + assert min(returned_layers) > 0 and max(returned_layers) < 5 + return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)} + + in_channels_stage2 = backbone.inplanes // 8 + in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers] + out_channels = 256 + return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks) + + +def _validate_trainable_layers(pretrained, trainable_backbone_layers, max_value, default_value): + # dont freeze any layers if pretrained model or backbone is not used + if not pretrained: + if trainable_backbone_layers is not None: + warnings.warn( + "Changing trainable_backbone_layers has not effect if " + "neither pretrained nor pretrained_backbone have been set to True, " + "falling back to trainable_backbone_layers={} so that all layers are trainable".format(max_value)) + trainable_backbone_layers = max_value + + # by default freeze first blocks + if trainable_backbone_layers is None: + trainable_backbone_layers = default_value + assert 0 <= trainable_backbone_layers <= max_value + return trainable_backbone_layers + + +def mobilenet_backbone( + backbone_name, + pretrained, + fpn, + norm_layer=misc_nn_ops.FrozenBatchNorm2d, + trainable_layers=2, + returned_layers=None, + extra_blocks=None +): + backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, norm_layer=norm_layer).features + + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. 
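A small sketch of the channel bookkeeping done by resnet_fpn_backbone above (not part of the upstream file). It assumes backbone.inplanes is 2048, which is the value torchvision's ResNet-50 ends up with after construction.

inplanes = 2048                      # backbone.inplanes for a constructed resnet50
returned_layers = [1, 2, 3, 4]
in_channels_stage2 = inplanes // 8   # 256
in_channels_list = [in_channels_stage2 * 2 ** (i - 1) for i in returned_layers]
assert in_channels_list == [256, 512, 1024, 2048]    # C2..C5 channels fed to the FPN

return_layers = {f'layer{k}': str(v) for v, k in enumerate(returned_layers)}
assert return_layers == {'layer1': '0', 'layer2': '1', 'layer3': '2', 'layer4': '3'}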
+ stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + num_stages = len(stage_indices) + + # find the index of the layer from which we wont freeze + assert 0 <= trainable_layers <= num_stages + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + out_channels = 256 + if fpn: + if extra_blocks is None: + extra_blocks = LastLevelMaxPool() + + if returned_layers is None: + returned_layers = [num_stages - 2, num_stages - 1] + assert min(returned_layers) >= 0 and max(returned_layers) < num_stages + return_layers = {f'{stage_indices[k]}': str(v) for v, k in enumerate(returned_layers)} + + in_channels_list = [backbone[stage_indices[i]].out_channels for i in returned_layers] + return BackboneWithFPN(backbone, return_layers, in_channels_list, out_channels, extra_blocks=extra_blocks) + else: + m = nn.Sequential( + backbone, + # depthwise linear combination of channels to reduce their size + nn.Conv2d(backbone[-1].out_channels, out_channels, 1), + ) + m.out_channels = out_channels + return m diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/faster_rcnn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/faster_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..bf8df3b93779aa9bf9e8315e28f2a993be164668 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/faster_rcnn.py @@ -0,0 +1,469 @@ +from torch import nn +import torch.nn.functional as F + +from torchvision.ops import MultiScaleRoIAlign + +from ._utils import overwrite_eps +from ..utils import load_state_dict_from_url + +from .anchor_utils import AnchorGenerator +from .generalized_rcnn import GeneralizedRCNN +from .rpn import RPNHead, RegionProposalNetwork +from .roi_heads import RoIHeads +from .transform import GeneralizedRCNNTransform +from .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers, mobilenet_backbone + + +__all__ = [ + "FasterRCNN", "fasterrcnn_resnet50_fpn", "fasterrcnn_mobilenet_v3_large_320_fpn", + "fasterrcnn_mobilenet_v3_large_fpn" +] + + +class FasterRCNN(GeneralizedRCNN): + """ + Implements Faster R-CNN. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses for both the RPN and the R-CNN. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores or each prediction + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain a out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or and OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + If box_predictor is specified, num_classes should be None. + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN + rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training + rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing + rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training + rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing + rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. + rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + rpn_score_thresh (float): during inference, only return proposals with a classification score + greater than rpn_score_thresh + box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes + box_head (nn.Module): module that takes the cropped feature maps as input + box_predictor (nn.Module): module that takes the output of box_head and returns the + classification logits and box regression deltas. + box_score_thresh (float): during inference, only return proposals with a classification score + greater than box_score_thresh + box_nms_thresh (float): NMS threshold for the prediction head. Used during inference + box_detections_per_img (int): maximum number of detections per image, for all classes. 
+ box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be + considered as positive during training of the classification head + box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be + considered as negative during training of the classification head + box_batch_size_per_image (int): number of proposals that are sampled during training of the + classification head + box_positive_fraction (float): proportion of positive proposals in a mini-batch during training + of the classification head + bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the + bounding boxes + + Example:: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import FasterRCNN + >>> from torchvision.models.detection.rpn import AnchorGenerator + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features + >>> # FasterRCNN needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280 + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the RPN generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),)) + >>> + >>> # let's define what are the feature maps that we will + >>> # use to perform the region of interest cropping, as well as + >>> # the size of the crop after rescaling. + >>> # if your backbone returns a Tensor, featmap_names is expected to + >>> # be ['0']. More generally, the backbone should return an + >>> # OrderedDict[Tensor], and in featmap_names you can choose which + >>> # feature maps to use. 
+ >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=7, + >>> sampling_ratio=2) + >>> + >>> # put the pieces together inside a FasterRCNN model + >>> model = FasterRCNN(backbone, + >>> num_classes=2, + >>> rpn_anchor_generator=anchor_generator, + >>> box_roi_pool=roi_pooler) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + + def __init__(self, backbone, num_classes=None, + # transform parameters + min_size=800, max_size=1333, + image_mean=None, image_std=None, + # RPN parameters + rpn_anchor_generator=None, rpn_head=None, + rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000, + rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000, + rpn_nms_thresh=0.7, + rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3, + rpn_batch_size_per_image=256, rpn_positive_fraction=0.5, + rpn_score_thresh=0.0, + # Box parameters + box_roi_pool=None, box_head=None, box_predictor=None, + box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100, + box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5, + box_batch_size_per_image=512, box_positive_fraction=0.25, + bbox_reg_weights=None): + + if not hasattr(backbone, "out_channels"): + raise ValueError( + "backbone should contain an attribute out_channels " + "specifying the number of output channels (assumed to be the " + "same for all the levels)") + + assert isinstance(rpn_anchor_generator, (AnchorGenerator, type(None))) + assert isinstance(box_roi_pool, (MultiScaleRoIAlign, type(None))) + + if num_classes is not None: + if box_predictor is not None: + raise ValueError("num_classes should be None when box_predictor is specified") + else: + if box_predictor is None: + raise ValueError("num_classes should not be None when box_predictor " + "is not specified") + + out_channels = backbone.out_channels + + if rpn_anchor_generator is None: + anchor_sizes = ((32,), (64,), (128,), (256,), (512,)) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + rpn_anchor_generator = AnchorGenerator( + anchor_sizes, aspect_ratios + ) + if rpn_head is None: + rpn_head = RPNHead( + out_channels, rpn_anchor_generator.num_anchors_per_location()[0] + ) + + rpn_pre_nms_top_n = dict(training=rpn_pre_nms_top_n_train, testing=rpn_pre_nms_top_n_test) + rpn_post_nms_top_n = dict(training=rpn_post_nms_top_n_train, testing=rpn_post_nms_top_n_test) + + rpn = RegionProposalNetwork( + rpn_anchor_generator, rpn_head, + rpn_fg_iou_thresh, rpn_bg_iou_thresh, + rpn_batch_size_per_image, rpn_positive_fraction, + rpn_pre_nms_top_n, rpn_post_nms_top_n, rpn_nms_thresh, + score_thresh=rpn_score_thresh) + + if box_roi_pool is None: + box_roi_pool = MultiScaleRoIAlign( + featmap_names=['0', '1', '2', '3'], + output_size=7, + sampling_ratio=2) + + if box_head is None: + resolution = box_roi_pool.output_size[0] + representation_size = 1024 + box_head = TwoMLPHead( + out_channels * resolution ** 2, + representation_size) + + if box_predictor is None: + representation_size = 1024 + box_predictor = FastRCNNPredictor( + representation_size, + num_classes) + + roi_heads = RoIHeads( + # Box + box_roi_pool, box_head, box_predictor, + box_fg_iou_thresh, box_bg_iou_thresh, + box_batch_size_per_image, box_positive_fraction, + bbox_reg_weights, + box_score_thresh, box_nms_thresh, box_detections_per_img) + + if image_mean is None: + image_mean = [0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, 
image_std) + + super(FasterRCNN, self).__init__(backbone, rpn, roi_heads, transform) + + +class TwoMLPHead(nn.Module): + """ + Standard heads for FPN-based models + + Args: + in_channels (int): number of input channels + representation_size (int): size of the intermediate representation + """ + + def __init__(self, in_channels, representation_size): + super(TwoMLPHead, self).__init__() + + self.fc6 = nn.Linear(in_channels, representation_size) + self.fc7 = nn.Linear(representation_size, representation_size) + + def forward(self, x): + x = x.flatten(start_dim=1) + + x = F.relu(self.fc6(x)) + x = F.relu(self.fc7(x)) + + return x + + +class FastRCNNPredictor(nn.Module): + """ + Standard classification + bounding box regression layers + for Fast R-CNN. + + Args: + in_channels (int): number of input channels + num_classes (int): number of output classes (including background) + """ + + def __init__(self, in_channels, num_classes): + super(FastRCNNPredictor, self).__init__() + self.cls_score = nn.Linear(in_channels, num_classes) + self.bbox_pred = nn.Linear(in_channels, num_classes * 4) + + def forward(self, x): + if x.dim() == 4: + assert list(x.shape[2:]) == [1, 1] + x = x.flatten(start_dim=1) + scores = self.cls_score(x) + bbox_deltas = self.bbox_pred(x) + + return scores, bbox_deltas + + +model_urls = { + 'fasterrcnn_resnet50_fpn_coco': + 'https://download.pytorch.org/models/fasterrcnn_resnet50_fpn_coco-258fb6c6.pth', + 'fasterrcnn_mobilenet_v3_large_320_fpn_coco': + 'https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_320_fpn-907ea3f9.pth', + 'fasterrcnn_mobilenet_v3_large_fpn_coco': + 'https://download.pytorch.org/models/fasterrcnn_mobilenet_v3_large_fpn-fb6a3cc7.pth' +} + + +def fasterrcnn_resnet50_fpn(pretrained=False, progress=True, + num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs): + """ + Constructs a Faster R-CNN model with a ResNet-50-FPN backbone. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each detection + - scores (``Tensor[N]``): the scores of each detection + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Faster R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. 
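A common fine-tuning sketch built on FastRCNNPredictor above (hedged: it assumes the default TwoMLPHead, whose representation size is 1024, and the 3-class setting is hypothetical): the COCO-pretrained box predictor is simply swapped out for one sized to a custom dataset.

import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor

model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# in_features is the representation size of the default TwoMLPHead (1024)
in_features = model.roi_heads.box_predictor.cls_score.in_features
# replace the 91-class COCO predictor with one for a custom dataset
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes=3)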
+ + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True) + >>> # For training + >>> images, boxes = torch.rand(4, 3, 600, 1200), torch.rand(4, 11, 4) + >>> labels = torch.randint(1, 91, (4, 11)) + >>> images = list(image for image in images) + >>> targets = [] + >>> for i in range(len(images)): + >>> d = {} + >>> d['boxes'] = boxes[i] + >>> d['labels'] = labels[i] + >>> targets.append(d) + >>> output = model(images, targets) + >>> # For inference + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "faster_rcnn.onnx", opset_version = 11) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. + """ + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3) + + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers) + model = FasterRCNN(backbone, num_classes, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['fasterrcnn_resnet50_fpn_coco'], + progress=progress) + model.load_state_dict(state_dict) + overwrite_eps(model, 0.0) + return model + + +def _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=False, progress=True, num_classes=91, + pretrained_backbone=True, trainable_backbone_layers=None, **kwargs): + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 6, 3) + + if pretrained: + pretrained_backbone = False + backbone = mobilenet_backbone("mobilenet_v3_large", pretrained_backbone, True, + trainable_layers=trainable_backbone_layers) + + anchor_sizes = ((32, 64, 128, 256, 512, ), ) * 3 + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + + model = FasterRCNN(backbone, num_classes, rpn_anchor_generator=AnchorGenerator(anchor_sizes, aspect_ratios), + **kwargs) + if pretrained: + if model_urls.get(weights_name, None) is None: + raise ValueError("No checkpoint is available for model {}".format(weights_name)) + state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) + model.load_state_dict(state_dict) + return model + + +def fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, + trainable_backbone_layers=None, **kwargs): + """ + Constructs a low resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone tunned for mobile use-cases. + It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See + :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more + details. 
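A sketch (not from the upstream file) of the anchor configuration built by _fasterrcnn_mobilenet_v3_large_fpn above: one anchor set per feature map (two returned MobileNet stages plus the extra max-pool level), each with 5 sizes x 3 aspect ratios, i.e. 15 anchors per spatial location.

from torchvision.models.detection.anchor_utils import AnchorGenerator

anchor_sizes = ((32, 64, 128, 256, 512,),) * 3
aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes)
anchor_generator = AnchorGenerator(anchor_sizes, aspect_ratios)
assert anchor_generator.num_anchors_per_location() == [15, 15, 15]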
+ + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_320_fpn(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. + """ + weights_name = "fasterrcnn_mobilenet_v3_large_320_fpn_coco" + defaults = { + "min_size": 320, + "max_size": 640, + "rpn_pre_nms_top_n_test": 150, + "rpn_post_nms_top_n_test": 150, + "rpn_score_thresh": 0.05, + } + + kwargs = {**defaults, **kwargs} + return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, + num_classes=num_classes, pretrained_backbone=pretrained_backbone, + trainable_backbone_layers=trainable_backbone_layers, **kwargs) + + +def fasterrcnn_mobilenet_v3_large_fpn(pretrained=False, progress=True, num_classes=91, pretrained_backbone=True, + trainable_backbone_layers=None, **kwargs): + """ + Constructs a high resolution Faster R-CNN model with a MobileNetV3-Large FPN backbone. + It works similarly to Faster R-CNN with ResNet-50 FPN backbone. See + :func:`~torchvision.models.detection.fasterrcnn_resnet50_fpn` for more + details. + + Example:: + + >>> model = torchvision.models.detection.fasterrcnn_mobilenet_v3_large_fpn(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. + """ + weights_name = "fasterrcnn_mobilenet_v3_large_fpn_coco" + defaults = { + "rpn_score_thresh": 0.05, + } + + kwargs = {**defaults, **kwargs} + return _fasterrcnn_mobilenet_v3_large_fpn(weights_name, pretrained=pretrained, progress=progress, + num_classes=num_classes, pretrained_backbone=pretrained_backbone, + trainable_backbone_layers=trainable_backbone_layers, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/generalized_rcnn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/generalized_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..1d3979caa3fb2797c2cf0d1b16ad6ed8827e7c1d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/generalized_rcnn.py @@ -0,0 +1,110 @@ +""" +Implements the Generalized R-CNN framework +""" + +from collections import OrderedDict +import torch +from torch import nn, Tensor +import warnings +from typing import Tuple, List, Dict, Optional, Union + + +class GeneralizedRCNN(nn.Module): + """ + Main class for Generalized R-CNN. 
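A minimal sketch of the defaults-merging pattern used by the two MobileNetV3 wrappers above: caller-supplied keyword arguments take precedence over the built-in low-resolution defaults (the override value below is hypothetical).

defaults = {"min_size": 320, "max_size": 640, "rpn_score_thresh": 0.05}
kwargs = {"rpn_score_thresh": 0.3}            # hypothetical user override
kwargs = {**defaults, **kwargs}               # later keys win
assert kwargs == {"min_size": 320, "max_size": 640, "rpn_score_thresh": 0.3}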
+ + Args: + backbone (nn.Module): + rpn (nn.Module): + roi_heads (nn.Module): takes the features + the proposals from the RPN and computes + detections / masks from it. + transform (nn.Module): performs the data transformation from the inputs to feed into + the model + """ + + def __init__(self, backbone, rpn, roi_heads, transform): + super(GeneralizedRCNN, self).__init__() + self.transform = transform + self.backbone = backbone + self.rpn = rpn + self.roi_heads = roi_heads + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs(self, losses, detections): + # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Union[Dict[str, Tensor], List[Dict[str, Tensor]]] + if self.training: + return losses + + return detections + + def forward(self, images, targets=None): + # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + """ + Args: + images (list[Tensor]): images to be processed + targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional) + + Returns: + result (list[BoxList] or dict[Tensor]): the output from the model. + During training, it returns a dict[Tensor] which contains the losses. + During testing, it returns list[BoxList] contains additional fields + like `scores`, `labels` and `mask` (for Mask R-CNN models). + + """ + if self.training and targets is None: + raise ValueError("In training mode, targets should be passed") + if self.training: + assert targets is not None + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + if len(boxes.shape) != 2 or boxes.shape[-1] != 4: + raise ValueError("Expected target boxes to be a tensor" + "of shape [N, 4], got {:}.".format( + boxes.shape)) + else: + raise ValueError("Expected target boxes to be of type " + "Tensor, got {:}.".format(type(boxes))) + + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + assert len(val) == 2 + original_image_sizes.append((val[0], val[1])) + + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + # TODO: Move this to a function + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + # print the first degenerate box + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + raise ValueError("All bounding boxes should have positive height and width." + " Found invalid box {} for target at index {}." 
+ .format(degen_bb, target_idx)) + + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([('0', features)]) + proposals, proposal_losses = self.rpn(images, features, targets) + detections, detector_losses = self.roi_heads(features, proposals, images.image_sizes, targets) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + losses = {} + losses.update(detector_losses) + losses.update(proposal_losses) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("RCNN always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + else: + return self.eager_outputs(losses, detections) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/image_list.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/image_list.py new file mode 100644 index 0000000000000000000000000000000000000000..a389b3c3ce18626a017c1bc7c2d318edce5bc86e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/image_list.py @@ -0,0 +1,25 @@ +import torch +from torch import Tensor +from typing import List, Tuple + + +class ImageList(object): + """ + Structure that holds a list of images (of possibly + varying sizes) as a single tensor. + This works by padding the images to the same size, + and storing in a field the original sizes of each image + """ + + def __init__(self, tensors: Tensor, image_sizes: List[Tuple[int, int]]): + """ + Args: + tensors (tensor) + image_sizes (list[tuple[int, int]]) + """ + self.tensors = tensors + self.image_sizes = image_sizes + + def to(self, device: torch.device) -> 'ImageList': + cast_tensor = self.tensors.to(device) + return ImageList(cast_tensor, self.image_sizes) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/keypoint_rcnn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/keypoint_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..0d9a4de6dcaad9b545feea0d20d6d5005038c4e7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/keypoint_rcnn.py @@ -0,0 +1,347 @@ +import torch +from torch import nn + +from torchvision.ops import MultiScaleRoIAlign + +from ._utils import overwrite_eps +from ..utils import load_state_dict_from_url + +from .faster_rcnn import FasterRCNN +from .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers + + +__all__ = [ + "KeypointRCNN", "keypointrcnn_resnet50_fpn" +] + + +class KeypointRCNN(FasterRCNN): + """ + Implements Keypoint R-CNN. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + - keypoints (FloatTensor[N, K, 3]): the K keypoints location for each of the N instances, in the + format [x, y, visibility], where visibility=0 means that the keypoint is not visible. 
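A short sketch of the degenerate-box validation performed in GeneralizedRCNN.forward above (not from the upstream file; the box coordinates are made up): any box whose x2 <= x1 or y2 <= y1 is reported as invalid before the backbone is run.

import torch

boxes = torch.tensor([[10., 10., 50., 60.],    # valid box
                      [30., 40., 30., 80.]])   # zero width -> degenerate
degenerate_boxes = boxes[:, 2:] <= boxes[:, :2]
assert degenerate_boxes.any()
bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0]
print(boxes[bb_idx].tolist())                  # [30.0, 40.0, 30.0, 80.0]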
+ + The model returns a Dict[Tensor] during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the keypoint loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores or each prediction + - keypoints (FloatTensor[N, K, 3]): the locations of the predicted keypoints, in [x, y, v] format. + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain a out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or and OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + If box_predictor is specified, num_classes should be None. + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN + rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training + rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing + rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training + rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing + rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. + rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + rpn_score_thresh (float): during inference, only return proposals with a classification score + greater than rpn_score_thresh + box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes + box_head (nn.Module): module that takes the cropped feature maps as input + box_predictor (nn.Module): module that takes the output of box_head and returns the + classification logits and box regression deltas. 
+ box_score_thresh (float): during inference, only return proposals with a classification score + greater than box_score_thresh + box_nms_thresh (float): NMS threshold for the prediction head. Used during inference + box_detections_per_img (int): maximum number of detections per image, for all classes. + box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be + considered as positive during training of the classification head + box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be + considered as negative during training of the classification head + box_batch_size_per_image (int): number of proposals that are sampled during training of the + classification head + box_positive_fraction (float): proportion of positive proposals in a mini-batch during training + of the classification head + bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the + bounding boxes + keypoint_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes, which will be used for the keypoint head. + keypoint_head (nn.Module): module that takes the cropped feature maps as input + keypoint_predictor (nn.Module): module that takes the output of the keypoint_head and returns the + heatmap logits + + Example:: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import KeypointRCNN + >>> from torchvision.models.detection.anchor_utils import AnchorGenerator + >>> + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features + >>> # KeypointRCNN needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280 + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the RPN generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),)) + >>> + >>> # let's define what are the feature maps that we will + >>> # use to perform the region of interest cropping, as well as + >>> # the size of the crop after rescaling. + >>> # if your backbone returns a Tensor, featmap_names is expected to + >>> # be ['0']. More generally, the backbone should return an + >>> # OrderedDict[Tensor], and in featmap_names you can choose which + >>> # feature maps to use. 
+ >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=7, + >>> sampling_ratio=2) + >>> + >>> keypoint_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=14, + >>> sampling_ratio=2) + >>> # put the pieces together inside a KeypointRCNN model + >>> model = KeypointRCNN(backbone, + >>> num_classes=2, + >>> rpn_anchor_generator=anchor_generator, + >>> box_roi_pool=roi_pooler, + >>> keypoint_roi_pool=keypoint_roi_pooler) + >>> model.eval() + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + def __init__(self, backbone, num_classes=None, + # transform parameters + min_size=None, max_size=1333, + image_mean=None, image_std=None, + # RPN parameters + rpn_anchor_generator=None, rpn_head=None, + rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000, + rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000, + rpn_nms_thresh=0.7, + rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3, + rpn_batch_size_per_image=256, rpn_positive_fraction=0.5, + rpn_score_thresh=0.0, + # Box parameters + box_roi_pool=None, box_head=None, box_predictor=None, + box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100, + box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5, + box_batch_size_per_image=512, box_positive_fraction=0.25, + bbox_reg_weights=None, + # keypoint parameters + keypoint_roi_pool=None, keypoint_head=None, keypoint_predictor=None, + num_keypoints=17): + + assert isinstance(keypoint_roi_pool, (MultiScaleRoIAlign, type(None))) + if min_size is None: + min_size = (640, 672, 704, 736, 768, 800) + + if num_classes is not None: + if keypoint_predictor is not None: + raise ValueError("num_classes should be None when keypoint_predictor is specified") + + out_channels = backbone.out_channels + + if keypoint_roi_pool is None: + keypoint_roi_pool = MultiScaleRoIAlign( + featmap_names=['0', '1', '2', '3'], + output_size=14, + sampling_ratio=2) + + if keypoint_head is None: + keypoint_layers = tuple(512 for _ in range(8)) + keypoint_head = KeypointRCNNHeads(out_channels, keypoint_layers) + + if keypoint_predictor is None: + keypoint_dim_reduced = 512 # == keypoint_layers[-1] + keypoint_predictor = KeypointRCNNPredictor(keypoint_dim_reduced, num_keypoints) + + super(KeypointRCNN, self).__init__( + backbone, num_classes, + # transform parameters + min_size, max_size, + image_mean, image_std, + # RPN-specific parameters + rpn_anchor_generator, rpn_head, + rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test, + rpn_post_nms_top_n_train, rpn_post_nms_top_n_test, + rpn_nms_thresh, + rpn_fg_iou_thresh, rpn_bg_iou_thresh, + rpn_batch_size_per_image, rpn_positive_fraction, + rpn_score_thresh, + # Box parameters + box_roi_pool, box_head, box_predictor, + box_score_thresh, box_nms_thresh, box_detections_per_img, + box_fg_iou_thresh, box_bg_iou_thresh, + box_batch_size_per_image, box_positive_fraction, + bbox_reg_weights) + + self.roi_heads.keypoint_roi_pool = keypoint_roi_pool + self.roi_heads.keypoint_head = keypoint_head + self.roi_heads.keypoint_predictor = keypoint_predictor + + +class KeypointRCNNHeads(nn.Sequential): + def __init__(self, in_channels, layers): + d = [] + next_feature = in_channels + for out_channels in layers: + d.append(nn.Conv2d(next_feature, out_channels, 3, stride=1, padding=1)) + d.append(nn.ReLU(inplace=True)) + next_feature = out_channels + super(KeypointRCNNHeads, self).__init__(*d) + for m in self.children(): + if isinstance(m, nn.Conv2d): + 
nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu") + nn.init.constant_(m.bias, 0) + + +class KeypointRCNNPredictor(nn.Module): + def __init__(self, in_channels, num_keypoints): + super(KeypointRCNNPredictor, self).__init__() + input_features = in_channels + deconv_kernel = 4 + self.kps_score_lowres = nn.ConvTranspose2d( + input_features, + num_keypoints, + deconv_kernel, + stride=2, + padding=deconv_kernel // 2 - 1, + ) + nn.init.kaiming_normal_( + self.kps_score_lowres.weight, mode="fan_out", nonlinearity="relu" + ) + nn.init.constant_(self.kps_score_lowres.bias, 0) + self.up_scale = 2 + self.out_channels = num_keypoints + + def forward(self, x): + x = self.kps_score_lowres(x) + return torch.nn.functional.interpolate( + x, scale_factor=float(self.up_scale), mode="bilinear", align_corners=False, recompute_scale_factor=False + ) + + +model_urls = { + # legacy model for BC reasons, see https://github.com/pytorch/vision/issues/1606 + 'keypointrcnn_resnet50_fpn_coco_legacy': + 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-9f466800.pth', + 'keypointrcnn_resnet50_fpn_coco': + 'https://download.pytorch.org/models/keypointrcnn_resnet50_fpn_coco-fc266e95.pth', +} + + +def keypointrcnn_resnet50_fpn(pretrained=False, progress=True, + num_classes=2, num_keypoints=17, + pretrained_backbone=True, trainable_backbone_layers=None, **kwargs): + """ + Constructs a Keypoint R-CNN model with a ResNet-50-FPN backbone. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + - keypoints (``FloatTensor[N, K, 3]``): the ``K`` keypoints location for each of the ``N`` instances, in the + format ``[x, y, visibility]``, where ``visibility=0`` means that the keypoint is not visible. + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the keypoint loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detected instances: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each instance + - scores (``Tensor[N]``): the scores or each instance + - keypoints (``FloatTensor[N, K, 3]``): the locations of the predicted keypoints, in ``[x, y, v]`` format. + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Keypoint R-CNN is exportable to ONNX for a fixed batch size with inputs images of fixed size. 
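A shape sketch for KeypointRCNNPredictor above (an aside, not part of the upstream file): with the default 14x14 keypoint RoI features, the transposed convolution doubles the resolution to 28x28 and the bilinear interpolation doubles it again, giving one 56x56 heatmap per keypoint for each RoI.

import torch
from torchvision.models.detection.keypoint_rcnn import KeypointRCNNPredictor

predictor = KeypointRCNNPredictor(in_channels=512, num_keypoints=17)
heatmaps = predictor(torch.rand(2, 512, 14, 14))   # 2 RoIs, 512 channels, 14x14
assert heatmaps.shape == (2, 17, 56, 56)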
+ + Example:: + + >>> model = torchvision.models.detection.keypointrcnn_resnet50_fpn(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "keypoint_rcnn.onnx", opset_version = 11) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + num_keypoints (int): number of keypoints, default 17 + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. + """ + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3) + + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers) + model = KeypointRCNN(backbone, num_classes, num_keypoints=num_keypoints, **kwargs) + if pretrained: + key = 'keypointrcnn_resnet50_fpn_coco' + if pretrained == 'legacy': + key += '_legacy' + state_dict = load_state_dict_from_url(model_urls[key], + progress=progress) + model.load_state_dict(state_dict) + overwrite_eps(model, 0.0) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/mask_rcnn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/mask_rcnn.py new file mode 100644 index 0000000000000000000000000000000000000000..589a42068bf46a649e2610b280750c3c603ae51d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/mask_rcnn.py @@ -0,0 +1,337 @@ +from collections import OrderedDict + +from torch import nn + +from torchvision.ops import MultiScaleRoIAlign + +from ._utils import overwrite_eps +from ..utils import load_state_dict_from_url + +from .faster_rcnn import FasterRCNN +from .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers + +__all__ = [ + "MaskRCNN", "maskrcnn_resnet50_fpn", +] + + +class MaskRCNN(FasterRCNN): + """ + Implements Mask R-CNN. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + - masks (UInt8Tensor[N, H, W]): the segmentation binary masks for each instance + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses for both the RPN and the R-CNN, and the mask loss. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. 
The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores or each prediction + - masks (UInt8Tensor[N, 1, H, W]): the predicted masks for each instance, in 0-1 range. In order to + obtain the final segmentation masks, the soft masks can be thresholded, generally + with a value of 0.5 (mask >= 0.5) + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain a out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or and OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (including the background). + If box_predictor is specified, num_classes should be None. + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + rpn_anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + rpn_head (nn.Module): module that computes the objectness and regression deltas from the RPN + rpn_pre_nms_top_n_train (int): number of proposals to keep before applying NMS during training + rpn_pre_nms_top_n_test (int): number of proposals to keep before applying NMS during testing + rpn_post_nms_top_n_train (int): number of proposals to keep after applying NMS during training + rpn_post_nms_top_n_test (int): number of proposals to keep after applying NMS during testing + rpn_nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + rpn_fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + rpn_bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. + rpn_batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + rpn_positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + rpn_score_thresh (float): during inference, only return proposals with a classification score + greater than rpn_score_thresh + box_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes + box_head (nn.Module): module that takes the cropped feature maps as input + box_predictor (nn.Module): module that takes the output of box_head and returns the + classification logits and box regression deltas. + box_score_thresh (float): during inference, only return proposals with a classification score + greater than box_score_thresh + box_nms_thresh (float): NMS threshold for the prediction head. Used during inference + box_detections_per_img (int): maximum number of detections per image, for all classes. 
+ box_fg_iou_thresh (float): minimum IoU between the proposals and the GT box so that they can be + considered as positive during training of the classification head + box_bg_iou_thresh (float): maximum IoU between the proposals and the GT box so that they can be + considered as negative during training of the classification head + box_batch_size_per_image (int): number of proposals that are sampled during training of the + classification head + box_positive_fraction (float): proportion of positive proposals in a mini-batch during training + of the classification head + bbox_reg_weights (Tuple[float, float, float, float]): weights for the encoding/decoding of the + bounding boxes + mask_roi_pool (MultiScaleRoIAlign): the module which crops and resizes the feature maps in + the locations indicated by the bounding boxes, which will be used for the mask head. + mask_head (nn.Module): module that takes the cropped feature maps as input + mask_predictor (nn.Module): module that takes the output of the mask_head and returns the + segmentation mask logits + + Example:: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import MaskRCNN + >>> from torchvision.models.detection.anchor_utils import AnchorGenerator + >>> + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features + >>> # MaskRCNN needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280 + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the RPN generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator(sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),)) + >>> + >>> # let's define what are the feature maps that we will + >>> # use to perform the region of interest cropping, as well as + >>> # the size of the crop after rescaling. + >>> # if your backbone returns a Tensor, featmap_names is expected to + >>> # be ['0']. More generally, the backbone should return an + >>> # OrderedDict[Tensor], and in featmap_names you can choose which + >>> # feature maps to use. 
+ >>> roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=7, + >>> sampling_ratio=2) + >>> + >>> mask_roi_pooler = torchvision.ops.MultiScaleRoIAlign(featmap_names=['0'], + >>> output_size=14, + >>> sampling_ratio=2) + >>> # put the pieces together inside a MaskRCNN model + >>> model = MaskRCNN(backbone, + >>> num_classes=2, + >>> rpn_anchor_generator=anchor_generator, + >>> box_roi_pool=roi_pooler, + >>> mask_roi_pool=mask_roi_pooler) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + def __init__(self, backbone, num_classes=None, + # transform parameters + min_size=800, max_size=1333, + image_mean=None, image_std=None, + # RPN parameters + rpn_anchor_generator=None, rpn_head=None, + rpn_pre_nms_top_n_train=2000, rpn_pre_nms_top_n_test=1000, + rpn_post_nms_top_n_train=2000, rpn_post_nms_top_n_test=1000, + rpn_nms_thresh=0.7, + rpn_fg_iou_thresh=0.7, rpn_bg_iou_thresh=0.3, + rpn_batch_size_per_image=256, rpn_positive_fraction=0.5, + rpn_score_thresh=0.0, + # Box parameters + box_roi_pool=None, box_head=None, box_predictor=None, + box_score_thresh=0.05, box_nms_thresh=0.5, box_detections_per_img=100, + box_fg_iou_thresh=0.5, box_bg_iou_thresh=0.5, + box_batch_size_per_image=512, box_positive_fraction=0.25, + bbox_reg_weights=None, + # Mask parameters + mask_roi_pool=None, mask_head=None, mask_predictor=None): + + assert isinstance(mask_roi_pool, (MultiScaleRoIAlign, type(None))) + + if num_classes is not None: + if mask_predictor is not None: + raise ValueError("num_classes should be None when mask_predictor is specified") + + out_channels = backbone.out_channels + + if mask_roi_pool is None: + mask_roi_pool = MultiScaleRoIAlign( + featmap_names=['0', '1', '2', '3'], + output_size=14, + sampling_ratio=2) + + if mask_head is None: + mask_layers = (256, 256, 256, 256) + mask_dilation = 1 + mask_head = MaskRCNNHeads(out_channels, mask_layers, mask_dilation) + + if mask_predictor is None: + mask_predictor_in_channels = 256 # == mask_layers[-1] + mask_dim_reduced = 256 + mask_predictor = MaskRCNNPredictor(mask_predictor_in_channels, + mask_dim_reduced, num_classes) + + super(MaskRCNN, self).__init__( + backbone, num_classes, + # transform parameters + min_size, max_size, + image_mean, image_std, + # RPN-specific parameters + rpn_anchor_generator, rpn_head, + rpn_pre_nms_top_n_train, rpn_pre_nms_top_n_test, + rpn_post_nms_top_n_train, rpn_post_nms_top_n_test, + rpn_nms_thresh, + rpn_fg_iou_thresh, rpn_bg_iou_thresh, + rpn_batch_size_per_image, rpn_positive_fraction, + rpn_score_thresh, + # Box parameters + box_roi_pool, box_head, box_predictor, + box_score_thresh, box_nms_thresh, box_detections_per_img, + box_fg_iou_thresh, box_bg_iou_thresh, + box_batch_size_per_image, box_positive_fraction, + bbox_reg_weights) + + self.roi_heads.mask_roi_pool = mask_roi_pool + self.roi_heads.mask_head = mask_head + self.roi_heads.mask_predictor = mask_predictor + + +class MaskRCNNHeads(nn.Sequential): + def __init__(self, in_channels, layers, dilation): + """ + Args: + in_channels (int): number of input channels + layers (list): feature dimensions of each FCN layer + dilation (int): dilation rate of kernel + """ + d = OrderedDict() + next_feature = in_channels + for layer_idx, layer_features in enumerate(layers, 1): + d["mask_fcn{}".format(layer_idx)] = nn.Conv2d( + next_feature, layer_features, kernel_size=3, + stride=1, padding=dilation, dilation=dilation) + d["relu{}".format(layer_idx)] = 
nn.ReLU(inplace=True)
+            next_feature = layer_features
+
+        super(MaskRCNNHeads, self).__init__(d)
+        for name, param in self.named_parameters():
+            if "weight" in name:
+                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
+            # elif "bias" in name:
+            #     nn.init.constant_(param, 0)
+
+
+class MaskRCNNPredictor(nn.Sequential):
+    def __init__(self, in_channels, dim_reduced, num_classes):
+        super(MaskRCNNPredictor, self).__init__(OrderedDict([
+            ("conv5_mask", nn.ConvTranspose2d(in_channels, dim_reduced, 2, 2, 0)),
+            ("relu", nn.ReLU(inplace=True)),
+            ("mask_fcn_logits", nn.Conv2d(dim_reduced, num_classes, 1, 1, 0)),
+        ]))
+
+        for name, param in self.named_parameters():
+            if "weight" in name:
+                nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
+            # elif "bias" in name:
+            #     nn.init.constant_(param, 0)
+
+
+model_urls = {
+    'maskrcnn_resnet50_fpn_coco':
+        'https://download.pytorch.org/models/maskrcnn_resnet50_fpn_coco-bf2d0c1e.pth',
+}
+
+
+def maskrcnn_resnet50_fpn(pretrained=False, progress=True,
+                          num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs):
+    """
+    Constructs a Mask R-CNN model with a ResNet-50-FPN backbone.
+
+    The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each
+    image, and should be in ``0-1`` range. Different images can have different sizes.
+
+    The behavior of the model changes depending on whether it is in training or evaluation mode.
+
+    During training, the model expects both the input tensors, as well as targets (a list of dictionaries),
+    containing:
+
+        - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with
+          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
+        - labels (``Int64Tensor[N]``): the class label for each ground-truth box
+        - masks (``UInt8Tensor[N, H, W]``): the segmentation binary masks for each instance
+
+    The model returns a ``Dict[Tensor]`` during training, containing the classification and regression
+    losses for both the RPN and the R-CNN, and the mask loss.
+
+    During inference, the model requires only the input tensors, and returns the post-processed
+    predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as
+    follows, where ``N`` is the number of detected instances:
+
+        - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with
+          ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``.
+        - labels (``Int64Tensor[N]``): the predicted labels for each instance
+        - scores (``Tensor[N]``): the scores of each instance
+        - masks (``UInt8Tensor[N, 1, H, W]``): the predicted masks for each instance, in ``0-1`` range. In order to
+          obtain the final segmentation masks, the soft masks can be thresholded, generally
+          with a value of 0.5 (``mask >= 0.5``)
+
+    For more details on the output and on how to plot the masks, you may refer to :ref:`instance_seg_output`.
+
+    Mask R-CNN is exportable to ONNX for a fixed batch size with input images of fixed size.
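+
+    As a minimal sketch of the training-time target format (arbitrary, illustrative values)::
+
+        >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False, num_classes=2)
+        >>> model.train()
+        >>> images = [torch.rand(3, 300, 400)]
+        >>> boxes = torch.tensor([[50., 60., 200., 220.]])
+        >>> labels = torch.tensor([1], dtype=torch.int64)
+        >>> masks = torch.zeros((1, 300, 400), dtype=torch.uint8)
+        >>> masks[0, 60:220, 50:200] = 1
+        >>> targets = [{'boxes': boxes, 'labels': labels, 'masks': masks}]
+        >>> loss_dict = model(images, targets)  # dict of RPN, box and mask losses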
+ + Example:: + + >>> model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + >>> + >>> # optionally, if you want to export the model to ONNX: + >>> torch.onnx.export(model, x, "mask_rcnn.onnx", opset_version = 11) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. + """ + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3) + + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, trainable_layers=trainable_backbone_layers) + model = MaskRCNN(backbone, num_classes, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['maskrcnn_resnet50_fpn_coco'], + progress=progress) + model.load_state_dict(state_dict) + overwrite_eps(model, 0.0) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/retinanet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/retinanet.py new file mode 100644 index 0000000000000000000000000000000000000000..af6943628a4b832511693239c8eca0b808969bf0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/retinanet.py @@ -0,0 +1,628 @@ +import math +from collections import OrderedDict +import warnings + +import torch +from torch import nn, Tensor +from typing import Dict, List, Tuple, Optional + +from ._utils import overwrite_eps +from ..utils import load_state_dict_from_url + +from . import _utils as det_utils +from .anchor_utils import AnchorGenerator +from .transform import GeneralizedRCNNTransform +from .backbone_utils import resnet_fpn_backbone, _validate_trainable_layers +from ...ops.feature_pyramid_network import LastLevelP6P7 +from ...ops import sigmoid_focal_loss +from ...ops import boxes as box_ops + + +__all__ = [ + "RetinaNet", "retinanet_resnet50_fpn" +] + + +def _sum(x: List[Tensor]) -> Tensor: + res = x[0] + for i in x[1:]: + res = res + i + return res + + +class RetinaNetHead(nn.Module): + """ + A regression and classification head for use in RetinaNet. 
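+
+    A purely illustrative shape check, assuming two hypothetical feature levels::
+
+        >>> head = RetinaNetHead(in_channels=256, num_anchors=9, num_classes=80)
+        >>> feats = [torch.rand(1, 256, 32, 32), torch.rand(1, 256, 16, 16)]
+        >>> out = head(feats)
+        >>> out['cls_logits'].shape      # (N, HWA, num_classes)
+        torch.Size([1, 11520, 80])
+        >>> out['bbox_regression'].shape  # (N, HWA, 4)
+        torch.Size([1, 11520, 4])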
+ + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + num_classes (int): number of classes to be predicted + """ + + def __init__(self, in_channels, num_anchors, num_classes): + super().__init__() + self.classification_head = RetinaNetClassificationHead(in_channels, num_anchors, num_classes) + self.regression_head = RetinaNetRegressionHead(in_channels, num_anchors) + + def compute_loss(self, targets, head_outputs, anchors, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Dict[str, Tensor] + return { + 'classification': self.classification_head.compute_loss(targets, head_outputs, matched_idxs), + 'bbox_regression': self.regression_head.compute_loss(targets, head_outputs, anchors, matched_idxs), + } + + def forward(self, x): + # type: (List[Tensor]) -> Dict[str, Tensor] + return { + 'cls_logits': self.classification_head(x), + 'bbox_regression': self.regression_head(x) + } + + +class RetinaNetClassificationHead(nn.Module): + """ + A classification head for use in RetinaNet. + + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + num_classes (int): number of classes to be predicted + """ + + def __init__(self, in_channels, num_anchors, num_classes, prior_probability=0.01): + super().__init__() + + conv = [] + for _ in range(4): + conv.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)) + conv.append(nn.ReLU()) + self.conv = nn.Sequential(*conv) + + for layer in self.conv.children(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + self.cls_logits = nn.Conv2d(in_channels, num_anchors * num_classes, kernel_size=3, stride=1, padding=1) + torch.nn.init.normal_(self.cls_logits.weight, std=0.01) + torch.nn.init.constant_(self.cls_logits.bias, -math.log((1 - prior_probability) / prior_probability)) + + self.num_classes = num_classes + self.num_anchors = num_anchors + + # This is to fix using det_utils.Matcher.BETWEEN_THRESHOLDS in TorchScript. + # TorchScript doesn't support class attributes. 
+ # https://github.com/pytorch/vision/pull/1697#issuecomment-630255584 + self.BETWEEN_THRESHOLDS = det_utils.Matcher.BETWEEN_THRESHOLDS + + def compute_loss(self, targets, head_outputs, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Tensor + losses = [] + + cls_logits = head_outputs['cls_logits'] + + for targets_per_image, cls_logits_per_image, matched_idxs_per_image in zip(targets, cls_logits, matched_idxs): + # determine only the foreground + foreground_idxs_per_image = matched_idxs_per_image >= 0 + num_foreground = foreground_idxs_per_image.sum() + + # create the target classification + gt_classes_target = torch.zeros_like(cls_logits_per_image) + gt_classes_target[ + foreground_idxs_per_image, + targets_per_image['labels'][matched_idxs_per_image[foreground_idxs_per_image]] + ] = 1.0 + + # find indices for which anchors should be ignored + valid_idxs_per_image = matched_idxs_per_image != self.BETWEEN_THRESHOLDS + + # compute the classification loss + losses.append(sigmoid_focal_loss( + cls_logits_per_image[valid_idxs_per_image], + gt_classes_target[valid_idxs_per_image], + reduction='sum', + ) / max(1, num_foreground)) + + return _sum(losses) / len(targets) + + def forward(self, x): + # type: (List[Tensor]) -> Tensor + all_cls_logits = [] + + for features in x: + cls_logits = self.conv(features) + cls_logits = self.cls_logits(cls_logits) + + # Permute classification output from (N, A * K, H, W) to (N, HWA, K). + N, _, H, W = cls_logits.shape + cls_logits = cls_logits.view(N, -1, self.num_classes, H, W) + cls_logits = cls_logits.permute(0, 3, 4, 1, 2) + cls_logits = cls_logits.reshape(N, -1, self.num_classes) # Size=(N, HWA, 4) + + all_cls_logits.append(cls_logits) + + return torch.cat(all_cls_logits, dim=1) + + +class RetinaNetRegressionHead(nn.Module): + """ + A regression head for use in RetinaNet. 
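+
+    For reference, the regression targets are the usual R-CNN box parameterization produced by
+    ``det_utils.BoxCoder`` (here with unit weights): for an anchor with center ``(xa, ya)`` and
+    size ``(wa, ha)`` matched to a ground-truth box with center ``(x, y)`` and size ``(w, h)``,
+    the targets are ``tx = (x - xa) / wa``, ``ty = (y - ya) / ha``, ``tw = log(w / wa)`` and
+    ``th = log(h / ha)``, trained with an L1 loss over the foreground anchors.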
+ + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + """ + __annotations__ = { + 'box_coder': det_utils.BoxCoder, + } + + def __init__(self, in_channels, num_anchors): + super().__init__() + + conv = [] + for _ in range(4): + conv.append(nn.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)) + conv.append(nn.ReLU()) + self.conv = nn.Sequential(*conv) + + self.bbox_reg = nn.Conv2d(in_channels, num_anchors * 4, kernel_size=3, stride=1, padding=1) + torch.nn.init.normal_(self.bbox_reg.weight, std=0.01) + torch.nn.init.zeros_(self.bbox_reg.bias) + + for layer in self.conv.children(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, std=0.01) + torch.nn.init.zeros_(layer.bias) + + self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) + + def compute_loss(self, targets, head_outputs, anchors, matched_idxs): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor], List[Tensor]) -> Tensor + losses = [] + + bbox_regression = head_outputs['bbox_regression'] + + for targets_per_image, bbox_regression_per_image, anchors_per_image, matched_idxs_per_image in \ + zip(targets, bbox_regression, anchors, matched_idxs): + # determine only the foreground indices, ignore the rest + foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0] + num_foreground = foreground_idxs_per_image.numel() + + # select only the foreground boxes + matched_gt_boxes_per_image = targets_per_image['boxes'][matched_idxs_per_image[foreground_idxs_per_image]] + bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :] + anchors_per_image = anchors_per_image[foreground_idxs_per_image, :] + + # compute the regression targets + target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + + # compute the loss + losses.append(torch.nn.functional.l1_loss( + bbox_regression_per_image, + target_regression, + reduction='sum' + ) / max(1, num_foreground)) + + return _sum(losses) / max(1, len(targets)) + + def forward(self, x): + # type: (List[Tensor]) -> Tensor + all_bbox_regression = [] + + for features in x: + bbox_regression = self.conv(features) + bbox_regression = self.bbox_reg(bbox_regression) + + # Permute bbox regression output from (N, 4 * A, H, W) to (N, HWA, 4). + N, _, H, W = bbox_regression.shape + bbox_regression = bbox_regression.view(N, -1, 4, H, W) + bbox_regression = bbox_regression.permute(0, 3, 4, 1, 2) + bbox_regression = bbox_regression.reshape(N, -1, 4) # Size=(N, HWA, 4) + + all_bbox_regression.append(bbox_regression) + + return torch.cat(all_bbox_regression, dim=1) + + +class RetinaNet(nn.Module): + """ + Implements RetinaNet. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. 
+ + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows: + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each image + - scores (Tensor[N]): the scores for each prediction + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute, which indicates the number of output + channels that each feature map has (and it should be the same for all feature maps). + The backbone should return a single Tensor or an OrderedDict[Tensor]. + num_classes (int): number of output classes of the model (excluding the background). + min_size (int): minimum size of the image to be rescaled before feeding it to the backbone + max_size (int): maximum size of the image to be rescaled before feeding it to the backbone + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + head (nn.Module): Module run on top of the feature pyramid. + Defaults to a module containing a classification and regression module. + score_thresh (float): Score threshold used for postprocessing the detections. + nms_thresh (float): NMS threshold used for postprocessing the detections. + detections_per_img (int): Number of best detections to keep after NMS. + fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training. + bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training. + topk_candidates (int): Number of best detections to keep before NMS. + + Example: + + >>> import torch + >>> import torchvision + >>> from torchvision.models.detection import RetinaNet + >>> from torchvision.models.detection.anchor_utils import AnchorGenerator + >>> # load a pre-trained model for classification and return + >>> # only the features + >>> backbone = torchvision.models.mobilenet_v2(pretrained=True).features + >>> # RetinaNet needs to know the number of + >>> # output channels in a backbone. For mobilenet_v2, it's 1280 + >>> # so we need to add it here + >>> backbone.out_channels = 1280 + >>> + >>> # let's make the network generate 5 x 3 anchors per spatial + >>> # location, with 5 different sizes and 3 different aspect + >>> # ratios. 
We have a Tuple[Tuple[int]] because each feature + >>> # map could potentially have different sizes and + >>> # aspect ratios + >>> anchor_generator = AnchorGenerator( + >>> sizes=((32, 64, 128, 256, 512),), + >>> aspect_ratios=((0.5, 1.0, 2.0),) + >>> ) + >>> + >>> # put the pieces together inside a RetinaNet model + >>> model = RetinaNet(backbone, + >>> num_classes=2, + >>> anchor_generator=anchor_generator) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + """ + __annotations__ = { + 'box_coder': det_utils.BoxCoder, + 'proposal_matcher': det_utils.Matcher, + } + + def __init__(self, backbone, num_classes, + # transform parameters + min_size=800, max_size=1333, + image_mean=None, image_std=None, + # Anchor parameters + anchor_generator=None, head=None, + proposal_matcher=None, + score_thresh=0.05, + nms_thresh=0.5, + detections_per_img=300, + fg_iou_thresh=0.5, bg_iou_thresh=0.4, + topk_candidates=1000): + super().__init__() + + if not hasattr(backbone, "out_channels"): + raise ValueError( + "backbone should contain an attribute out_channels " + "specifying the number of output channels (assumed to be the " + "same for all the levels)") + self.backbone = backbone + + assert isinstance(anchor_generator, (AnchorGenerator, type(None))) + + if anchor_generator is None: + anchor_sizes = tuple((x, int(x * 2 ** (1.0 / 3)), int(x * 2 ** (2.0 / 3))) for x in [32, 64, 128, 256, 512]) + aspect_ratios = ((0.5, 1.0, 2.0),) * len(anchor_sizes) + anchor_generator = AnchorGenerator( + anchor_sizes, aspect_ratios + ) + self.anchor_generator = anchor_generator + + if head is None: + head = RetinaNetHead(backbone.out_channels, anchor_generator.num_anchors_per_location()[0], num_classes) + self.head = head + + if proposal_matcher is None: + proposal_matcher = det_utils.Matcher( + fg_iou_thresh, + bg_iou_thresh, + allow_low_quality_matches=True, + ) + self.proposal_matcher = proposal_matcher + + self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) + + if image_mean is None: + image_mean = [0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + self.transform = GeneralizedRCNNTransform(min_size, max_size, image_mean, image_std) + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + self.topk_candidates = topk_candidates + + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs(self, losses, detections): + # type: (Dict[str, Tensor], List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + if self.training: + return losses + + return detections + + def compute_loss(self, targets, head_outputs, anchors): + # type: (List[Dict[str, Tensor]], Dict[str, Tensor], List[Tensor]) -> Dict[str, Tensor] + matched_idxs = [] + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image['boxes'].numel() == 0: + matched_idxs.append(torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, + device=anchors_per_image.device)) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image['boxes'], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + return self.head.compute_loss(targets, head_outputs, anchors, matched_idxs) + + def postprocess_detections(self, head_outputs, anchors, image_shapes): + # type: (Dict[str, List[Tensor]], List[List[Tensor]], List[Tuple[int, int]]) -> List[Dict[str, Tensor]] + class_logits = 
head_outputs['cls_logits'] + box_regression = head_outputs['bbox_regression'] + + num_images = len(image_shapes) + + detections: List[Dict[str, Tensor]] = [] + + for index in range(num_images): + box_regression_per_image = [br[index] for br in box_regression] + logits_per_image = [cl[index] for cl in class_logits] + anchors_per_image, image_shape = anchors[index], image_shapes[index] + + image_boxes = [] + image_scores = [] + image_labels = [] + + for box_regression_per_level, logits_per_level, anchors_per_level in \ + zip(box_regression_per_image, logits_per_image, anchors_per_image): + num_classes = logits_per_level.shape[-1] + + # remove low scoring boxes + scores_per_level = torch.sigmoid(logits_per_level).flatten() + keep_idxs = scores_per_level > self.score_thresh + scores_per_level = scores_per_level[keep_idxs] + topk_idxs = torch.where(keep_idxs)[0] + + # keep only topk scoring predictions + num_topk = min(self.topk_candidates, topk_idxs.size(0)) + scores_per_level, idxs = scores_per_level.topk(num_topk) + topk_idxs = topk_idxs[idxs] + + anchor_idxs = torch.div(topk_idxs, num_classes, rounding_mode='floor') + labels_per_level = topk_idxs % num_classes + + boxes_per_level = self.box_coder.decode_single(box_regression_per_level[anchor_idxs], + anchors_per_level[anchor_idxs]) + boxes_per_level = box_ops.clip_boxes_to_image(boxes_per_level, image_shape) + + image_boxes.append(boxes_per_level) + image_scores.append(scores_per_level) + image_labels.append(labels_per_level) + + image_boxes = torch.cat(image_boxes, dim=0) + image_scores = torch.cat(image_scores, dim=0) + image_labels = torch.cat(image_labels, dim=0) + + # non-maximum suppression + keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh) + keep = keep[:self.detections_per_img] + + detections.append({ + 'boxes': image_boxes[keep], + 'scores': image_scores[keep], + 'labels': image_labels[keep], + }) + + return detections + + def forward(self, images, targets=None): + # type: (List[Tensor], Optional[List[Dict[str, Tensor]]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]] + """ + Args: + images (list[Tensor]): images to be processed + targets (list[Dict[Tensor]]): ground-truth boxes present in the image (optional) + + Returns: + result (list[BoxList] or dict[Tensor]): the output from the model. + During training, it returns a dict[Tensor] which contains the losses. + During testing, it returns list[BoxList] contains additional fields + like `scores`, `labels` and `mask` (for Mask R-CNN models). 
+ + """ + if self.training and targets is None: + raise ValueError("In training mode, targets should be passed") + + if self.training: + assert targets is not None + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + if len(boxes.shape) != 2 or boxes.shape[-1] != 4: + raise ValueError("Expected target boxes to be a tensor" + "of shape [N, 4], got {:}.".format( + boxes.shape)) + else: + raise ValueError("Expected target boxes to be of type " + "Tensor, got {:}.".format(type(boxes))) + + # get the original image sizes + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + assert len(val) == 2 + original_image_sizes.append((val[0], val[1])) + + # transform the input + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + # TODO: Move this to a function + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + # print the first degenerate box + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + raise ValueError("All bounding boxes should have positive height and width." + " Found invalid box {} for target at index {}." + .format(degen_bb, target_idx)) + + # get the features from the backbone + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([('0', features)]) + + # TODO: Do we want a list or a dict? + features = list(features.values()) + + # compute the retinanet heads outputs using the features + head_outputs = self.head(features) + + # create the set of anchors + anchors = self.anchor_generator(images, features) + + losses = {} + detections: List[Dict[str, Tensor]] = [] + if self.training: + assert targets is not None + + # compute the losses + losses = self.compute_loss(targets, head_outputs, anchors) + else: + # recover level sizes + num_anchors_per_level = [x.size(2) * x.size(3) for x in features] + HW = 0 + for v in num_anchors_per_level: + HW += v + HWA = head_outputs['cls_logits'].size(1) + A = HWA // HW + num_anchors_per_level = [hw * A for hw in num_anchors_per_level] + + # split outputs per level + split_head_outputs: Dict[str, List[Tensor]] = {} + for k in head_outputs: + split_head_outputs[k] = list(head_outputs[k].split(num_anchors_per_level, dim=1)) + split_anchors = [list(a.split(num_anchors_per_level)) for a in anchors] + + # compute the detections + detections = self.postprocess_detections(split_head_outputs, split_anchors, images.image_sizes) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("RetinaNet always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) + + +model_urls = { + 'retinanet_resnet50_fpn_coco': + 'https://download.pytorch.org/models/retinanet_resnet50_fpn_coco-eeacb38b.pth', +} + + +def retinanet_resnet50_fpn(pretrained=False, progress=True, + num_classes=91, pretrained_backbone=True, trainable_backbone_layers=None, **kwargs): + """ + Constructs a RetinaNet model with a ResNet-50-FPN backbone. + + The input to the model is expected to be a list of tensors, each of shape ``[C, H, W]``, one for each + image, and should be in ``0-1`` range. Different images can have different sizes. 
+ + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the class label for each ground-truth box + + The model returns a ``Dict[Tensor]`` during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a ``List[Dict[Tensor]]``, one for each input image. The fields of the ``Dict`` are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (``Int64Tensor[N]``): the predicted labels for each detection + - scores (``Tensor[N]``): the scores of each detection + + For more details on the output, you may refer to :ref:`instance_seg_output`. + + Example:: + + >>> model = torchvision.models.detection.retinanet_resnet50_fpn(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 400), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. + """ + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 5, 3) + + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + # skip P2 because it generates too many anchors (according to their paper) + backbone = resnet_fpn_backbone('resnet50', pretrained_backbone, returned_layers=[2, 3, 4], + extra_blocks=LastLevelP6P7(256, 256), trainable_layers=trainable_backbone_layers) + model = RetinaNet(backbone, num_classes, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['retinanet_resnet50_fpn_coco'], + progress=progress) + model.load_state_dict(state_dict) + overwrite_eps(model, 0.0) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/roi_heads.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/roi_heads.py new file mode 100644 index 0000000000000000000000000000000000000000..9948d5f537fde2451a28bc5f64f238b569659e47 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/roi_heads.py @@ -0,0 +1,867 @@ +import torch +import torchvision + +import torch.nn.functional as F +from torch import nn, Tensor + +from torchvision.ops import boxes as box_ops + +from torchvision.ops import roi_align + +from . import _utils as det_utils + +from typing import Optional, List, Dict, Tuple + + +def fastrcnn_loss(class_logits, box_regression, labels, regression_targets): + # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] + """ + Computes the loss for Faster R-CNN. 
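+
+    A purely illustrative shape sketch (random values, hypothetical sizes)::
+
+        >>> num_proposals, num_classes = 8, 3
+        >>> class_logits = torch.randn(num_proposals, num_classes)
+        >>> box_regression = torch.randn(num_proposals, num_classes * 4)
+        >>> labels = [torch.randint(0, num_classes, (num_proposals,))]
+        >>> regression_targets = [torch.randn(num_proposals, 4)]
+        >>> cls_loss, box_loss = fastrcnn_loss(class_logits, box_regression, labels, regression_targets)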
+ + Args: + class_logits (Tensor) + box_regression (Tensor) + labels (list[BoxList]) + regression_targets (Tensor) + + Returns: + classification_loss (Tensor) + box_loss (Tensor) + """ + + labels = torch.cat(labels, dim=0) + regression_targets = torch.cat(regression_targets, dim=0) + + classification_loss = F.cross_entropy(class_logits, labels) + + # get indices that correspond to the regression targets for + # the corresponding ground truth labels, to be used with + # advanced indexing + sampled_pos_inds_subset = torch.where(labels > 0)[0] + labels_pos = labels[sampled_pos_inds_subset] + N, num_classes = class_logits.shape + box_regression = box_regression.reshape(N, box_regression.size(-1) // 4, 4) + + box_loss = F.smooth_l1_loss( + box_regression[sampled_pos_inds_subset, labels_pos], + regression_targets[sampled_pos_inds_subset], + beta=1 / 9, + reduction='sum', + ) + box_loss = box_loss / labels.numel() + + return classification_loss, box_loss + + +def maskrcnn_inference(x, labels): + # type: (Tensor, List[Tensor]) -> List[Tensor] + """ + From the results of the CNN, post process the masks + by taking the mask corresponding to the class with max + probability (which are of fixed size and directly output + by the CNN) and return the masks in the mask field of the BoxList. + + Args: + x (Tensor): the mask logits + labels (list[BoxList]): bounding boxes that are used as + reference, one for ech image + + Returns: + results (list[BoxList]): one BoxList for each image, containing + the extra field mask + """ + mask_prob = x.sigmoid() + + # select masks corresponding to the predicted classes + num_masks = x.shape[0] + boxes_per_image = [label.shape[0] for label in labels] + labels = torch.cat(labels) + index = torch.arange(num_masks, device=labels.device) + mask_prob = mask_prob[index, labels][:, None] + mask_prob = mask_prob.split(boxes_per_image, dim=0) + + return mask_prob + + +def project_masks_on_boxes(gt_masks, boxes, matched_idxs, M): + # type: (Tensor, Tensor, Tensor, int) -> Tensor + """ + Given segmentation masks and the bounding boxes corresponding + to the location of the masks in the image, this function + crops and resizes the masks in the position defined by the + boxes. This prepares the masks for them to be fed to the + loss computation as the targets. 
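+
+    A purely illustrative shape check::
+
+        >>> gt_masks = torch.zeros(2, 32, 32)
+        >>> boxes = torch.tensor([[4., 4., 20., 20.]])
+        >>> matched_idxs = torch.tensor([1])
+        >>> project_masks_on_boxes(gt_masks, boxes, matched_idxs, 14).shape
+        torch.Size([1, 14, 14])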
+ """ + matched_idxs = matched_idxs.to(boxes) + rois = torch.cat([matched_idxs[:, None], boxes], dim=1) + gt_masks = gt_masks[:, None].to(rois) + return roi_align(gt_masks, rois, (M, M), 1.)[:, 0] + + +def maskrcnn_loss(mask_logits, proposals, gt_masks, gt_labels, mask_matched_idxs): + # type: (Tensor, List[Tensor], List[Tensor], List[Tensor], List[Tensor]) -> Tensor + """ + Args: + proposals (list[BoxList]) + mask_logits (Tensor) + targets (list[BoxList]) + + Return: + mask_loss (Tensor): scalar tensor containing the loss + """ + + discretization_size = mask_logits.shape[-1] + labels = [gt_label[idxs] for gt_label, idxs in zip(gt_labels, mask_matched_idxs)] + mask_targets = [ + project_masks_on_boxes(m, p, i, discretization_size) + for m, p, i in zip(gt_masks, proposals, mask_matched_idxs) + ] + + labels = torch.cat(labels, dim=0) + mask_targets = torch.cat(mask_targets, dim=0) + + # torch.mean (in binary_cross_entropy_with_logits) doesn't + # accept empty tensors, so handle it separately + if mask_targets.numel() == 0: + return mask_logits.sum() * 0 + + mask_loss = F.binary_cross_entropy_with_logits( + mask_logits[torch.arange(labels.shape[0], device=labels.device), labels], mask_targets + ) + return mask_loss + + +def keypoints_to_heatmap(keypoints, rois, heatmap_size): + # type: (Tensor, Tensor, int) -> Tuple[Tensor, Tensor] + offset_x = rois[:, 0] + offset_y = rois[:, 1] + scale_x = heatmap_size / (rois[:, 2] - rois[:, 0]) + scale_y = heatmap_size / (rois[:, 3] - rois[:, 1]) + + offset_x = offset_x[:, None] + offset_y = offset_y[:, None] + scale_x = scale_x[:, None] + scale_y = scale_y[:, None] + + x = keypoints[..., 0] + y = keypoints[..., 1] + + x_boundary_inds = x == rois[:, 2][:, None] + y_boundary_inds = y == rois[:, 3][:, None] + + x = (x - offset_x) * scale_x + x = x.floor().long() + y = (y - offset_y) * scale_y + y = y.floor().long() + + x[x_boundary_inds] = heatmap_size - 1 + y[y_boundary_inds] = heatmap_size - 1 + + valid_loc = (x >= 0) & (y >= 0) & (x < heatmap_size) & (y < heatmap_size) + vis = keypoints[..., 2] > 0 + valid = (valid_loc & vis).long() + + lin_ind = y * heatmap_size + x + heatmaps = lin_ind * valid + + return heatmaps, valid + + +def _onnx_heatmaps_to_keypoints(maps, maps_i, roi_map_width, roi_map_height, + widths_i, heights_i, offset_x_i, offset_y_i): + num_keypoints = torch.scalar_tensor(maps.size(1), dtype=torch.int64) + + width_correction = widths_i / roi_map_width + height_correction = heights_i / roi_map_height + + roi_map = F.interpolate( + maps_i[:, None], size=(int(roi_map_height), int(roi_map_width)), mode='bicubic', align_corners=False)[:, 0] + + w = torch.scalar_tensor(roi_map.size(2), dtype=torch.int64) + pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1) + + x_int = (pos % w) + y_int = ((pos - x_int) // w) + + x = (torch.tensor(0.5, dtype=torch.float32) + x_int.to(dtype=torch.float32)) * \ + width_correction.to(dtype=torch.float32) + y = (torch.tensor(0.5, dtype=torch.float32) + y_int.to(dtype=torch.float32)) * \ + height_correction.to(dtype=torch.float32) + + xy_preds_i_0 = x + offset_x_i.to(dtype=torch.float32) + xy_preds_i_1 = y + offset_y_i.to(dtype=torch.float32) + xy_preds_i_2 = torch.ones(xy_preds_i_1.shape, dtype=torch.float32) + xy_preds_i = torch.stack([xy_preds_i_0.to(dtype=torch.float32), + xy_preds_i_1.to(dtype=torch.float32), + xy_preds_i_2.to(dtype=torch.float32)], 0) + + # TODO: simplify when indexing without rank will be supported by ONNX + base = num_keypoints * num_keypoints + num_keypoints + 1 + ind = 
torch.arange(num_keypoints) + ind = ind.to(dtype=torch.int64) * base + end_scores_i = roi_map.index_select(1, y_int.to(dtype=torch.int64)) \ + .index_select(2, x_int.to(dtype=torch.int64)).view(-1).index_select(0, ind.to(dtype=torch.int64)) + + return xy_preds_i, end_scores_i + + +@torch.jit._script_if_tracing +def _onnx_heatmaps_to_keypoints_loop(maps, rois, widths_ceil, heights_ceil, + widths, heights, offset_x, offset_y, num_keypoints): + xy_preds = torch.zeros((0, 3, int(num_keypoints)), dtype=torch.float32, device=maps.device) + end_scores = torch.zeros((0, int(num_keypoints)), dtype=torch.float32, device=maps.device) + + for i in range(int(rois.size(0))): + xy_preds_i, end_scores_i = _onnx_heatmaps_to_keypoints(maps, maps[i], + widths_ceil[i], heights_ceil[i], + widths[i], heights[i], + offset_x[i], offset_y[i]) + xy_preds = torch.cat((xy_preds.to(dtype=torch.float32), + xy_preds_i.unsqueeze(0).to(dtype=torch.float32)), 0) + end_scores = torch.cat((end_scores.to(dtype=torch.float32), + end_scores_i.to(dtype=torch.float32).unsqueeze(0)), 0) + return xy_preds, end_scores + + +def heatmaps_to_keypoints(maps, rois): + """Extract predicted keypoint locations from heatmaps. Output has shape + (#rois, 4, #keypoints) with the 4 rows corresponding to (x, y, logit, prob) + for each keypoint. + """ + # This function converts a discrete image coordinate in a HEATMAP_SIZE x + # HEATMAP_SIZE image to a continuous keypoint coordinate. We maintain + # consistency with keypoints_to_heatmap_labels by using the conversion from + # Heckbert 1990: c = d + 0.5, where d is a discrete coordinate and c is a + # continuous coordinate. + offset_x = rois[:, 0] + offset_y = rois[:, 1] + + widths = rois[:, 2] - rois[:, 0] + heights = rois[:, 3] - rois[:, 1] + widths = widths.clamp(min=1) + heights = heights.clamp(min=1) + widths_ceil = widths.ceil() + heights_ceil = heights.ceil() + + num_keypoints = maps.shape[1] + + if torchvision._is_tracing(): + xy_preds, end_scores = _onnx_heatmaps_to_keypoints_loop(maps, rois, + widths_ceil, heights_ceil, widths, heights, + offset_x, offset_y, + torch.scalar_tensor(num_keypoints, dtype=torch.int64)) + return xy_preds.permute(0, 2, 1), end_scores + + xy_preds = torch.zeros((len(rois), 3, num_keypoints), dtype=torch.float32, device=maps.device) + end_scores = torch.zeros((len(rois), num_keypoints), dtype=torch.float32, device=maps.device) + for i in range(len(rois)): + roi_map_width = int(widths_ceil[i].item()) + roi_map_height = int(heights_ceil[i].item()) + width_correction = widths[i] / roi_map_width + height_correction = heights[i] / roi_map_height + roi_map = F.interpolate( + maps[i][:, None], size=(roi_map_height, roi_map_width), mode='bicubic', align_corners=False)[:, 0] + # roi_map_probs = scores_to_probs(roi_map.copy()) + w = roi_map.shape[2] + pos = roi_map.reshape(num_keypoints, -1).argmax(dim=1) + + x_int = pos % w + y_int = torch.div(pos - x_int, w, rounding_mode='floor') + # assert (roi_map_probs[k, y_int, x_int] == + # roi_map_probs[k, :, :].max()) + x = (x_int.float() + 0.5) * width_correction + y = (y_int.float() + 0.5) * height_correction + xy_preds[i, 0, :] = x + offset_x[i] + xy_preds[i, 1, :] = y + offset_y[i] + xy_preds[i, 2, :] = 1 + end_scores[i, :] = roi_map[torch.arange(num_keypoints, device=roi_map.device), y_int, x_int] + + return xy_preds.permute(0, 2, 1), end_scores + + +def keypointrcnn_loss(keypoint_logits, proposals, gt_keypoints, keypoint_matched_idxs): + # type: (Tensor, List[Tensor], List[Tensor], List[Tensor]) -> Tensor + N, K, H, W = 
keypoint_logits.shape + assert H == W + discretization_size = H + heatmaps = [] + valid = [] + for proposals_per_image, gt_kp_in_image, midx in zip(proposals, gt_keypoints, keypoint_matched_idxs): + kp = gt_kp_in_image[midx] + heatmaps_per_image, valid_per_image = keypoints_to_heatmap( + kp, proposals_per_image, discretization_size + ) + heatmaps.append(heatmaps_per_image.view(-1)) + valid.append(valid_per_image.view(-1)) + + keypoint_targets = torch.cat(heatmaps, dim=0) + valid = torch.cat(valid, dim=0).to(dtype=torch.uint8) + valid = torch.where(valid)[0] + + # torch.mean (in binary_cross_entropy_with_logits) does'nt + # accept empty tensors, so handle it sepaartely + if keypoint_targets.numel() == 0 or len(valid) == 0: + return keypoint_logits.sum() * 0 + + keypoint_logits = keypoint_logits.view(N * K, H * W) + + keypoint_loss = F.cross_entropy(keypoint_logits[valid], keypoint_targets[valid]) + return keypoint_loss + + +def keypointrcnn_inference(x, boxes): + # type: (Tensor, List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + kp_probs = [] + kp_scores = [] + + boxes_per_image = [box.size(0) for box in boxes] + x2 = x.split(boxes_per_image, dim=0) + + for xx, bb in zip(x2, boxes): + kp_prob, scores = heatmaps_to_keypoints(xx, bb) + kp_probs.append(kp_prob) + kp_scores.append(scores) + + return kp_probs, kp_scores + + +def _onnx_expand_boxes(boxes, scale): + # type: (Tensor, float) -> Tensor + w_half = (boxes[:, 2] - boxes[:, 0]) * .5 + h_half = (boxes[:, 3] - boxes[:, 1]) * .5 + x_c = (boxes[:, 2] + boxes[:, 0]) * .5 + y_c = (boxes[:, 3] + boxes[:, 1]) * .5 + + w_half = w_half.to(dtype=torch.float32) * scale + h_half = h_half.to(dtype=torch.float32) * scale + + boxes_exp0 = x_c - w_half + boxes_exp1 = y_c - h_half + boxes_exp2 = x_c + w_half + boxes_exp3 = y_c + h_half + boxes_exp = torch.stack((boxes_exp0, boxes_exp1, boxes_exp2, boxes_exp3), 1) + return boxes_exp + + +# the next two functions should be merged inside Masker +# but are kept here for the moment while we need them +# temporarily for paste_mask_in_image +def expand_boxes(boxes, scale): + # type: (Tensor, float) -> Tensor + if torchvision._is_tracing(): + return _onnx_expand_boxes(boxes, scale) + w_half = (boxes[:, 2] - boxes[:, 0]) * .5 + h_half = (boxes[:, 3] - boxes[:, 1]) * .5 + x_c = (boxes[:, 2] + boxes[:, 0]) * .5 + y_c = (boxes[:, 3] + boxes[:, 1]) * .5 + + w_half *= scale + h_half *= scale + + boxes_exp = torch.zeros_like(boxes) + boxes_exp[:, 0] = x_c - w_half + boxes_exp[:, 2] = x_c + w_half + boxes_exp[:, 1] = y_c - h_half + boxes_exp[:, 3] = y_c + h_half + return boxes_exp + + +@torch.jit.unused +def expand_masks_tracing_scale(M, padding): + # type: (int, int) -> float + return torch.tensor(M + 2 * padding).to(torch.float32) / torch.tensor(M).to(torch.float32) + + +def expand_masks(mask, padding): + # type: (Tensor, int) -> Tuple[Tensor, float] + M = mask.shape[-1] + if torch._C._get_tracing_state(): # could not import is_tracing(), not sure why + scale = expand_masks_tracing_scale(M, padding) + else: + scale = float(M + 2 * padding) / M + padded_mask = F.pad(mask, (padding,) * 4) + return padded_mask, scale + + +def paste_mask_in_image(mask, box, im_h, im_w): + # type: (Tensor, Tensor, int, int) -> Tensor + TO_REMOVE = 1 + w = int(box[2] - box[0] + TO_REMOVE) + h = int(box[3] - box[1] + TO_REMOVE) + w = max(w, 1) + h = max(h, 1) + + # Set shape to [batchxCxHxW] + mask = mask.expand((1, 1, -1, -1)) + + # Resize mask + mask = F.interpolate(mask, size=(h, w), mode='bilinear', align_corners=False) + mask = 
mask[0][0] + + im_mask = torch.zeros((im_h, im_w), dtype=mask.dtype, device=mask.device) + x_0 = max(box[0], 0) + x_1 = min(box[2] + 1, im_w) + y_0 = max(box[1], 0) + y_1 = min(box[3] + 1, im_h) + + im_mask[y_0:y_1, x_0:x_1] = mask[ + (y_0 - box[1]):(y_1 - box[1]), (x_0 - box[0]):(x_1 - box[0]) + ] + return im_mask + + +def _onnx_paste_mask_in_image(mask, box, im_h, im_w): + one = torch.ones(1, dtype=torch.int64) + zero = torch.zeros(1, dtype=torch.int64) + + w = (box[2] - box[0] + one) + h = (box[3] - box[1] + one) + w = torch.max(torch.cat((w, one))) + h = torch.max(torch.cat((h, one))) + + # Set shape to [batchxCxHxW] + mask = mask.expand((1, 1, mask.size(0), mask.size(1))) + + # Resize mask + mask = F.interpolate(mask, size=(int(h), int(w)), mode='bilinear', align_corners=False) + mask = mask[0][0] + + x_0 = torch.max(torch.cat((box[0].unsqueeze(0), zero))) + x_1 = torch.min(torch.cat((box[2].unsqueeze(0) + one, im_w.unsqueeze(0)))) + y_0 = torch.max(torch.cat((box[1].unsqueeze(0), zero))) + y_1 = torch.min(torch.cat((box[3].unsqueeze(0) + one, im_h.unsqueeze(0)))) + + unpaded_im_mask = mask[(y_0 - box[1]):(y_1 - box[1]), + (x_0 - box[0]):(x_1 - box[0])] + + # TODO : replace below with a dynamic padding when support is added in ONNX + + # pad y + zeros_y0 = torch.zeros(y_0, unpaded_im_mask.size(1)) + zeros_y1 = torch.zeros(im_h - y_1, unpaded_im_mask.size(1)) + concat_0 = torch.cat((zeros_y0, + unpaded_im_mask.to(dtype=torch.float32), + zeros_y1), 0)[0:im_h, :] + # pad x + zeros_x0 = torch.zeros(concat_0.size(0), x_0) + zeros_x1 = torch.zeros(concat_0.size(0), im_w - x_1) + im_mask = torch.cat((zeros_x0, + concat_0, + zeros_x1), 1)[:, :im_w] + return im_mask + + +@torch.jit._script_if_tracing +def _onnx_paste_masks_in_image_loop(masks, boxes, im_h, im_w): + res_append = torch.zeros(0, im_h, im_w) + for i in range(masks.size(0)): + mask_res = _onnx_paste_mask_in_image(masks[i][0], boxes[i], im_h, im_w) + mask_res = mask_res.unsqueeze(0) + res_append = torch.cat((res_append, mask_res)) + return res_append + + +def paste_masks_in_image(masks, boxes, img_shape, padding=1): + # type: (Tensor, Tensor, Tuple[int, int], int) -> Tensor + masks, scale = expand_masks(masks, padding=padding) + boxes = expand_boxes(boxes, scale).to(dtype=torch.int64) + im_h, im_w = img_shape + + if torchvision._is_tracing(): + return _onnx_paste_masks_in_image_loop(masks, boxes, + torch.scalar_tensor(im_h, dtype=torch.int64), + torch.scalar_tensor(im_w, dtype=torch.int64))[:, None] + res = [ + paste_mask_in_image(m[0], b, im_h, im_w) + for m, b in zip(masks, boxes) + ] + if len(res) > 0: + ret = torch.stack(res, dim=0)[:, None] + else: + ret = masks.new_empty((0, 1, im_h, im_w)) + return ret + + +class RoIHeads(nn.Module): + __annotations__ = { + 'box_coder': det_utils.BoxCoder, + 'proposal_matcher': det_utils.Matcher, + 'fg_bg_sampler': det_utils.BalancedPositiveNegativeSampler, + } + + def __init__(self, + box_roi_pool, + box_head, + box_predictor, + # Faster R-CNN training + fg_iou_thresh, bg_iou_thresh, + batch_size_per_image, positive_fraction, + bbox_reg_weights, + # Faster R-CNN inference + score_thresh, + nms_thresh, + detections_per_img, + # Mask + mask_roi_pool=None, + mask_head=None, + mask_predictor=None, + keypoint_roi_pool=None, + keypoint_head=None, + keypoint_predictor=None, + ): + super(RoIHeads, self).__init__() + + self.box_similarity = box_ops.box_iou + # assign ground-truth boxes for each proposal + self.proposal_matcher = det_utils.Matcher( + fg_iou_thresh, + bg_iou_thresh, + 
allow_low_quality_matches=False) + + self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler( + batch_size_per_image, + positive_fraction) + + if bbox_reg_weights is None: + bbox_reg_weights = (10., 10., 5., 5.) + self.box_coder = det_utils.BoxCoder(bbox_reg_weights) + + self.box_roi_pool = box_roi_pool + self.box_head = box_head + self.box_predictor = box_predictor + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + + self.mask_roi_pool = mask_roi_pool + self.mask_head = mask_head + self.mask_predictor = mask_predictor + + self.keypoint_roi_pool = keypoint_roi_pool + self.keypoint_head = keypoint_head + self.keypoint_predictor = keypoint_predictor + + def has_mask(self): + if self.mask_roi_pool is None: + return False + if self.mask_head is None: + return False + if self.mask_predictor is None: + return False + return True + + def has_keypoint(self): + if self.keypoint_roi_pool is None: + return False + if self.keypoint_head is None: + return False + if self.keypoint_predictor is None: + return False + return True + + def assign_targets_to_proposals(self, proposals, gt_boxes, gt_labels): + # type: (List[Tensor], List[Tensor], List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + matched_idxs = [] + labels = [] + for proposals_in_image, gt_boxes_in_image, gt_labels_in_image in zip(proposals, gt_boxes, gt_labels): + + if gt_boxes_in_image.numel() == 0: + # Background image + device = proposals_in_image.device + clamped_matched_idxs_in_image = torch.zeros( + (proposals_in_image.shape[0],), dtype=torch.int64, device=device + ) + labels_in_image = torch.zeros( + (proposals_in_image.shape[0],), dtype=torch.int64, device=device + ) + else: + # set to self.box_similarity when https://github.com/pytorch/pytorch/issues/27495 lands + match_quality_matrix = box_ops.box_iou(gt_boxes_in_image, proposals_in_image) + matched_idxs_in_image = self.proposal_matcher(match_quality_matrix) + + clamped_matched_idxs_in_image = matched_idxs_in_image.clamp(min=0) + + labels_in_image = gt_labels_in_image[clamped_matched_idxs_in_image] + labels_in_image = labels_in_image.to(dtype=torch.int64) + + # Label background (below the low threshold) + bg_inds = matched_idxs_in_image == self.proposal_matcher.BELOW_LOW_THRESHOLD + labels_in_image[bg_inds] = 0 + + # Label ignore proposals (between low and high thresholds) + ignore_inds = matched_idxs_in_image == self.proposal_matcher.BETWEEN_THRESHOLDS + labels_in_image[ignore_inds] = -1 # -1 is ignored by sampler + + matched_idxs.append(clamped_matched_idxs_in_image) + labels.append(labels_in_image) + return matched_idxs, labels + + def subsample(self, labels): + # type: (List[Tensor]) -> List[Tensor] + sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) + sampled_inds = [] + for img_idx, (pos_inds_img, neg_inds_img) in enumerate( + zip(sampled_pos_inds, sampled_neg_inds) + ): + img_sampled_inds = torch.where(pos_inds_img | neg_inds_img)[0] + sampled_inds.append(img_sampled_inds) + return sampled_inds + + def add_gt_proposals(self, proposals, gt_boxes): + # type: (List[Tensor], List[Tensor]) -> List[Tensor] + proposals = [ + torch.cat((proposal, gt_box)) + for proposal, gt_box in zip(proposals, gt_boxes) + ] + + return proposals + + def check_targets(self, targets): + # type: (Optional[List[Dict[str, Tensor]]]) -> None + assert targets is not None + assert all(["boxes" in t for t in targets]) + assert all(["labels" in t for t in targets]) + if self.has_mask(): + assert all(["masks" in t for t in 
targets]) + + def select_training_samples(self, + proposals, # type: List[Tensor] + targets # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor], List[Tensor]] + self.check_targets(targets) + assert targets is not None + dtype = proposals[0].dtype + device = proposals[0].device + + gt_boxes = [t["boxes"].to(dtype) for t in targets] + gt_labels = [t["labels"] for t in targets] + + # append ground-truth bboxes to propos + proposals = self.add_gt_proposals(proposals, gt_boxes) + + # get matching gt indices for each proposal + matched_idxs, labels = self.assign_targets_to_proposals(proposals, gt_boxes, gt_labels) + # sample a fixed proportion of positive-negative proposals + sampled_inds = self.subsample(labels) + matched_gt_boxes = [] + num_images = len(proposals) + for img_id in range(num_images): + img_sampled_inds = sampled_inds[img_id] + proposals[img_id] = proposals[img_id][img_sampled_inds] + labels[img_id] = labels[img_id][img_sampled_inds] + matched_idxs[img_id] = matched_idxs[img_id][img_sampled_inds] + + gt_boxes_in_image = gt_boxes[img_id] + if gt_boxes_in_image.numel() == 0: + gt_boxes_in_image = torch.zeros((1, 4), dtype=dtype, device=device) + matched_gt_boxes.append(gt_boxes_in_image[matched_idxs[img_id]]) + + regression_targets = self.box_coder.encode(matched_gt_boxes, proposals) + return proposals, matched_idxs, labels, regression_targets + + def postprocess_detections(self, + class_logits, # type: Tensor + box_regression, # type: Tensor + proposals, # type: List[Tensor] + image_shapes # type: List[Tuple[int, int]] + ): + # type: (...) -> Tuple[List[Tensor], List[Tensor], List[Tensor]] + device = class_logits.device + num_classes = class_logits.shape[-1] + + boxes_per_image = [boxes_in_image.shape[0] for boxes_in_image in proposals] + pred_boxes = self.box_coder.decode(box_regression, proposals) + + pred_scores = F.softmax(class_logits, -1) + + pred_boxes_list = pred_boxes.split(boxes_per_image, 0) + pred_scores_list = pred_scores.split(boxes_per_image, 0) + + all_boxes = [] + all_scores = [] + all_labels = [] + for boxes, scores, image_shape in zip(pred_boxes_list, pred_scores_list, image_shapes): + boxes = box_ops.clip_boxes_to_image(boxes, image_shape) + + # create labels for each prediction + labels = torch.arange(num_classes, device=device) + labels = labels.view(1, -1).expand_as(scores) + + # remove predictions with the background label + boxes = boxes[:, 1:] + scores = scores[:, 1:] + labels = labels[:, 1:] + + # batch everything, by making every class prediction be a separate instance + boxes = boxes.reshape(-1, 4) + scores = scores.reshape(-1) + labels = labels.reshape(-1) + + # remove low scoring boxes + inds = torch.where(scores > self.score_thresh)[0] + boxes, scores, labels = boxes[inds], scores[inds], labels[inds] + + # remove empty boxes + keep = box_ops.remove_small_boxes(boxes, min_size=1e-2) + boxes, scores, labels = boxes[keep], scores[keep], labels[keep] + + # non-maximum suppression, independently done per class + keep = box_ops.batched_nms(boxes, scores, labels, self.nms_thresh) + # keep only topk scoring predictions + keep = keep[:self.detections_per_img] + boxes, scores, labels = boxes[keep], scores[keep], labels[keep] + + all_boxes.append(boxes) + all_scores.append(scores) + all_labels.append(labels) + + return all_boxes, all_scores, all_labels + + def forward(self, + features, # type: Dict[str, Tensor] + proposals, # type: List[Tensor] + image_shapes, # type: List[Tuple[int, int]] + 
targets=None # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) -> Tuple[List[Dict[str, Tensor]], Dict[str, Tensor]] + """ + Args: + features (List[Tensor]) + proposals (List[Tensor[N, 4]]) + image_shapes (List[Tuple[H, W]]) + targets (List[Dict]) + """ + if targets is not None: + for t in targets: + # TODO: https://github.com/pytorch/pytorch/issues/26731 + floating_point_types = (torch.float, torch.double, torch.half) + assert t["boxes"].dtype in floating_point_types, 'target boxes must of float type' + assert t["labels"].dtype == torch.int64, 'target labels must of int64 type' + if self.has_keypoint(): + assert t["keypoints"].dtype == torch.float32, 'target keypoints must of float type' + + if self.training: + proposals, matched_idxs, labels, regression_targets = self.select_training_samples(proposals, targets) + else: + labels = None + regression_targets = None + matched_idxs = None + + box_features = self.box_roi_pool(features, proposals, image_shapes) + box_features = self.box_head(box_features) + class_logits, box_regression = self.box_predictor(box_features) + + result: List[Dict[str, torch.Tensor]] = [] + losses = {} + if self.training: + assert labels is not None and regression_targets is not None + loss_classifier, loss_box_reg = fastrcnn_loss( + class_logits, box_regression, labels, regression_targets) + losses = { + "loss_classifier": loss_classifier, + "loss_box_reg": loss_box_reg + } + else: + boxes, scores, labels = self.postprocess_detections(class_logits, box_regression, proposals, image_shapes) + num_images = len(boxes) + for i in range(num_images): + result.append( + { + "boxes": boxes[i], + "labels": labels[i], + "scores": scores[i], + } + ) + + if self.has_mask(): + mask_proposals = [p["boxes"] for p in result] + if self.training: + assert matched_idxs is not None + # during training, only focus on positive boxes + num_images = len(proposals) + mask_proposals = [] + pos_matched_idxs = [] + for img_id in range(num_images): + pos = torch.where(labels[img_id] > 0)[0] + mask_proposals.append(proposals[img_id][pos]) + pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + if self.mask_roi_pool is not None: + mask_features = self.mask_roi_pool(features, mask_proposals, image_shapes) + mask_features = self.mask_head(mask_features) + mask_logits = self.mask_predictor(mask_features) + else: + raise Exception("Expected mask_roi_pool to be not None") + + loss_mask = {} + if self.training: + assert targets is not None + assert pos_matched_idxs is not None + assert mask_logits is not None + + gt_masks = [t["masks"] for t in targets] + gt_labels = [t["labels"] for t in targets] + rcnn_loss_mask = maskrcnn_loss( + mask_logits, mask_proposals, + gt_masks, gt_labels, pos_matched_idxs) + loss_mask = { + "loss_mask": rcnn_loss_mask + } + else: + labels = [r["labels"] for r in result] + masks_probs = maskrcnn_inference(mask_logits, labels) + for mask_prob, r in zip(masks_probs, result): + r["masks"] = mask_prob + + losses.update(loss_mask) + + # keep none checks in if conditional so torchscript will conditionally + # compile each branch + if self.keypoint_roi_pool is not None and self.keypoint_head is not None \ + and self.keypoint_predictor is not None: + keypoint_proposals = [p["boxes"] for p in result] + if self.training: + # during training, only focus on positive boxes + num_images = len(proposals) + keypoint_proposals = [] + pos_matched_idxs = [] + assert matched_idxs is not None + for img_id in range(num_images): + pos = 
torch.where(labels[img_id] > 0)[0] + keypoint_proposals.append(proposals[img_id][pos]) + pos_matched_idxs.append(matched_idxs[img_id][pos]) + else: + pos_matched_idxs = None + + keypoint_features = self.keypoint_roi_pool(features, keypoint_proposals, image_shapes) + keypoint_features = self.keypoint_head(keypoint_features) + keypoint_logits = self.keypoint_predictor(keypoint_features) + + loss_keypoint = {} + if self.training: + assert targets is not None + assert pos_matched_idxs is not None + + gt_keypoints = [t["keypoints"] for t in targets] + rcnn_loss_keypoint = keypointrcnn_loss( + keypoint_logits, keypoint_proposals, + gt_keypoints, pos_matched_idxs) + loss_keypoint = { + "loss_keypoint": rcnn_loss_keypoint + } + else: + assert keypoint_logits is not None + assert keypoint_proposals is not None + + keypoints_probs, kp_scores = keypointrcnn_inference(keypoint_logits, keypoint_proposals) + for keypoint_prob, kps, r in zip(keypoints_probs, kp_scores, result): + r["keypoints"] = keypoint_prob + r["keypoints_scores"] = kps + + losses.update(loss_keypoint) + + return result, losses diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/rpn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/rpn.py new file mode 100644 index 0000000000000000000000000000000000000000..a98eac24dd348b13052ab058f68053ad1040d1dc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/rpn.py @@ -0,0 +1,369 @@ +import torch +from torch.nn import functional as F +from torch import nn, Tensor + +import torchvision +from torchvision.ops import boxes as box_ops + +from . import _utils as det_utils +from .image_list import ImageList + +from typing import List, Optional, Dict, Tuple + +# Import AnchorGenerator to keep compatibility. 
+from .anchor_utils import AnchorGenerator + + +@torch.jit.unused +def _onnx_get_num_anchors_and_pre_nms_top_n(ob, orig_pre_nms_top_n): + # type: (Tensor, int) -> Tuple[int, int] + from torch.onnx import operators + num_anchors = operators.shape_as_tensor(ob)[1].unsqueeze(0) + pre_nms_top_n = torch.min(torch.cat( + (torch.tensor([orig_pre_nms_top_n], dtype=num_anchors.dtype), + num_anchors), 0)) + + return num_anchors, pre_nms_top_n + + +class RPNHead(nn.Module): + """ + Adds a simple RPN Head with classification and regression heads + + Args: + in_channels (int): number of channels of the input feature + num_anchors (int): number of anchors to be predicted + """ + + def __init__(self, in_channels, num_anchors): + super(RPNHead, self).__init__() + self.conv = nn.Conv2d( + in_channels, in_channels, kernel_size=3, stride=1, padding=1 + ) + self.cls_logits = nn.Conv2d(in_channels, num_anchors, kernel_size=1, stride=1) + self.bbox_pred = nn.Conv2d( + in_channels, num_anchors * 4, kernel_size=1, stride=1 + ) + + for layer in self.children(): + torch.nn.init.normal_(layer.weight, std=0.01) + torch.nn.init.constant_(layer.bias, 0) + + def forward(self, x): + # type: (List[Tensor]) -> Tuple[List[Tensor], List[Tensor]] + logits = [] + bbox_reg = [] + for feature in x: + t = F.relu(self.conv(feature)) + logits.append(self.cls_logits(t)) + bbox_reg.append(self.bbox_pred(t)) + return logits, bbox_reg + + +def permute_and_flatten(layer, N, A, C, H, W): + # type: (Tensor, int, int, int, int, int) -> Tensor + layer = layer.view(N, -1, C, H, W) + layer = layer.permute(0, 3, 4, 1, 2) + layer = layer.reshape(N, -1, C) + return layer + + +def concat_box_prediction_layers(box_cls, box_regression): + # type: (List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] + box_cls_flattened = [] + box_regression_flattened = [] + # for each feature level, permute the outputs to make them be in the + # same format as the labels. Note that the labels are computed for + # all feature levels concatenated, so we keep the same representation + # for the objectness and the box_regression + for box_cls_per_level, box_regression_per_level in zip( + box_cls, box_regression + ): + N, AxC, H, W = box_cls_per_level.shape + Ax4 = box_regression_per_level.shape[1] + A = Ax4 // 4 + C = AxC // A + box_cls_per_level = permute_and_flatten( + box_cls_per_level, N, A, C, H, W + ) + box_cls_flattened.append(box_cls_per_level) + + box_regression_per_level = permute_and_flatten( + box_regression_per_level, N, A, 4, H, W + ) + box_regression_flattened.append(box_regression_per_level) + # concatenate on the first dimension (representing the feature levels), to + # take into account the way the labels were generated (with all feature maps + # being concatenated as well) + box_cls = torch.cat(box_cls_flattened, dim=1).flatten(0, -2) + box_regression = torch.cat(box_regression_flattened, dim=1).reshape(-1, 4) + return box_cls, box_regression + + +class RegionProposalNetwork(torch.nn.Module): + """ + Implements Region Proposal Network (RPN). + + Args: + anchor_generator (AnchorGenerator): module that generates the anchors for a set of feature + maps. + head (nn.Module): module that computes the objectness and regression deltas + fg_iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training of the RPN. + bg_iou_thresh (float): maximum IoU between the anchor and the GT box so that they can be + considered as negative during training of the RPN. 
+ batch_size_per_image (int): number of anchors that are sampled during training of the RPN + for computing the loss + positive_fraction (float): proportion of positive anchors in a mini-batch during training + of the RPN + pre_nms_top_n (Dict[int]): number of proposals to keep before applying NMS. It should + contain two fields: training and testing, to allow for different values depending + on training or evaluation + post_nms_top_n (Dict[int]): number of proposals to keep after applying NMS. It should + contain two fields: training and testing, to allow for different values depending + on training or evaluation + nms_thresh (float): NMS threshold used for postprocessing the RPN proposals + + """ + __annotations__ = { + 'box_coder': det_utils.BoxCoder, + 'proposal_matcher': det_utils.Matcher, + 'fg_bg_sampler': det_utils.BalancedPositiveNegativeSampler, + 'pre_nms_top_n': Dict[str, int], + 'post_nms_top_n': Dict[str, int], + } + + def __init__(self, + anchor_generator, + head, + # + fg_iou_thresh, bg_iou_thresh, + batch_size_per_image, positive_fraction, + # + pre_nms_top_n, post_nms_top_n, nms_thresh, score_thresh=0.0): + super(RegionProposalNetwork, self).__init__() + self.anchor_generator = anchor_generator + self.head = head + self.box_coder = det_utils.BoxCoder(weights=(1.0, 1.0, 1.0, 1.0)) + + # used during training + self.box_similarity = box_ops.box_iou + + self.proposal_matcher = det_utils.Matcher( + fg_iou_thresh, + bg_iou_thresh, + allow_low_quality_matches=True, + ) + + self.fg_bg_sampler = det_utils.BalancedPositiveNegativeSampler( + batch_size_per_image, positive_fraction + ) + # used during testing + self._pre_nms_top_n = pre_nms_top_n + self._post_nms_top_n = post_nms_top_n + self.nms_thresh = nms_thresh + self.score_thresh = score_thresh + self.min_size = 1e-3 + + def pre_nms_top_n(self): + if self.training: + return self._pre_nms_top_n['training'] + return self._pre_nms_top_n['testing'] + + def post_nms_top_n(self): + if self.training: + return self._post_nms_top_n['training'] + return self._post_nms_top_n['testing'] + + def assign_targets_to_anchors(self, anchors, targets): + # type: (List[Tensor], List[Dict[str, Tensor]]) -> Tuple[List[Tensor], List[Tensor]] + labels = [] + matched_gt_boxes = [] + for anchors_per_image, targets_per_image in zip(anchors, targets): + gt_boxes = targets_per_image["boxes"] + + if gt_boxes.numel() == 0: + # Background image (negative example) + device = anchors_per_image.device + matched_gt_boxes_per_image = torch.zeros(anchors_per_image.shape, dtype=torch.float32, device=device) + labels_per_image = torch.zeros((anchors_per_image.shape[0],), dtype=torch.float32, device=device) + else: + match_quality_matrix = self.box_similarity(gt_boxes, anchors_per_image) + matched_idxs = self.proposal_matcher(match_quality_matrix) + # get the targets corresponding GT for each proposal + # NB: need to clamp the indices because we can have a single + # GT in the image, and matched_idxs can be -2, which goes + # out of bounds + matched_gt_boxes_per_image = gt_boxes[matched_idxs.clamp(min=0)] + + labels_per_image = matched_idxs >= 0 + labels_per_image = labels_per_image.to(dtype=torch.float32) + + # Background (negative examples) + bg_indices = matched_idxs == self.proposal_matcher.BELOW_LOW_THRESHOLD + labels_per_image[bg_indices] = 0.0 + + # discard indices that are between thresholds + inds_to_discard = matched_idxs == self.proposal_matcher.BETWEEN_THRESHOLDS + labels_per_image[inds_to_discard] = -1.0 + + labels.append(labels_per_image) + 
matched_gt_boxes.append(matched_gt_boxes_per_image) + return labels, matched_gt_boxes + + def _get_top_n_idx(self, objectness, num_anchors_per_level): + # type: (Tensor, List[int]) -> Tensor + r = [] + offset = 0 + for ob in objectness.split(num_anchors_per_level, 1): + if torchvision._is_tracing(): + num_anchors, pre_nms_top_n = _onnx_get_num_anchors_and_pre_nms_top_n(ob, self.pre_nms_top_n()) + else: + num_anchors = ob.shape[1] + pre_nms_top_n = min(self.pre_nms_top_n(), num_anchors) + _, top_n_idx = ob.topk(pre_nms_top_n, dim=1) + r.append(top_n_idx + offset) + offset += num_anchors + return torch.cat(r, dim=1) + + def filter_proposals(self, proposals, objectness, image_shapes, num_anchors_per_level): + # type: (Tensor, Tensor, List[Tuple[int, int]], List[int]) -> Tuple[List[Tensor], List[Tensor]] + num_images = proposals.shape[0] + device = proposals.device + # do not backprop throught objectness + objectness = objectness.detach() + objectness = objectness.reshape(num_images, -1) + + levels = [ + torch.full((n,), idx, dtype=torch.int64, device=device) + for idx, n in enumerate(num_anchors_per_level) + ] + levels = torch.cat(levels, 0) + levels = levels.reshape(1, -1).expand_as(objectness) + + # select top_n boxes independently per level before applying nms + top_n_idx = self._get_top_n_idx(objectness, num_anchors_per_level) + + image_range = torch.arange(num_images, device=device) + batch_idx = image_range[:, None] + + objectness = objectness[batch_idx, top_n_idx] + levels = levels[batch_idx, top_n_idx] + proposals = proposals[batch_idx, top_n_idx] + + objectness_prob = torch.sigmoid(objectness) + + final_boxes = [] + final_scores = [] + for boxes, scores, lvl, img_shape in zip(proposals, objectness_prob, levels, image_shapes): + boxes = box_ops.clip_boxes_to_image(boxes, img_shape) + + # remove small boxes + keep = box_ops.remove_small_boxes(boxes, self.min_size) + boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep] + + # remove low scoring boxes + # use >= for Backwards compatibility + keep = torch.where(scores >= self.score_thresh)[0] + boxes, scores, lvl = boxes[keep], scores[keep], lvl[keep] + + # non-maximum suppression, independently done per level + keep = box_ops.batched_nms(boxes, scores, lvl, self.nms_thresh) + + # keep only topk scoring predictions + keep = keep[:self.post_nms_top_n()] + boxes, scores = boxes[keep], scores[keep] + + final_boxes.append(boxes) + final_scores.append(scores) + return final_boxes, final_scores + + def compute_loss(self, objectness, pred_bbox_deltas, labels, regression_targets): + # type: (Tensor, Tensor, List[Tensor], List[Tensor]) -> Tuple[Tensor, Tensor] + """ + Args: + objectness (Tensor) + pred_bbox_deltas (Tensor) + labels (List[Tensor]) + regression_targets (List[Tensor]) + + Returns: + objectness_loss (Tensor) + box_loss (Tensor) + """ + + sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels) + sampled_pos_inds = torch.where(torch.cat(sampled_pos_inds, dim=0))[0] + sampled_neg_inds = torch.where(torch.cat(sampled_neg_inds, dim=0))[0] + + sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0) + + objectness = objectness.flatten() + + labels = torch.cat(labels, dim=0) + regression_targets = torch.cat(regression_targets, dim=0) + + box_loss = F.smooth_l1_loss( + pred_bbox_deltas[sampled_pos_inds], + regression_targets[sampled_pos_inds], + beta=1 / 9, + reduction='sum', + ) / (sampled_inds.numel()) + + objectness_loss = F.binary_cross_entropy_with_logits( + objectness[sampled_inds], labels[sampled_inds] + ) + + 
return objectness_loss, box_loss + + def forward(self, + images, # type: ImageList + features, # type: Dict[str, Tensor] + targets=None # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) -> Tuple[List[Tensor], Dict[str, Tensor]] + """ + Args: + images (ImageList): images for which we want to compute the predictions + features (OrderedDict[Tensor]): features computed from the images that are + used for computing the predictions. Each tensor in the list + correspond to different feature levels + targets (List[Dict[Tensor]]): ground-truth boxes present in the image (optional). + If provided, each element in the dict should contain a field `boxes`, + with the locations of the ground-truth boxes. + + Returns: + boxes (List[Tensor]): the predicted boxes from the RPN, one Tensor per + image. + losses (Dict[Tensor]): the losses for the model during training. During + testing, it is an empty dict. + """ + # RPN uses all feature maps that are available + features = list(features.values()) + objectness, pred_bbox_deltas = self.head(features) + anchors = self.anchor_generator(images, features) + + num_images = len(anchors) + num_anchors_per_level_shape_tensors = [o[0].shape for o in objectness] + num_anchors_per_level = [s[0] * s[1] * s[2] for s in num_anchors_per_level_shape_tensors] + objectness, pred_bbox_deltas = \ + concat_box_prediction_layers(objectness, pred_bbox_deltas) + # apply pred_bbox_deltas to anchors to obtain the decoded proposals + # note that we detach the deltas because Faster R-CNN do not backprop through + # the proposals + proposals = self.box_coder.decode(pred_bbox_deltas.detach(), anchors) + proposals = proposals.view(num_images, -1, 4) + boxes, scores = self.filter_proposals(proposals, objectness, images.image_sizes, num_anchors_per_level) + + losses = {} + if self.training: + assert targets is not None + labels, matched_gt_boxes = self.assign_targets_to_anchors(anchors, targets) + regression_targets = self.box_coder.encode(matched_gt_boxes, anchors) + loss_objectness, loss_rpn_box_reg = self.compute_loss( + objectness, pred_bbox_deltas, labels, regression_targets) + losses = { + "loss_objectness": loss_objectness, + "loss_rpn_box_reg": loss_rpn_box_reg, + } + return boxes, losses diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssd.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssd.py new file mode 100644 index 0000000000000000000000000000000000000000..b6e620dba4a35720634a09ed27d37fe90fdff8ff --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssd.py @@ -0,0 +1,596 @@ +import torch +import torch.nn.functional as F +import warnings + +from collections import OrderedDict +from torch import nn, Tensor +from typing import Any, Dict, List, Optional, Tuple + +from . import _utils as det_utils +from .anchor_utils import DefaultBoxGenerator +from .backbone_utils import _validate_trainable_layers +from .transform import GeneralizedRCNNTransform +from .. import vgg +from ..utils import load_state_dict_from_url +from ...ops import boxes as box_ops + +__all__ = ['SSD', 'ssd300_vgg16'] + +model_urls = { + 'ssd300_vgg16_coco': 'https://download.pytorch.org/models/ssd300_vgg16_coco-b556d3b4.pth', +} + +backbone_urls = { + # We port the features of a VGG16 backbone trained by amdegroot because unlike the one on TorchVision, it uses the + # same input standardization method as the paper. 
Ref: https://s3.amazonaws.com/amdegroot-models/vgg16_reducedfc.pth + 'vgg16_features': 'https://download.pytorch.org/models/vgg16_features-amdegroot.pth' +} + + +def _xavier_init(conv: nn.Module): + for layer in conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.xavier_uniform_(layer.weight) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0.0) + + +class SSDHead(nn.Module): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + super().__init__() + self.classification_head = SSDClassificationHead(in_channels, num_anchors, num_classes) + self.regression_head = SSDRegressionHead(in_channels, num_anchors) + + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: + return { + 'bbox_regression': self.regression_head(x), + 'cls_logits': self.classification_head(x), + } + + +class SSDScoringHead(nn.Module): + def __init__(self, module_list: nn.ModuleList, num_columns: int): + super().__init__() + self.module_list = module_list + self.num_columns = num_columns + + def _get_result_from_module_list(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.module_list[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.module_list) + if idx < 0: + idx += num_blocks + i = 0 + out = x + for module in self.module_list: + if i == idx: + out = module(x) + i += 1 + return out + + def forward(self, x: List[Tensor]) -> Tensor: + all_results = [] + + for i, features in enumerate(x): + results = self._get_result_from_module_list(features, i) + + # Permute output from (N, A * K, H, W) to (N, HWA, K). + N, _, H, W = results.shape + results = results.view(N, -1, self.num_columns, H, W) + results = results.permute(0, 3, 4, 1, 2) + results = results.reshape(N, -1, self.num_columns) # Size=(N, HWA, K) + + all_results.append(results) + + return torch.cat(all_results, dim=1) + + +class SSDClassificationHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int): + cls_logits = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + cls_logits.append(nn.Conv2d(channels, num_classes * anchors, kernel_size=3, padding=1)) + _xavier_init(cls_logits) + super().__init__(cls_logits, num_classes) + + +class SSDRegressionHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int]): + bbox_reg = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + bbox_reg.append(nn.Conv2d(channels, 4 * anchors, kernel_size=3, padding=1)) + _xavier_init(bbox_reg) + super().__init__(bbox_reg, 4) + + +class SSD(nn.Module): + """ + Implements SSD architecture from `"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes but they will be resized + to a fixed size before passing it to the backbone. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. 
+ - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Args: + backbone (nn.Module): the network used to compute the features for the model. + It should contain an out_channels attribute with the list of the output channels of + each feature map. The backbone should return a single Tensor or an OrderedDict[Tensor]. + anchor_generator (DefaultBoxGenerator): module that generates the default boxes for a + set of feature maps. + size (Tuple[int, int]): the width and height to which images will be rescaled before feeding them + to the backbone. + num_classes (int): number of output classes of the model (excluding the background). + image_mean (Tuple[float, float, float]): mean values used for input normalization. + They are generally the mean values of the dataset on which the backbone has been trained + on + image_std (Tuple[float, float, float]): std values used for input normalization. + They are generally the std values of the dataset on which the backbone has been trained on + head (nn.Module, optional): Module run on top of the backbone features. Defaults to a module containing + a classification and regression module. + score_thresh (float): Score threshold used for postprocessing the detections. + nms_thresh (float): NMS threshold used for postprocessing the detections. + detections_per_img (int): Number of best detections to keep after NMS. + iou_thresh (float): minimum IoU between the anchor and the GT box so that they can be + considered as positive during training. + topk_candidates (int): Number of best detections to keep before NMS. + positive_fraction (float): a number between 0 and 1 which indicates the proportion of positive + proposals used during the training of the classification head. It is used to estimate the negative to + positive ratio. 
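+
+    Example (editor's illustrative sketch, not part of the upstream torchvision source; it simply exercises
+    the ``ssd300_vgg16`` builder defined later in this file, which returns an ``SSD`` instance):
+
+        >>> model = torchvision.models.detection.ssd300_vgg16(pretrained=False)
+        >>> # inference: a list of 0-1 range [C, H, W] tensors in, a list of detection dicts out
+        >>> model.eval()
+        >>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)]
+        >>> predictions = model(x)
+        >>> # training: pass targets as well and a dict of losses is returned instead
+        >>> model.train()
+        >>> targets = [{'boxes': torch.tensor([[10., 20., 100., 200.]]), 'labels': torch.tensor([5])},
+        ...            {'boxes': torch.tensor([[30., 40., 200., 300.]]), 'labels': torch.tensor([7])}]
+        >>> losses = model(x, targets)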
+ """ + __annotations__ = { + 'box_coder': det_utils.BoxCoder, + 'proposal_matcher': det_utils.Matcher, + } + + def __init__(self, backbone: nn.Module, anchor_generator: DefaultBoxGenerator, + size: Tuple[int, int], num_classes: int, + image_mean: Optional[List[float]] = None, image_std: Optional[List[float]] = None, + head: Optional[nn.Module] = None, + score_thresh: float = 0.01, + nms_thresh: float = 0.45, + detections_per_img: int = 200, + iou_thresh: float = 0.5, + topk_candidates: int = 400, + positive_fraction: float = 0.25): + super().__init__() + + self.backbone = backbone + + self.anchor_generator = anchor_generator + + self.box_coder = det_utils.BoxCoder(weights=(10., 10., 5., 5.)) + + if head is None: + if hasattr(backbone, 'out_channels'): + out_channels = backbone.out_channels + else: + out_channels = det_utils.retrieve_out_channels(backbone, size) + + assert len(out_channels) == len(anchor_generator.aspect_ratios) + + num_anchors = self.anchor_generator.num_anchors_per_location() + head = SSDHead(out_channels, num_anchors, num_classes) + self.head = head + + self.proposal_matcher = det_utils.SSDMatcher(iou_thresh) + + if image_mean is None: + image_mean = [0.485, 0.456, 0.406] + if image_std is None: + image_std = [0.229, 0.224, 0.225] + self.transform = GeneralizedRCNNTransform(min(size), max(size), image_mean, image_std, + size_divisible=1, fixed_size=size) + + self.score_thresh = score_thresh + self.nms_thresh = nms_thresh + self.detections_per_img = detections_per_img + self.topk_candidates = topk_candidates + self.neg_to_pos_ratio = (1.0 - positive_fraction) / positive_fraction + + # used only on torchscript mode + self._has_warned = False + + @torch.jit.unused + def eager_outputs(self, losses: Dict[str, Tensor], + detections: List[Dict[str, Tensor]]) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: + if self.training: + return losses + + return detections + + def compute_loss(self, targets: List[Dict[str, Tensor]], head_outputs: Dict[str, Tensor], anchors: List[Tensor], + matched_idxs: List[Tensor]) -> Dict[str, Tensor]: + bbox_regression = head_outputs['bbox_regression'] + cls_logits = head_outputs['cls_logits'] + + # Match original targets with default boxes + num_foreground = 0 + bbox_loss = [] + cls_targets = [] + for (targets_per_image, bbox_regression_per_image, cls_logits_per_image, anchors_per_image, + matched_idxs_per_image) in zip(targets, bbox_regression, cls_logits, anchors, matched_idxs): + # produce the matching between boxes and targets + foreground_idxs_per_image = torch.where(matched_idxs_per_image >= 0)[0] + foreground_matched_idxs_per_image = matched_idxs_per_image[foreground_idxs_per_image] + num_foreground += foreground_matched_idxs_per_image.numel() + + # Calculate regression loss + matched_gt_boxes_per_image = targets_per_image['boxes'][foreground_matched_idxs_per_image] + bbox_regression_per_image = bbox_regression_per_image[foreground_idxs_per_image, :] + anchors_per_image = anchors_per_image[foreground_idxs_per_image, :] + target_regression = self.box_coder.encode_single(matched_gt_boxes_per_image, anchors_per_image) + bbox_loss.append(torch.nn.functional.smooth_l1_loss( + bbox_regression_per_image, + target_regression, + reduction='sum' + )) + + # Estimate ground truth for class targets + gt_classes_target = torch.zeros((cls_logits_per_image.size(0), ), dtype=targets_per_image['labels'].dtype, + device=targets_per_image['labels'].device) + gt_classes_target[foreground_idxs_per_image] = \ + 
targets_per_image['labels'][foreground_matched_idxs_per_image] + cls_targets.append(gt_classes_target) + + bbox_loss = torch.stack(bbox_loss) + cls_targets = torch.stack(cls_targets) + + # Calculate classification loss + num_classes = cls_logits.size(-1) + cls_loss = F.cross_entropy( + cls_logits.view(-1, num_classes), + cls_targets.view(-1), + reduction='none' + ).view(cls_targets.size()) + + # Hard Negative Sampling + foreground_idxs = cls_targets > 0 + num_negative = self.neg_to_pos_ratio * foreground_idxs.sum(1, keepdim=True) + # num_negative[num_negative < self.neg_to_pos_ratio] = self.neg_to_pos_ratio + negative_loss = cls_loss.clone() + negative_loss[foreground_idxs] = -float('inf') # use -inf to detect positive values that creeped in the sample + values, idx = negative_loss.sort(1, descending=True) + # background_idxs = torch.logical_and(idx.sort(1)[1] < num_negative, torch.isfinite(values)) + background_idxs = idx.sort(1)[1] < num_negative + + N = max(1, num_foreground) + return { + 'bbox_regression': bbox_loss.sum() / N, + 'classification': (cls_loss[foreground_idxs].sum() + cls_loss[background_idxs].sum()) / N, + } + + def forward(self, images: List[Tensor], + targets: Optional[List[Dict[str, Tensor]]] = None) -> Tuple[Dict[str, Tensor], List[Dict[str, Tensor]]]: + if self.training and targets is None: + raise ValueError("In training mode, targets should be passed") + + if self.training: + assert targets is not None + for target in targets: + boxes = target["boxes"] + if isinstance(boxes, torch.Tensor): + if len(boxes.shape) != 2 or boxes.shape[-1] != 4: + raise ValueError("Expected target boxes to be a tensor" + "of shape [N, 4], got {:}.".format( + boxes.shape)) + else: + raise ValueError("Expected target boxes to be of type " + "Tensor, got {:}.".format(type(boxes))) + + # get the original image sizes + original_image_sizes: List[Tuple[int, int]] = [] + for img in images: + val = img.shape[-2:] + assert len(val) == 2 + original_image_sizes.append((val[0], val[1])) + + # transform the input + images, targets = self.transform(images, targets) + + # Check for degenerate boxes + if targets is not None: + for target_idx, target in enumerate(targets): + boxes = target["boxes"] + degenerate_boxes = boxes[:, 2:] <= boxes[:, :2] + if degenerate_boxes.any(): + bb_idx = torch.where(degenerate_boxes.any(dim=1))[0][0] + degen_bb: List[float] = boxes[bb_idx].tolist() + raise ValueError("All bounding boxes should have positive height and width." + " Found invalid box {} for target at index {}." 
+ .format(degen_bb, target_idx)) + + # get the features from the backbone + features = self.backbone(images.tensors) + if isinstance(features, torch.Tensor): + features = OrderedDict([('0', features)]) + + features = list(features.values()) + + # compute the ssd heads outputs using the features + head_outputs = self.head(features) + + # create the set of anchors + anchors = self.anchor_generator(images, features) + + losses = {} + detections: List[Dict[str, Tensor]] = [] + if self.training: + assert targets is not None + + matched_idxs = [] + for anchors_per_image, targets_per_image in zip(anchors, targets): + if targets_per_image['boxes'].numel() == 0: + matched_idxs.append(torch.full((anchors_per_image.size(0),), -1, dtype=torch.int64, + device=anchors_per_image.device)) + continue + + match_quality_matrix = box_ops.box_iou(targets_per_image['boxes'], anchors_per_image) + matched_idxs.append(self.proposal_matcher(match_quality_matrix)) + + losses = self.compute_loss(targets, head_outputs, anchors, matched_idxs) + else: + detections = self.postprocess_detections(head_outputs, anchors, images.image_sizes) + detections = self.transform.postprocess(detections, images.image_sizes, original_image_sizes) + + if torch.jit.is_scripting(): + if not self._has_warned: + warnings.warn("SSD always returns a (Losses, Detections) tuple in scripting") + self._has_warned = True + return losses, detections + return self.eager_outputs(losses, detections) + + def postprocess_detections(self, head_outputs: Dict[str, Tensor], image_anchors: List[Tensor], + image_shapes: List[Tuple[int, int]]) -> List[Dict[str, Tensor]]: + bbox_regression = head_outputs['bbox_regression'] + pred_scores = F.softmax(head_outputs['cls_logits'], dim=-1) + + num_classes = pred_scores.size(-1) + device = pred_scores.device + + detections: List[Dict[str, Tensor]] = [] + + for boxes, scores, anchors, image_shape in zip(bbox_regression, pred_scores, image_anchors, image_shapes): + boxes = self.box_coder.decode_single(boxes, anchors) + boxes = box_ops.clip_boxes_to_image(boxes, image_shape) + + image_boxes = [] + image_scores = [] + image_labels = [] + for label in range(1, num_classes): + score = scores[:, label] + + keep_idxs = score > self.score_thresh + score = score[keep_idxs] + box = boxes[keep_idxs] + + # keep only topk scoring predictions + num_topk = min(self.topk_candidates, score.size(0)) + score, idxs = score.topk(num_topk) + box = box[idxs] + + image_boxes.append(box) + image_scores.append(score) + image_labels.append(torch.full_like(score, fill_value=label, dtype=torch.int64, device=device)) + + image_boxes = torch.cat(image_boxes, dim=0) + image_scores = torch.cat(image_scores, dim=0) + image_labels = torch.cat(image_labels, dim=0) + + # non-maximum suppression + keep = box_ops.batched_nms(image_boxes, image_scores, image_labels, self.nms_thresh) + keep = keep[:self.detections_per_img] + + detections.append({ + 'boxes': image_boxes[keep], + 'scores': image_scores[keep], + 'labels': image_labels[keep], + }) + return detections + + +class SSDFeatureExtractorVGG(nn.Module): + def __init__(self, backbone: nn.Module, highres: bool): + super().__init__() + + _, _, maxpool3_pos, maxpool4_pos, _ = (i for i, layer in enumerate(backbone) if isinstance(layer, nn.MaxPool2d)) + + # Patch ceil_mode for maxpool3 to get the same WxH output sizes as the paper + backbone[maxpool3_pos].ceil_mode = True + + # parameters used for L2 regularization + rescaling + self.scale_weight = nn.Parameter(torch.ones(512) * 20) + + # Multiple Feature maps 
- page 4, Fig 2 of SSD paper + self.features = nn.Sequential( + *backbone[:maxpool4_pos] # until conv4_3 + ) + + # SSD300 case - page 4, Fig 2 of SSD paper + extra = nn.ModuleList([ + nn.Sequential( + nn.Conv2d(1024, 256, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(256, 512, kernel_size=3, padding=1, stride=2), # conv8_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(512, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3, padding=1, stride=2), # conv9_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv10_2 + nn.ReLU(inplace=True), + ), + nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=3), # conv11_2 + nn.ReLU(inplace=True), + ) + ]) + if highres: + # Additional layers for the SSD512 case. See page 11, footernote 5. + extra.append(nn.Sequential( + nn.Conv2d(256, 128, kernel_size=1), + nn.ReLU(inplace=True), + nn.Conv2d(128, 256, kernel_size=4), # conv12_2 + nn.ReLU(inplace=True), + )) + _xavier_init(extra) + + fc = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=False), # add modified maxpool5 + nn.Conv2d(in_channels=512, out_channels=1024, kernel_size=3, padding=6, dilation=6), # FC6 with atrous + nn.ReLU(inplace=True), + nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=1), # FC7 + nn.ReLU(inplace=True) + ) + _xavier_init(fc) + extra.insert(0, nn.Sequential( + *backbone[maxpool4_pos:-1], # until conv5_3, skip maxpool5 + fc, + )) + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # L2 regularization + Rescaling of 1st block's feature map + x = self.features(x) + rescaled = self.scale_weight.view(1, -1, 1, 1) * F.normalize(x) + output = [rescaled] + + # Calculating Feature maps for the rest blocks + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _vgg_extractor(backbone_name: str, highres: bool, progress: bool, pretrained: bool, trainable_layers: int): + if backbone_name in backbone_urls: + # Use custom backbones more appropriate for SSD + arch = backbone_name.split('_')[0] + backbone = vgg.__dict__[arch](pretrained=False, progress=progress).features + if pretrained: + state_dict = load_state_dict_from_url(backbone_urls[backbone_name], progress=progress) + backbone.load_state_dict(state_dict) + else: + # Use standard backbones from TorchVision + backbone = vgg.__dict__[backbone_name](pretrained=pretrained, progress=progress).features + + # Gather the indices of maxpools. These are the locations of output blocks. + stage_indices = [i for i, b in enumerate(backbone) if isinstance(b, nn.MaxPool2d)] + num_stages = len(stage_indices) + + # find the index of the layer from which we wont freeze + assert 0 <= trainable_layers <= num_stages + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDFeatureExtractorVGG(backbone, highres) + + +def ssd300_vgg16(pretrained: bool = False, progress: bool = True, num_classes: int = 91, + pretrained_backbone: bool = True, trainable_backbone_layers: Optional[int] = None, **kwargs: Any): + """Constructs an SSD model with input size 300x300 and a VGG16 backbone. 
+ + Reference: `"SSD: Single Shot MultiBox Detector" <https://arxiv.org/abs/1512.02325>`_. + + The input to the model is expected to be a list of tensors, each of shape [C, H, W], one for each + image, and should be in 0-1 range. Different images can have different sizes but they will be resized + to a fixed size before passing it to the backbone. + + The behavior of the model changes depending if it is in training or evaluation mode. + + During training, the model expects both the input tensors, as well as a targets (list of dictionary), + containing: + + - boxes (``FloatTensor[N, 4]``): the ground-truth boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the class label for each ground-truth box + + The model returns a Dict[Tensor] during training, containing the classification and regression + losses. + + During inference, the model requires only the input tensors, and returns the post-processed + predictions as a List[Dict[Tensor]], one for each input image. The fields of the Dict are as + follows, where ``N`` is the number of detections: + + - boxes (``FloatTensor[N, 4]``): the predicted boxes in ``[x1, y1, x2, y2]`` format, with + ``0 <= x1 < x2 <= W`` and ``0 <= y1 < y2 <= H``. + - labels (Int64Tensor[N]): the predicted labels for each detection + - scores (Tensor[N]): the scores for each detection + + Example: + + >>> model = torchvision.models.detection.ssd300_vgg16(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 300, 300), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 5, with 5 meaning all backbone layers are trainable. 
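+
+    A minimal fine-tuning sketch (editor's addition, not part of upstream torchvision), keeping only the last
+    two VGG stages of the backbone trainable:
+
+        >>> model = torchvision.models.detection.ssd300_vgg16(pretrained=True, trainable_backbone_layers=2)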
+ """ + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the argument.") + + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 5, 5) + + if pretrained: + # no need to download the backbone if pretrained is set + pretrained_backbone = False + + backbone = _vgg_extractor("vgg16_features", False, progress, pretrained_backbone, trainable_backbone_layers) + anchor_generator = DefaultBoxGenerator([[2], [2, 3], [2, 3], [2, 3], [2], [2]], + scales=[0.07, 0.15, 0.33, 0.51, 0.69, 0.87, 1.05], + steps=[8, 16, 32, 64, 100, 300]) + + defaults = { + # Rescale the input in a way compatible to the backbone + "image_mean": [0.48235, 0.45882, 0.40784], + "image_std": [1.0 / 255.0, 1.0 / 255.0, 1.0 / 255.0], # undo the 0-1 scaling of toTensor + } + kwargs = {**defaults, **kwargs} + model = SSD(backbone, anchor_generator, (300, 300), num_classes, **kwargs) + if pretrained: + weights_name = 'ssd300_vgg16_coco' + if model_urls.get(weights_name, None) is None: + raise ValueError("No checkpoint is available for model {}".format(weights_name)) + state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) + model.load_state_dict(state_dict) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssdlite.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssdlite.py new file mode 100644 index 0000000000000000000000000000000000000000..26378d7038ddf8978f40b5744e6651e7b310d256 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/ssdlite.py @@ -0,0 +1,231 @@ +import torch +import warnings + +from collections import OrderedDict +from functools import partial +from torch import nn, Tensor +from typing import Any, Callable, Dict, List, Optional, Tuple + +from . import _utils as det_utils +from .ssd import SSD, SSDScoringHead +from .anchor_utils import DefaultBoxGenerator +from .backbone_utils import _validate_trainable_layers +from .. 
import mobilenet +from ..mobilenetv3 import ConvBNActivation +from ..utils import load_state_dict_from_url + + +__all__ = ['ssdlite320_mobilenet_v3_large'] + +model_urls = { + 'ssdlite320_mobilenet_v3_large_coco': + 'https://download.pytorch.org/models/ssdlite320_mobilenet_v3_large_coco-a79551df.pth' +} + + +# Building blocks of SSDlite as described in section 6.2 of MobileNetV2 paper +def _prediction_block(in_channels: int, out_channels: int, kernel_size: int, + norm_layer: Callable[..., nn.Module]) -> nn.Sequential: + return nn.Sequential( + # 3x3 depthwise with stride 1 and padding 1 + ConvBNActivation(in_channels, in_channels, kernel_size=kernel_size, groups=in_channels, + norm_layer=norm_layer, activation_layer=nn.ReLU6), + + # 1x1 projetion to output channels + nn.Conv2d(in_channels, out_channels, 1) + ) + + +def _extra_block(in_channels: int, out_channels: int, norm_layer: Callable[..., nn.Module]) -> nn.Sequential: + activation = nn.ReLU6 + intermediate_channels = out_channels // 2 + return nn.Sequential( + # 1x1 projection to half output channels + ConvBNActivation(in_channels, intermediate_channels, kernel_size=1, + norm_layer=norm_layer, activation_layer=activation), + + # 3x3 depthwise with stride 2 and padding 1 + ConvBNActivation(intermediate_channels, intermediate_channels, kernel_size=3, stride=2, + groups=intermediate_channels, norm_layer=norm_layer, activation_layer=activation), + + # 1x1 projetion to output channels + ConvBNActivation(intermediate_channels, out_channels, kernel_size=1, + norm_layer=norm_layer, activation_layer=activation), + ) + + +def _normal_init(conv: nn.Module): + for layer in conv.modules(): + if isinstance(layer, nn.Conv2d): + torch.nn.init.normal_(layer.weight, mean=0.0, std=0.03) + if layer.bias is not None: + torch.nn.init.constant_(layer.bias, 0.0) + + +class SSDLiteHead(nn.Module): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int, + norm_layer: Callable[..., nn.Module]): + super().__init__() + self.classification_head = SSDLiteClassificationHead(in_channels, num_anchors, num_classes, norm_layer) + self.regression_head = SSDLiteRegressionHead(in_channels, num_anchors, norm_layer) + + def forward(self, x: List[Tensor]) -> Dict[str, Tensor]: + return { + 'bbox_regression': self.regression_head(x), + 'cls_logits': self.classification_head(x), + } + + +class SSDLiteClassificationHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], num_classes: int, + norm_layer: Callable[..., nn.Module]): + cls_logits = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + cls_logits.append(_prediction_block(channels, num_classes * anchors, 3, norm_layer)) + _normal_init(cls_logits) + super().__init__(cls_logits, num_classes) + + +class SSDLiteRegressionHead(SSDScoringHead): + def __init__(self, in_channels: List[int], num_anchors: List[int], norm_layer: Callable[..., nn.Module]): + bbox_reg = nn.ModuleList() + for channels, anchors in zip(in_channels, num_anchors): + bbox_reg.append(_prediction_block(channels, 4 * anchors, 3, norm_layer)) + _normal_init(bbox_reg) + super().__init__(bbox_reg, 4) + + +class SSDLiteFeatureExtractorMobileNet(nn.Module): + def __init__(self, backbone: nn.Module, c4_pos: int, norm_layer: Callable[..., nn.Module], width_mult: float = 1.0, + min_depth: int = 16, **kwargs: Any): + super().__init__() + + assert not backbone[c4_pos].use_res_connect + self.features = nn.Sequential( + # As described in section 6.3 of MobileNetV3 paper + 
nn.Sequential(*backbone[:c4_pos], backbone[c4_pos].block[0]), # from start until C4 expansion layer + nn.Sequential(backbone[c4_pos].block[1:], *backbone[c4_pos + 1:]), # from C4 depthwise until end + ) + + get_depth = lambda d: max(min_depth, int(d * width_mult)) # noqa: E731 + extra = nn.ModuleList([ + _extra_block(backbone[-1].out_channels, get_depth(512), norm_layer), + _extra_block(get_depth(512), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(256), norm_layer), + _extra_block(get_depth(256), get_depth(128), norm_layer), + ]) + _normal_init(extra) + + self.extra = extra + + def forward(self, x: Tensor) -> Dict[str, Tensor]: + # Get feature maps from backbone and extra. Can't be refactored due to JIT limitations. + output = [] + for block in self.features: + x = block(x) + output.append(x) + + for block in self.extra: + x = block(x) + output.append(x) + + return OrderedDict([(str(i), v) for i, v in enumerate(output)]) + + +def _mobilenet_extractor(backbone_name: str, progress: bool, pretrained: bool, trainable_layers: int, + norm_layer: Callable[..., nn.Module], **kwargs: Any): + backbone = mobilenet.__dict__[backbone_name](pretrained=pretrained, progress=progress, + norm_layer=norm_layer, **kwargs).features + if not pretrained: + # Change the default initialization scheme if not pretrained + _normal_init(backbone) + + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. + stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + num_stages = len(stage_indices) + + # find the index of the layer from which we wont freeze + assert 0 <= trainable_layers <= num_stages + freeze_before = len(backbone) if trainable_layers == 0 else stage_indices[num_stages - trainable_layers] + + for b in backbone[:freeze_before]: + for parameter in b.parameters(): + parameter.requires_grad_(False) + + return SSDLiteFeatureExtractorMobileNet(backbone, stage_indices[-2], norm_layer, **kwargs) + + +def ssdlite320_mobilenet_v3_large(pretrained: bool = False, progress: bool = True, num_classes: int = 91, + pretrained_backbone: bool = False, trainable_backbone_layers: Optional[int] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None, + **kwargs: Any): + """Constructs an SSDlite model with input size 320x320 and a MobileNetV3 Large backbone, as described at + `"Searching for MobileNetV3" + <https://arxiv.org/abs/1905.02244>`_ and + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" + <https://arxiv.org/abs/1801.04381>`_. + + See :func:`~torchvision.models.detection.ssd300_vgg16` for more details. + + Example: + + >>> model = torchvision.models.detection.ssdlite320_mobilenet_v3_large(pretrained=True) + >>> model.eval() + >>> x = [torch.rand(3, 320, 320), torch.rand(3, 500, 400)] + >>> predictions = model(x) + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + pretrained_backbone (bool): If True, returns a model with backbone pre-trained on Imagenet + trainable_backbone_layers (int): number of trainable (not frozen) resnet layers starting from final block. + Valid values are between 0 and 6, with 6 meaning all backbone layers are trainable. 
+ norm_layer (callable, optional): Module specifying the normalization layer to use. + """ + if "size" in kwargs: + warnings.warn("The size of the model is already fixed; ignoring the argument.") + + trainable_backbone_layers = _validate_trainable_layers( + pretrained or pretrained_backbone, trainable_backbone_layers, 6, 6) + + if pretrained: + pretrained_backbone = False + + # Enable reduced tail if no pretrained backbone is selected. See Table 6 of MobileNetV3 paper. + reduce_tail = not pretrained_backbone + + if norm_layer is None: + norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.03) + + backbone = _mobilenet_extractor("mobilenet_v3_large", progress, pretrained_backbone, trainable_backbone_layers, + norm_layer, reduced_tail=reduce_tail, **kwargs) + + size = (320, 320) + anchor_generator = DefaultBoxGenerator([[2, 3] for _ in range(6)], min_ratio=0.2, max_ratio=0.95) + out_channels = det_utils.retrieve_out_channels(backbone, size) + num_anchors = anchor_generator.num_anchors_per_location() + assert len(out_channels) == len(anchor_generator.aspect_ratios) + + defaults = { + "score_thresh": 0.001, + "nms_thresh": 0.55, + "detections_per_img": 300, + "topk_candidates": 300, + # Rescale the input in a way compatible to the backbone: + # The following mean/std rescale the data from [0, 1] to [-1, -1] + "image_mean": [0.5, 0.5, 0.5], + "image_std": [0.5, 0.5, 0.5], + } + kwargs = {**defaults, **kwargs} + model = SSD(backbone, anchor_generator, size, num_classes, + head=SSDLiteHead(out_channels, num_anchors, num_classes, norm_layer), **kwargs) + + if pretrained: + weights_name = 'ssdlite320_mobilenet_v3_large_coco' + if model_urls.get(weights_name, None) is None: + raise ValueError("No checkpoint is available for model {}".format(weights_name)) + state_dict = load_state_dict_from_url(model_urls[weights_name], progress=progress) + model.load_state_dict(state_dict) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/transform.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/transform.py new file mode 100644 index 0000000000000000000000000000000000000000..0ca5273e047f1c76762b6489f290d5b74ce5cb6c --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/detection/transform.py @@ -0,0 +1,293 @@ +import math +import torch +import torchvision + +from torch import nn, Tensor +from typing import List, Tuple, Dict, Optional + +from .image_list import ImageList +from .roi_heads import paste_masks_in_image + + +@torch.jit.unused +def _get_shape_onnx(image): + # type: (Tensor) -> Tensor + from torch.onnx import operators + return operators.shape_as_tensor(image)[-2:] + + +@torch.jit.unused +def _fake_cast_onnx(v): + # type: (Tensor) -> float + # ONNX requires a tensor but here we fake its type for JIT. 
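+    # Editorial note (not in upstream torchvision): at ONNX export time the value therefore stays a 0-dim
+    # tensor, so the resize scale computed in _resize_image_and_masks remains dynamic in the exported graph,
+    # while TorchScript still type-checks it as a float.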
+ return v + + +def _resize_image_and_masks(image: Tensor, self_min_size: float, self_max_size: float, + target: Optional[Dict[str, Tensor]] = None, + fixed_size: Optional[Tuple[int, int]] = None, + ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + if torchvision._is_tracing(): + im_shape = _get_shape_onnx(image) + else: + im_shape = torch.tensor(image.shape[-2:]) + + size: Optional[List[int]] = None + scale_factor: Optional[float] = None + recompute_scale_factor: Optional[bool] = None + if fixed_size is not None: + size = [fixed_size[1], fixed_size[0]] + else: + min_size = torch.min(im_shape).to(dtype=torch.float32) + max_size = torch.max(im_shape).to(dtype=torch.float32) + scale = torch.min(self_min_size / min_size, self_max_size / max_size) + + if torchvision._is_tracing(): + scale_factor = _fake_cast_onnx(scale) + else: + scale_factor = scale.item() + recompute_scale_factor = True + + image = torch.nn.functional.interpolate(image[None], size=size, scale_factor=scale_factor, mode='bilinear', + recompute_scale_factor=recompute_scale_factor, align_corners=False)[0] + + if target is None: + return image, target + + if "masks" in target: + mask = target["masks"] + mask = torch.nn.functional.interpolate(mask[:, None].float(), size=size, scale_factor=scale_factor, + recompute_scale_factor=recompute_scale_factor)[:, 0].byte() + target["masks"] = mask + return image, target + + +class GeneralizedRCNNTransform(nn.Module): + """ + Performs input / target transformation before feeding the data to a GeneralizedRCNN + model. + + The transformations it perform are: + - input normalization (mean subtraction and std division) + - input / target resizing to match min_size / max_size + + It returns a ImageList for the inputs, and a List[Dict[Tensor]] for the targets + """ + + def __init__(self, min_size, max_size, image_mean, image_std, size_divisible=32, fixed_size=None): + super(GeneralizedRCNNTransform, self).__init__() + if not isinstance(min_size, (list, tuple)): + min_size = (min_size,) + self.min_size = min_size + self.max_size = max_size + self.image_mean = image_mean + self.image_std = image_std + self.size_divisible = size_divisible + self.fixed_size = fixed_size + + def forward(self, + images, # type: List[Tensor] + targets=None # type: Optional[List[Dict[str, Tensor]]] + ): + # type: (...) 
-> Tuple[ImageList, Optional[List[Dict[str, Tensor]]]] + images = [img for img in images] + if targets is not None: + # make a copy of targets to avoid modifying it in-place + # once torchscript supports dict comprehension + # this can be simplified as follows + # targets = [{k: v for k,v in t.items()} for t in targets] + targets_copy: List[Dict[str, Tensor]] = [] + for t in targets: + data: Dict[str, Tensor] = {} + for k, v in t.items(): + data[k] = v + targets_copy.append(data) + targets = targets_copy + for i in range(len(images)): + image = images[i] + target_index = targets[i] if targets is not None else None + + if image.dim() != 3: + raise ValueError("images is expected to be a list of 3d tensors " + "of shape [C, H, W], got {}".format(image.shape)) + image = self.normalize(image) + image, target_index = self.resize(image, target_index) + images[i] = image + if targets is not None and target_index is not None: + targets[i] = target_index + + image_sizes = [img.shape[-2:] for img in images] + images = self.batch_images(images, size_divisible=self.size_divisible) + image_sizes_list: List[Tuple[int, int]] = [] + for image_size in image_sizes: + assert len(image_size) == 2 + image_sizes_list.append((image_size[0], image_size[1])) + + image_list = ImageList(images, image_sizes_list) + return image_list, targets + + def normalize(self, image): + if not image.is_floating_point(): + raise TypeError( + f"Expected input images to be of floating type (in range [0, 1]), " + f"but found type {image.dtype} instead" + ) + dtype, device = image.dtype, image.device + mean = torch.as_tensor(self.image_mean, dtype=dtype, device=device) + std = torch.as_tensor(self.image_std, dtype=dtype, device=device) + return (image - mean[:, None, None]) / std[:, None, None] + + def torch_choice(self, k): + # type: (List[int]) -> int + """ + Implements `random.choice` via torch ops so it can be compiled with + TorchScript. Remove if https://github.com/pytorch/pytorch/issues/25803 + is fixed. + """ + index = int(torch.empty(1).uniform_(0., float(len(k))).item()) + return k[index] + + def resize(self, + image: Tensor, + target: Optional[Dict[str, Tensor]] = None, + ) -> Tuple[Tensor, Optional[Dict[str, Tensor]]]: + h, w = image.shape[-2:] + if self.training: + size = float(self.torch_choice(self.min_size)) + else: + # FIXME assume for now that testing uses the largest scale + size = float(self.min_size[-1]) + image, target = _resize_image_and_masks(image, size, float(self.max_size), target, self.fixed_size) + + if target is None: + return image, target + + bbox = target["boxes"] + bbox = resize_boxes(bbox, (h, w), image.shape[-2:]) + target["boxes"] = bbox + + if "keypoints" in target: + keypoints = target["keypoints"] + keypoints = resize_keypoints(keypoints, (h, w), image.shape[-2:]) + target["keypoints"] = keypoints + return image, target + + # _onnx_batch_images() is an implementation of + # batch_images() that is supported by ONNX tracing. 
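+    # Editorial note (not in upstream torchvision): batch_images() below copies each image into a
+    # pre-allocated batch tensor via copy_(), which ONNX export does not support; the ONNX path instead
+    # pads every image with torch.nn.functional.pad and stacks the results.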
+ @torch.jit.unused + def _onnx_batch_images(self, images, size_divisible=32): + # type: (List[Tensor], int) -> Tensor + max_size = [] + for i in range(images[0].dim()): + max_size_i = torch.max(torch.stack([img.shape[i] for img in images]).to(torch.float32)).to(torch.int64) + max_size.append(max_size_i) + stride = size_divisible + max_size[1] = (torch.ceil((max_size[1].to(torch.float32)) / stride) * stride).to(torch.int64) + max_size[2] = (torch.ceil((max_size[2].to(torch.float32)) / stride) * stride).to(torch.int64) + max_size = tuple(max_size) + + # work around for + # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + # which is not yet supported in onnx + padded_imgs = [] + for img in images: + padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))] + padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0])) + padded_imgs.append(padded_img) + + return torch.stack(padded_imgs) + + def max_by_axis(self, the_list): + # type: (List[List[int]]) -> List[int] + maxes = the_list[0] + for sublist in the_list[1:]: + for index, item in enumerate(sublist): + maxes[index] = max(maxes[index], item) + return maxes + + def batch_images(self, images, size_divisible=32): + # type: (List[Tensor], int) -> Tensor + if torchvision._is_tracing(): + # batch_images() does not export well to ONNX + # call _onnx_batch_images() instead + return self._onnx_batch_images(images, size_divisible) + + max_size = self.max_by_axis([list(img.shape) for img in images]) + stride = float(size_divisible) + max_size = list(max_size) + max_size[1] = int(math.ceil(float(max_size[1]) / stride) * stride) + max_size[2] = int(math.ceil(float(max_size[2]) / stride) * stride) + + batch_shape = [len(images)] + max_size + batched_imgs = images[0].new_full(batch_shape, 0) + for img, pad_img in zip(images, batched_imgs): + pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) + + return batched_imgs + + def postprocess(self, + result, # type: List[Dict[str, Tensor]] + image_shapes, # type: List[Tuple[int, int]] + original_image_sizes # type: List[Tuple[int, int]] + ): + # type: (...) 
-> List[Dict[str, Tensor]] + if self.training: + return result + for i, (pred, im_s, o_im_s) in enumerate(zip(result, image_shapes, original_image_sizes)): + boxes = pred["boxes"] + boxes = resize_boxes(boxes, im_s, o_im_s) + result[i]["boxes"] = boxes + if "masks" in pred: + masks = pred["masks"] + masks = paste_masks_in_image(masks, boxes, o_im_s) + result[i]["masks"] = masks + if "keypoints" in pred: + keypoints = pred["keypoints"] + keypoints = resize_keypoints(keypoints, im_s, o_im_s) + result[i]["keypoints"] = keypoints + return result + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + _indent = '\n ' + format_string += "{0}Normalize(mean={1}, std={2})".format(_indent, self.image_mean, self.image_std) + format_string += "{0}Resize(min_size={1}, max_size={2}, mode='bilinear')".format(_indent, self.min_size, + self.max_size) + format_string += '\n)' + return format_string + + +def resize_keypoints(keypoints, original_size, new_size): + # type: (Tensor, List[int], List[int]) -> Tensor + ratios = [ + torch.tensor(s, dtype=torch.float32, device=keypoints.device) / + torch.tensor(s_orig, dtype=torch.float32, device=keypoints.device) + for s, s_orig in zip(new_size, original_size) + ] + ratio_h, ratio_w = ratios + resized_data = keypoints.clone() + if torch._C._get_tracing_state(): + resized_data_0 = resized_data[:, :, 0] * ratio_w + resized_data_1 = resized_data[:, :, 1] * ratio_h + resized_data = torch.stack((resized_data_0, resized_data_1, resized_data[:, :, 2]), dim=2) + else: + resized_data[..., 0] *= ratio_w + resized_data[..., 1] *= ratio_h + return resized_data + + +def resize_boxes(boxes, original_size, new_size): + # type: (Tensor, List[int], List[int]) -> Tensor + ratios = [ + torch.tensor(s, dtype=torch.float32, device=boxes.device) / + torch.tensor(s_orig, dtype=torch.float32, device=boxes.device) + for s, s_orig in zip(new_size, original_size) + ] + ratio_height, ratio_width = ratios + xmin, ymin, xmax, ymax = boxes.unbind(1) + + xmin = xmin * ratio_width + xmax = xmax * ratio_width + ymin = ymin * ratio_height + ymax = ymax * ratio_height + return torch.stack((xmin, ymin, xmax, ymax), dim=1) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/googlenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/googlenet.py new file mode 100644 index 0000000000000000000000000000000000000000..cef48dea76a68d087382dd71a9a15e8b68733dab --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/googlenet.py @@ -0,0 +1,316 @@ +import warnings +from collections import namedtuple +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch import Tensor +from .utils import load_state_dict_from_url +from typing import Optional, Tuple, List, Callable, Any + +__all__ = ['GoogLeNet', 'googlenet', "GoogLeNetOutputs", "_GoogLeNetOutputs"] + +model_urls = { + # GoogLeNet ported from TensorFlow + 'googlenet': 'https://download.pytorch.org/models/googlenet-1378be20.pth', +} + +GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1']) +GoogLeNetOutputs.__annotations__ = {'logits': Tensor, 'aux_logits2': Optional[Tensor], + 'aux_logits1': Optional[Tensor]} + +# Script annotations failed with _GoogleNetOutputs = namedtuple ... 
+# _GoogLeNetOutputs set here for backwards compat +_GoogLeNetOutputs = GoogLeNetOutputs + + +def googlenet(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "GoogLeNet": + r"""GoogLeNet (Inception v1) model architecture from + `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + aux_logits (bool): If True, adds two auxiliary branches that can improve training. + Default: *False* when pretrained is True otherwise *True* + transform_input (bool): If True, preprocesses the input according to the method with which it + was trained on ImageNet. Default: *False* + """ + if pretrained: + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' not in kwargs: + kwargs['aux_logits'] = False + if kwargs['aux_logits']: + warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, ' + 'so make sure to train them') + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + kwargs['init_weights'] = False + model = GoogLeNet(**kwargs) + state_dict = load_state_dict_from_url(model_urls['googlenet'], + progress=progress) + model.load_state_dict(state_dict) + if not original_aux_logits: + model.aux_logits = False + model.aux1 = None # type: ignore[assignment] + model.aux2 = None # type: ignore[assignment] + return model + + return GoogLeNet(**kwargs) + + +class GoogLeNet(nn.Module): + __constants__ = ['aux_logits', 'transform_input'] + + def __init__( + self, + num_classes: int = 1000, + aux_logits: bool = True, + transform_input: bool = False, + init_weights: Optional[bool] = None, + blocks: Optional[List[Callable[..., nn.Module]]] = None + ) -> None: + super(GoogLeNet, self).__init__() + if blocks is None: + blocks = [BasicConv2d, Inception, InceptionAux] + if init_weights is None: + warnings.warn('The default weight initialization of GoogleNet will be changed in future releases of ' + 'torchvision. 
If you wish to keep the old behavior (which leads to long initialization times' + ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning) + init_weights = True + assert len(blocks) == 3 + conv_block = blocks[0] + inception_block = blocks[1] + inception_aux_block = blocks[2] + + self.aux_logits = aux_logits + self.transform_input = transform_input + + self.conv1 = conv_block(3, 64, kernel_size=7, stride=2, padding=3) + self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + self.conv2 = conv_block(64, 64, kernel_size=1) + self.conv3 = conv_block(64, 192, kernel_size=3, padding=1) + self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception3a = inception_block(192, 64, 96, 128, 16, 32, 32) + self.inception3b = inception_block(256, 128, 128, 192, 32, 96, 64) + self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True) + + self.inception4a = inception_block(480, 192, 96, 208, 16, 48, 64) + self.inception4b = inception_block(512, 160, 112, 224, 24, 64, 64) + self.inception4c = inception_block(512, 128, 128, 256, 24, 64, 64) + self.inception4d = inception_block(512, 112, 144, 288, 32, 64, 64) + self.inception4e = inception_block(528, 256, 160, 320, 32, 128, 128) + self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True) + + self.inception5a = inception_block(832, 256, 160, 320, 32, 128, 128) + self.inception5b = inception_block(832, 384, 192, 384, 48, 128, 128) + + if aux_logits: + self.aux1 = inception_aux_block(512, num_classes) + self.aux2 = inception_aux_block(528, num_classes) + else: + self.aux1 = None # type: ignore[assignment] + self.aux2 = None # type: ignore[assignment] + + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout(0.2) + self.fc = nn.Linear(1024, num_classes) + + if init_weights: + self._initialize_weights() + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + X = stats.truncnorm(-2, 2, scale=0.01) + values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) + values = values.view(m.weight.size()) + with torch.no_grad(): + m.weight.copy_(values) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _transform_input(self, x: Tensor) -> Tensor: + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + return x + + def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor], Optional[Tensor]]: + # N x 3 x 224 x 224 + x = self.conv1(x) + # N x 64 x 112 x 112 + x = self.maxpool1(x) + # N x 64 x 56 x 56 + x = self.conv2(x) + # N x 64 x 56 x 56 + x = self.conv3(x) + # N x 192 x 56 x 56 + x = self.maxpool2(x) + + # N x 192 x 28 x 28 + x = self.inception3a(x) + # N x 256 x 28 x 28 + x = self.inception3b(x) + # N x 480 x 28 x 28 + x = self.maxpool3(x) + # N x 480 x 14 x 14 + x = self.inception4a(x) + # N x 512 x 14 x 14 + aux1: Optional[Tensor] = None + if self.aux1 is not None: + if self.training: + aux1 = self.aux1(x) + + x = self.inception4b(x) + # N x 512 x 14 x 14 + x = self.inception4c(x) + # N x 512 x 14 x 14 + x = self.inception4d(x) + # N x 528 x 14 x 14 + aux2: Optional[Tensor] = None + if self.aux2 is not None: + if self.training: + aux2 = self.aux2(x) + + x = self.inception4e(x) + # N x 832 x 14 x 
14 + x = self.maxpool4(x) + # N x 832 x 7 x 7 + x = self.inception5a(x) + # N x 832 x 7 x 7 + x = self.inception5b(x) + # N x 1024 x 7 x 7 + + x = self.avgpool(x) + # N x 1024 x 1 x 1 + x = torch.flatten(x, 1) + # N x 1024 + x = self.dropout(x) + x = self.fc(x) + # N x 1000 (num_classes) + return x, aux2, aux1 + + @torch.jit.unused + def eager_outputs(self, x: Tensor, aux2: Tensor, aux1: Optional[Tensor]) -> GoogLeNetOutputs: + if self.training and self.aux_logits: + return _GoogLeNetOutputs(x, aux2, aux1) + else: + return x # type: ignore[return-value] + + def forward(self, x: Tensor) -> GoogLeNetOutputs: + x = self._transform_input(x) + x, aux1, aux2 = self._forward(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted GoogleNet always returns GoogleNetOutputs Tuple") + return GoogLeNetOutputs(x, aux2, aux1) + else: + return self.eager_outputs(x, aux2, aux1) + + +class Inception(nn.Module): + + def __init__( + self, + in_channels: int, + ch1x1: int, + ch3x3red: int, + ch3x3: int, + ch5x5red: int, + ch5x5: int, + pool_proj: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(Inception, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1 = conv_block(in_channels, ch1x1, kernel_size=1) + + self.branch2 = nn.Sequential( + conv_block(in_channels, ch3x3red, kernel_size=1), + conv_block(ch3x3red, ch3x3, kernel_size=3, padding=1) + ) + + self.branch3 = nn.Sequential( + conv_block(in_channels, ch5x5red, kernel_size=1), + # Here, kernel_size=3 instead of kernel_size=5 is a known bug. + # Please see https://github.com/pytorch/vision/issues/906 for details. + conv_block(ch5x5red, ch5x5, kernel_size=3, padding=1) + ) + + self.branch4 = nn.Sequential( + nn.MaxPool2d(kernel_size=3, stride=1, padding=1, ceil_mode=True), + conv_block(in_channels, pool_proj, kernel_size=1) + ) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch1 = self.branch1(x) + branch2 = self.branch2(x) + branch3 = self.branch3(x) + branch4 = self.branch4(x) + + outputs = [branch1, branch2, branch3, branch4] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__( + self, + in_channels: int, + num_classes: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv = conv_block(in_channels, 128, kernel_size=1) + + self.fc1 = nn.Linear(2048, 1024) + self.fc2 = nn.Linear(1024, num_classes) + + def forward(self, x: Tensor) -> Tensor: + # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 + x = F.adaptive_avg_pool2d(x, (4, 4)) + # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 + x = self.conv(x) + # N x 128 x 4 x 4 + x = torch.flatten(x, 1) + # N x 2048 + x = F.relu(self.fc1(x), inplace=True) + # N x 1024 + x = F.dropout(x, 0.7, training=self.training) + # N x 1024 + x = self.fc2(x) + # N x 1000 (num_classes) + + return x + + +class BasicConv2d(nn.Module): + + def __init__( + self, + in_channels: int, + out_channels: int, + **kwargs: Any + ) -> None: + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x: Tensor) -> Tensor: + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) diff --git 
a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/inception.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/inception.py new file mode 100644 index 0000000000000000000000000000000000000000..05b1cf07620cff73f163154c3a1e0ccea735c6df --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/inception.py @@ -0,0 +1,478 @@ +from collections import namedtuple +import warnings +import torch +from torch import nn, Tensor +import torch.nn.functional as F +from .utils import load_state_dict_from_url +from typing import Callable, Any, Optional, Tuple, List + + +__all__ = ['Inception3', 'inception_v3', 'InceptionOutputs', '_InceptionOutputs'] + + +model_urls = { + # Inception v3 ported from TensorFlow + 'inception_v3_google': 'https://download.pytorch.org/models/inception_v3_google-0cc3c7bd.pth', +} + +InceptionOutputs = namedtuple('InceptionOutputs', ['logits', 'aux_logits']) +InceptionOutputs.__annotations__ = {'logits': Tensor, 'aux_logits': Optional[Tensor]} + +# Script annotations failed with _GoogleNetOutputs = namedtuple ... +# _InceptionOutputs set here for backwards compat +_InceptionOutputs = InceptionOutputs + + +def inception_v3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> "Inception3": + r"""Inception v3 model architecture from + `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. + + .. note:: + **Important**: In contrast to the other models the inception_v3 expects tensors with a size of + N x 3 x 299 x 299, so ensure your images are sized accordingly. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + aux_logits (bool): If True, add an auxiliary branch that can improve training. + Default: *True* + transform_input (bool): If True, preprocesses the input according to the method with which it + was trained on ImageNet. Default: *False* + """ + if pretrained: + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' in kwargs: + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + else: + original_aux_logits = True + kwargs['init_weights'] = False # we are loading weights from a pretrained model + model = Inception3(**kwargs) + state_dict = load_state_dict_from_url(model_urls['inception_v3_google'], + progress=progress) + model.load_state_dict(state_dict) + if not original_aux_logits: + model.aux_logits = False + model.AuxLogits = None + return model + + return Inception3(**kwargs) + + +class Inception3(nn.Module): + + def __init__( + self, + num_classes: int = 1000, + aux_logits: bool = True, + transform_input: bool = False, + inception_blocks: Optional[List[Callable[..., nn.Module]]] = None, + init_weights: Optional[bool] = None + ) -> None: + super(Inception3, self).__init__() + if inception_blocks is None: + inception_blocks = [ + BasicConv2d, InceptionA, InceptionB, InceptionC, + InceptionD, InceptionE, InceptionAux + ] + if init_weights is None: + warnings.warn('The default weight initialization of inception_v3 will be changed in future releases of ' + 'torchvision. 
If you wish to keep the old behavior (which leads to long initialization times' + ' due to scipy/scipy#11299), please set init_weights=True.', FutureWarning) + init_weights = True + assert len(inception_blocks) == 7 + conv_block = inception_blocks[0] + inception_a = inception_blocks[1] + inception_b = inception_blocks[2] + inception_c = inception_blocks[3] + inception_d = inception_blocks[4] + inception_e = inception_blocks[5] + inception_aux = inception_blocks[6] + + self.aux_logits = aux_logits + self.transform_input = transform_input + self.Conv2d_1a_3x3 = conv_block(3, 32, kernel_size=3, stride=2) + self.Conv2d_2a_3x3 = conv_block(32, 32, kernel_size=3) + self.Conv2d_2b_3x3 = conv_block(32, 64, kernel_size=3, padding=1) + self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Conv2d_3b_1x1 = conv_block(64, 80, kernel_size=1) + self.Conv2d_4a_3x3 = conv_block(80, 192, kernel_size=3) + self.maxpool2 = nn.MaxPool2d(kernel_size=3, stride=2) + self.Mixed_5b = inception_a(192, pool_features=32) + self.Mixed_5c = inception_a(256, pool_features=64) + self.Mixed_5d = inception_a(288, pool_features=64) + self.Mixed_6a = inception_b(288) + self.Mixed_6b = inception_c(768, channels_7x7=128) + self.Mixed_6c = inception_c(768, channels_7x7=160) + self.Mixed_6d = inception_c(768, channels_7x7=160) + self.Mixed_6e = inception_c(768, channels_7x7=192) + self.AuxLogits: Optional[nn.Module] = None + if aux_logits: + self.AuxLogits = inception_aux(768, num_classes) + self.Mixed_7a = inception_d(768) + self.Mixed_7b = inception_e(1280) + self.Mixed_7c = inception_e(2048) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout() + self.fc = nn.Linear(2048, num_classes) + if init_weights: + for m in self.modules(): + if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear): + import scipy.stats as stats + stddev = m.stddev if hasattr(m, 'stddev') else 0.1 + X = stats.truncnorm(-2, 2, scale=stddev) + values = torch.as_tensor(X.rvs(m.weight.numel()), dtype=m.weight.dtype) + values = values.view(m.weight.size()) + with torch.no_grad(): + m.weight.copy_(values) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + def _transform_input(self, x: Tensor) -> Tensor: + if self.transform_input: + x_ch0 = torch.unsqueeze(x[:, 0], 1) * (0.229 / 0.5) + (0.485 - 0.5) / 0.5 + x_ch1 = torch.unsqueeze(x[:, 1], 1) * (0.224 / 0.5) + (0.456 - 0.5) / 0.5 + x_ch2 = torch.unsqueeze(x[:, 2], 1) * (0.225 / 0.5) + (0.406 - 0.5) / 0.5 + x = torch.cat((x_ch0, x_ch1, x_ch2), 1) + return x + + def _forward(self, x: Tensor) -> Tuple[Tensor, Optional[Tensor]]: + # N x 3 x 299 x 299 + x = self.Conv2d_1a_3x3(x) + # N x 32 x 149 x 149 + x = self.Conv2d_2a_3x3(x) + # N x 32 x 147 x 147 + x = self.Conv2d_2b_3x3(x) + # N x 64 x 147 x 147 + x = self.maxpool1(x) + # N x 64 x 73 x 73 + x = self.Conv2d_3b_1x1(x) + # N x 80 x 73 x 73 + x = self.Conv2d_4a_3x3(x) + # N x 192 x 71 x 71 + x = self.maxpool2(x) + # N x 192 x 35 x 35 + x = self.Mixed_5b(x) + # N x 256 x 35 x 35 + x = self.Mixed_5c(x) + # N x 288 x 35 x 35 + x = self.Mixed_5d(x) + # N x 288 x 35 x 35 + x = self.Mixed_6a(x) + # N x 768 x 17 x 17 + x = self.Mixed_6b(x) + # N x 768 x 17 x 17 + x = self.Mixed_6c(x) + # N x 768 x 17 x 17 + x = self.Mixed_6d(x) + # N x 768 x 17 x 17 + x = self.Mixed_6e(x) + # N x 768 x 17 x 17 + aux: Optional[Tensor] = None + if self.AuxLogits is not None: + if self.training: + aux = self.AuxLogits(x) + # N x 768 x 17 x 17 + x = self.Mixed_7a(x) + # N x 1280 x 8 x 8 + x = self.Mixed_7b(x) + 
# N x 2048 x 8 x 8 + x = self.Mixed_7c(x) + # N x 2048 x 8 x 8 + # Adaptive average pooling + x = self.avgpool(x) + # N x 2048 x 1 x 1 + x = self.dropout(x) + # N x 2048 x 1 x 1 + x = torch.flatten(x, 1) + # N x 2048 + x = self.fc(x) + # N x 1000 (num_classes) + return x, aux + + @torch.jit.unused + def eager_outputs(self, x: Tensor, aux: Optional[Tensor]) -> InceptionOutputs: + if self.training and self.aux_logits: + return InceptionOutputs(x, aux) + else: + return x # type: ignore[return-value] + + def forward(self, x: Tensor) -> InceptionOutputs: + x = self._transform_input(x) + x, aux = self._forward(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted Inception3 always returns Inception3 Tuple") + return InceptionOutputs(x, aux) + else: + return self.eager_outputs(x, aux) + + +class InceptionA(nn.Module): + + def __init__( + self, + in_channels: int, + pool_features: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionA, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 64, kernel_size=1) + + self.branch5x5_1 = conv_block(in_channels, 48, kernel_size=1) + self.branch5x5_2 = conv_block(48, 64, kernel_size=5, padding=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, padding=1) + + self.branch_pool = conv_block(in_channels, pool_features, kernel_size=1) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch5x5 = self.branch5x5_1(x) + branch5x5 = self.branch5x5_2(branch5x5) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionB(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionB, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3 = conv_block(in_channels, 384, kernel_size=3, stride=2) + + self.branch3x3dbl_1 = conv_block(in_channels, 64, kernel_size=1) + self.branch3x3dbl_2 = conv_block(64, 96, kernel_size=3, padding=1) + self.branch3x3dbl_3 = conv_block(96, 96, kernel_size=3, stride=2) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch3x3 = self.branch3x3(x) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + + outputs = [branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionC(nn.Module): + + def __init__( + self, + in_channels: int, + channels_7x7: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionC, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 192, kernel_size=1) + + c7 = channels_7x7 + self.branch7x7_1 = 
conv_block(in_channels, c7, kernel_size=1) + self.branch7x7_2 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7_3 = conv_block(c7, 192, kernel_size=(7, 1), padding=(3, 0)) + + self.branch7x7dbl_1 = conv_block(in_channels, c7, kernel_size=1) + self.branch7x7dbl_2 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_3 = conv_block(c7, c7, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7dbl_4 = conv_block(c7, c7, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7dbl_5 = conv_block(c7, 192, kernel_size=(1, 7), padding=(0, 3)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch7x7 = self.branch7x7_1(x) + branch7x7 = self.branch7x7_2(branch7x7) + branch7x7 = self.branch7x7_3(branch7x7) + + branch7x7dbl = self.branch7x7dbl_1(x) + branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl) + branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionD(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionD, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch3x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch3x3_2 = conv_block(192, 320, kernel_size=3, stride=2) + + self.branch7x7x3_1 = conv_block(in_channels, 192, kernel_size=1) + self.branch7x7x3_2 = conv_block(192, 192, kernel_size=(1, 7), padding=(0, 3)) + self.branch7x7x3_3 = conv_block(192, 192, kernel_size=(7, 1), padding=(3, 0)) + self.branch7x7x3_4 = conv_block(192, 192, kernel_size=3, stride=2) + + def _forward(self, x: Tensor) -> List[Tensor]: + branch3x3 = self.branch3x3_1(x) + branch3x3 = self.branch3x3_2(branch3x3) + + branch7x7x3 = self.branch7x7x3_1(x) + branch7x7x3 = self.branch7x7x3_2(branch7x7x3) + branch7x7x3 = self.branch7x7x3_3(branch7x7x3) + branch7x7x3 = self.branch7x7x3_4(branch7x7x3) + + branch_pool = F.max_pool2d(x, kernel_size=3, stride=2) + outputs = [branch3x3, branch7x7x3, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionE(nn.Module): + + def __init__( + self, + in_channels: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionE, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.branch1x1 = conv_block(in_channels, 320, kernel_size=1) + + self.branch3x3_1 = conv_block(in_channels, 384, kernel_size=1) + self.branch3x3_2a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3_2b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch3x3dbl_1 = conv_block(in_channels, 448, kernel_size=1) + self.branch3x3dbl_2 = conv_block(448, 384, kernel_size=3, padding=1) + self.branch3x3dbl_3a = conv_block(384, 384, kernel_size=(1, 3), padding=(0, 1)) + self.branch3x3dbl_3b = conv_block(384, 384, kernel_size=(3, 1), padding=(1, 0)) + + self.branch_pool = conv_block(in_channels, 192, kernel_size=1) + + def _forward(self, x: Tensor) -> 
List[Tensor]: + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [ + self.branch3x3_2a(branch3x3), + self.branch3x3_2b(branch3x3), + ] + branch3x3 = torch.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = torch.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x: Tensor) -> Tensor: + outputs = self._forward(x) + return torch.cat(outputs, 1) + + +class InceptionAux(nn.Module): + + def __init__( + self, + in_channels: int, + num_classes: int, + conv_block: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InceptionAux, self).__init__() + if conv_block is None: + conv_block = BasicConv2d + self.conv0 = conv_block(in_channels, 128, kernel_size=1) + self.conv1 = conv_block(128, 768, kernel_size=5) + self.conv1.stddev = 0.01 # type: ignore[assignment] + self.fc = nn.Linear(768, num_classes) + self.fc.stddev = 0.001 # type: ignore[assignment] + + def forward(self, x: Tensor) -> Tensor: + # N x 768 x 17 x 17 + x = F.avg_pool2d(x, kernel_size=5, stride=3) + # N x 768 x 5 x 5 + x = self.conv0(x) + # N x 128 x 5 x 5 + x = self.conv1(x) + # N x 768 x 1 x 1 + # Adaptive average pooling + x = F.adaptive_avg_pool2d(x, (1, 1)) + # N x 768 x 1 x 1 + x = torch.flatten(x, 1) + # N x 768 + x = self.fc(x) + # N x 1000 + return x + + +class BasicConv2d(nn.Module): + + def __init__( + self, + in_channels: int, + out_channels: int, + **kwargs: Any + ) -> None: + super(BasicConv2d, self).__init__() + self.conv = nn.Conv2d(in_channels, out_channels, bias=False, **kwargs) + self.bn = nn.BatchNorm2d(out_channels, eps=0.001) + + def forward(self, x: Tensor) -> Tensor: + x = self.conv(x) + x = self.bn(x) + return F.relu(x, inplace=True) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mnasnet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mnasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3c703774ddad607de4c21182aac02b0e10d48a73 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mnasnet.py @@ -0,0 +1,275 @@ +import warnings + +import torch +from torch import Tensor +import torch.nn as nn +from .utils import load_state_dict_from_url +from typing import Any, Dict, List + +__all__ = ['MNASNet', 'mnasnet0_5', 'mnasnet0_75', 'mnasnet1_0', 'mnasnet1_3'] + +_MODEL_URLS = { + "mnasnet0_5": + "https://download.pytorch.org/models/mnasnet0.5_top1_67.823-3ffadce67e.pth", + "mnasnet0_75": None, + "mnasnet1_0": + "https://download.pytorch.org/models/mnasnet1.0_top1_73.512-f206786ef8.pth", + "mnasnet1_3": None +} + +# Paper suggests 0.9997 momentum, for TensorFlow. Equivalent PyTorch momentum is +# 1.0 - tensorflow. 
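To see why the constant below is 1 - 0.9997 (illustration only): TensorFlow-style batch norm updates its running statistics as r = decay * r + (1 - decay) * x, whereas torch.nn.BatchNorm2d uses r = (1 - momentum) * r + momentum * x, so momentum = 1 - decay reproduces the same update:

decay = 0.9997            # TensorFlow-style decay suggested by the paper
momentum = 1.0 - decay    # equivalent PyTorch BatchNorm momentum
r_tf = r_pt = 0.0
x = 1.0                   # dummy batch statistic
r_tf = decay * r_tf + (1.0 - decay) * x
r_pt = (1.0 - momentum) * r_pt + momentum * x
assert abs(r_tf - r_pt) < 1e-12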
+_BN_MOMENTUM = 1 - 0.9997 + + +class _InvertedResidual(nn.Module): + + def __init__( + self, + in_ch: int, + out_ch: int, + kernel_size: int, + stride: int, + expansion_factor: int, + bn_momentum: float = 0.1 + ) -> None: + super(_InvertedResidual, self).__init__() + assert stride in [1, 2] + assert kernel_size in [3, 5] + mid_ch = in_ch * expansion_factor + self.apply_residual = (in_ch == out_ch and stride == 1) + self.layers = nn.Sequential( + # Pointwise + nn.Conv2d(in_ch, mid_ch, 1, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Depthwise + nn.Conv2d(mid_ch, mid_ch, kernel_size, padding=kernel_size // 2, + stride=stride, groups=mid_ch, bias=False), + nn.BatchNorm2d(mid_ch, momentum=bn_momentum), + nn.ReLU(inplace=True), + # Linear pointwise. Note that there's no activation. + nn.Conv2d(mid_ch, out_ch, 1, bias=False), + nn.BatchNorm2d(out_ch, momentum=bn_momentum)) + + def forward(self, input: Tensor) -> Tensor: + if self.apply_residual: + return self.layers(input) + input + else: + return self.layers(input) + + +def _stack(in_ch: int, out_ch: int, kernel_size: int, stride: int, exp_factor: int, repeats: int, + bn_momentum: float) -> nn.Sequential: + """ Creates a stack of inverted residuals. """ + assert repeats >= 1 + # First one has no skip, because feature map size changes. + first = _InvertedResidual(in_ch, out_ch, kernel_size, stride, exp_factor, + bn_momentum=bn_momentum) + remaining = [] + for _ in range(1, repeats): + remaining.append( + _InvertedResidual(out_ch, out_ch, kernel_size, 1, exp_factor, + bn_momentum=bn_momentum)) + return nn.Sequential(first, *remaining) + + +def _round_to_multiple_of(val: float, divisor: int, round_up_bias: float = 0.9) -> int: + """ Asymmetric rounding to make `val` divisible by `divisor`. With default + bias, will round up, unless the number is no more than 10% greater than the + smaller divisible value, i.e. (83, 8) -> 80, but (84, 8) -> 88. """ + assert 0.0 < round_up_bias < 1.0 + new_val = max(divisor, int(val + divisor / 2) // divisor * divisor) + return new_val if new_val >= round_up_bias * val else new_val + divisor + + +def _get_depths(alpha: float) -> List[int]: + """ Scales tensor depths as in reference MobileNet code, prefers rouding up + rather than down. """ + depths = [32, 16, 24, 40, 80, 96, 192, 320] + return [_round_to_multiple_of(depth * alpha, 8) for depth in depths] + + +class MNASNet(torch.nn.Module): + """ MNASNet, as described in https://arxiv.org/pdf/1807.11626.pdf. This + implements the B1 variant of the model. + >>> model = MNASNet(1.0, num_classes=1000) + >>> x = torch.rand(1, 3, 224, 224) + >>> y = model(x) + >>> y.dim() + 2 + >>> y.nelement() + 1000 + """ + # Version 2 adds depth scaling in the initial stages of the network. + _version = 2 + + def __init__( + self, + alpha: float, + num_classes: int = 1000, + dropout: float = 0.2 + ) -> None: + super(MNASNet, self).__init__() + assert alpha > 0.0 + self.alpha = alpha + self.num_classes = num_classes + depths = _get_depths(alpha) + layers = [ + # First layer: regular conv. + nn.Conv2d(3, depths[0], 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + # Depthwise separable, no skip. 
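# ("Depthwise separable" here means the 3x3 conv below uses groups=depths[0],
# so each input channel is filtered independently; the 1x1 conv that follows
# is the pointwise projection to depths[1] channels.)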
+ nn.Conv2d(depths[0], depths[0], 3, padding=1, stride=1, + groups=depths[0], bias=False), + nn.BatchNorm2d(depths[0], momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(depths[0], depths[1], 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(depths[1], momentum=_BN_MOMENTUM), + # MNASNet blocks: stacks of inverted residuals. + _stack(depths[1], depths[2], 3, 2, 3, 3, _BN_MOMENTUM), + _stack(depths[2], depths[3], 5, 2, 3, 3, _BN_MOMENTUM), + _stack(depths[3], depths[4], 5, 2, 6, 3, _BN_MOMENTUM), + _stack(depths[4], depths[5], 3, 1, 6, 2, _BN_MOMENTUM), + _stack(depths[5], depths[6], 5, 2, 6, 4, _BN_MOMENTUM), + _stack(depths[6], depths[7], 3, 1, 6, 1, _BN_MOMENTUM), + # Final mapping to classifier input. + nn.Conv2d(depths[7], 1280, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(1280, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + ] + self.layers = nn.Sequential(*layers) + self.classifier = nn.Sequential(nn.Dropout(p=dropout, inplace=True), + nn.Linear(1280, num_classes)) + self._initialize_weights() + + def forward(self, x: Tensor) -> Tensor: + x = self.layers(x) + # Equivalent to global avgpool and removing H and W dimensions. + x = x.mean([2, 3]) + return self.classifier(x) + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode="fan_out", + nonlinearity="relu") + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, nn.BatchNorm2d): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.kaiming_uniform_(m.weight, mode="fan_out", + nonlinearity="sigmoid") + nn.init.zeros_(m.bias) + + def _load_from_state_dict(self, state_dict: Dict, prefix: str, local_metadata: Dict, strict: bool, + missing_keys: List[str], unexpected_keys: List[str], error_msgs: List[str]) -> None: + version = local_metadata.get("version", None) + assert version in [1, 2] + + if version == 1 and not self.alpha == 1.0: + # In the initial version of the model (v1), stem was fixed-size. + # All other layer configurations were the same. This will patch + # the model so that it's identical to v1. Model with alpha 1.0 is + # unaffected. + depths = _get_depths(self.alpha) + v1_stem = [ + nn.Conv2d(3, 32, 3, padding=1, stride=2, bias=False), + nn.BatchNorm2d(32, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(32, 32, 3, padding=1, stride=1, groups=32, + bias=False), + nn.BatchNorm2d(32, momentum=_BN_MOMENTUM), + nn.ReLU(inplace=True), + nn.Conv2d(32, 16, 1, padding=0, stride=1, bias=False), + nn.BatchNorm2d(16, momentum=_BN_MOMENTUM), + _stack(16, depths[2], 3, 2, 3, 3, _BN_MOMENTUM), + ] + for idx, layer in enumerate(v1_stem): + self.layers[idx] = layer + + # The model is now identical to v1, and must be saved as such. + self._version = 1 + warnings.warn( + "A new version of MNASNet model has been implemented. " + "Your checkpoint was saved using the previous version. 
" + "This checkpoint will load and work as before, but " + "you may want to upgrade by training a newer model or " + "transfer learning from an updated ImageNet checkpoint.", + UserWarning) + + super(MNASNet, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, missing_keys, + unexpected_keys, error_msgs) + + +def _load_pretrained(model_name: str, model: nn.Module, progress: bool) -> None: + if model_name not in _MODEL_URLS or _MODEL_URLS[model_name] is None: + raise ValueError( + "No checkpoint is available for model type {}".format(model_name)) + checkpoint_url = _MODEL_URLS[model_name] + model.load_state_dict( + load_state_dict_from_url(checkpoint_url, progress=progress)) + + +def mnasnet0_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MNASNet: + r"""MNASNet with depth multiplier of 0.5 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.5, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_5", model, progress) + return model + + +def mnasnet0_75(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MNASNet: + r"""MNASNet with depth multiplier of 0.75 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(0.75, **kwargs) + if pretrained: + _load_pretrained("mnasnet0_75", model, progress) + return model + + +def mnasnet1_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MNASNet: + r"""MNASNet with depth multiplier of 1.0 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.0, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_0", model, progress) + return model + + +def mnasnet1_3(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MNASNet: + r"""MNASNet with depth multiplier of 1.3 from + `"MnasNet: Platform-Aware Neural Architecture Search for Mobile" + <https://arxiv.org/pdf/1807.11626.pdf>`_. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MNASNet(1.3, **kwargs) + if pretrained: + _load_pretrained("mnasnet1_3", model, progress) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..4108305d3f5371ee10f02a7c5fb4858e94b35d5d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenet.py @@ -0,0 +1,4 @@ +from .mobilenetv2 import MobileNetV2, mobilenet_v2, __all__ as mv2_all +from .mobilenetv3 import MobileNetV3, mobilenet_v3_large, mobilenet_v3_small, __all__ as mv3_all + +__all__ = mv2_all + mv3_all diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv2.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..3938156949f25eaf6e53b3509c775b97efbff236 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv2.py @@ -0,0 +1,217 @@ +import torch +from torch import nn +from torch import Tensor +from .utils import load_state_dict_from_url +from typing import Callable, Any, Optional, List + + +__all__ = ['MobileNetV2', 'mobilenet_v2'] + + +model_urls = { + 'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth', +} + + +def _make_divisible(v: float, divisor: int, min_value: Optional[int] = None) -> int: + """ + This function is taken from the original tf repo. + It ensures that all layers have a channel number that is divisible by 8 + It can be seen here: + https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py + """ + if min_value is None: + min_value = divisor + new_v = max(min_value, int(v + divisor / 2) // divisor * divisor) + # Make sure that round down does not go down by more than 10%. 
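# For example, with divisor=8: v=83 -> 80 and v=84 -> 88 straight from the
# rounding above, while v=27 first rounds down to 24, which is more than 10%
# below 27, so the check below bumps it up to 32.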
+ if new_v < 0.9 * v: + new_v += divisor + return new_v + + +class ConvBNActivation(nn.Sequential): + def __init__( + self, + in_planes: int, + out_planes: int, + kernel_size: int = 3, + stride: int = 1, + groups: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None, + activation_layer: Optional[Callable[..., nn.Module]] = None, + dilation: int = 1, + ) -> None: + padding = (kernel_size - 1) // 2 * dilation + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if activation_layer is None: + activation_layer = nn.ReLU6 + super().__init__( + nn.Conv2d(in_planes, out_planes, kernel_size, stride, padding, dilation=dilation, groups=groups, + bias=False), + norm_layer(out_planes), + activation_layer(inplace=True) + ) + self.out_channels = out_planes + + +# necessary for backwards compatibility +ConvBNReLU = ConvBNActivation + + +class InvertedResidual(nn.Module): + def __init__( + self, + inp: int, + oup: int, + stride: int, + expand_ratio: int, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(InvertedResidual, self).__init__() + self.stride = stride + assert stride in [1, 2] + + if norm_layer is None: + norm_layer = nn.BatchNorm2d + + hidden_dim = int(round(inp * expand_ratio)) + self.use_res_connect = self.stride == 1 and inp == oup + + layers: List[nn.Module] = [] + if expand_ratio != 1: + # pw + layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1, norm_layer=norm_layer)) + layers.extend([ + # dw + ConvBNReLU(hidden_dim, hidden_dim, stride=stride, groups=hidden_dim, norm_layer=norm_layer), + # pw-linear + nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False), + norm_layer(oup), + ]) + self.conv = nn.Sequential(*layers) + self.out_channels = oup + self._is_cn = stride > 1 + + def forward(self, x: Tensor) -> Tensor: + if self.use_res_connect: + return x + self.conv(x) + else: + return self.conv(x) + + +class MobileNetV2(nn.Module): + def __init__( + self, + num_classes: int = 1000, + width_mult: float = 1.0, + inverted_residual_setting: Optional[List[List[int]]] = None, + round_nearest: int = 8, + block: Optional[Callable[..., nn.Module]] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + """ + MobileNet V2 main class + + Args: + num_classes (int): Number of classes + width_mult (float): Width multiplier - adjusts number of channels in each layer by this amount + inverted_residual_setting: Network structure + round_nearest (int): Round the number of channels in each layer to be a multiple of this number + Set to 1 to turn off rounding + block: Module specifying inverted residual building block for mobilenet + norm_layer: Module specifying the normalization layer to use + + """ + super(MobileNetV2, self).__init__() + + if block is None: + block = InvertedResidual + + if norm_layer is None: + norm_layer = nn.BatchNorm2d + + input_channel = 32 + last_channel = 1280 + + if inverted_residual_setting is None: + inverted_residual_setting = [ + # t, c, n, s + [1, 16, 1, 1], + [6, 24, 2, 2], + [6, 32, 3, 2], + [6, 64, 4, 2], + [6, 96, 3, 1], + [6, 160, 3, 2], + [6, 320, 1, 1], + ] + + # only check the first element, assuming user knows t,c,n,s are required + if len(inverted_residual_setting) == 0 or len(inverted_residual_setting[0]) != 4: + raise ValueError("inverted_residual_setting should be non-empty " + "or a 4-element list, got {}".format(inverted_residual_setting)) + + # building first layer + input_channel = _make_divisible(input_channel * width_mult, round_nearest) + self.last_channel = _make_divisible(last_channel * max(1.0, 
width_mult), round_nearest) + features: List[nn.Module] = [ConvBNReLU(3, input_channel, stride=2, norm_layer=norm_layer)] + # building inverted residual blocks + for t, c, n, s in inverted_residual_setting: + output_channel = _make_divisible(c * width_mult, round_nearest) + for i in range(n): + stride = s if i == 0 else 1 + features.append(block(input_channel, output_channel, stride, expand_ratio=t, norm_layer=norm_layer)) + input_channel = output_channel + # building last several layers + features.append(ConvBNReLU(input_channel, self.last_channel, kernel_size=1, norm_layer=norm_layer)) + # make it nn.Sequential + self.features = nn.Sequential(*features) + + # building classifier + self.classifier = nn.Sequential( + nn.Dropout(0.2), + nn.Linear(self.last_channel, num_classes), + ) + + # weight initialization + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def _forward_impl(self, x: Tensor) -> Tensor: + # This exists since TorchScript doesn't support inheritance, so the superclass method + # (this one) needs to have a name other than `forward` that can be accessed in a subclass + x = self.features(x) + # Cannot use "squeeze" as batch-size can be 1 + x = nn.functional.adaptive_avg_pool2d(x, (1, 1)) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + + def forward(self, x: Tensor) -> Tensor: + return self._forward_impl(x) + + +def mobilenet_v2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV2: + """ + Constructs a MobileNetV2 architecture from + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" <https://arxiv.org/abs/1801.04381>`_. 
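In the (t, c, n, s) rows above, t is the expansion factor, c the output channel count, n the number of repeats, and s the stride of the first repeat. A minimal sketch of building and running a narrower variant (the 0.5 width multiplier is just an example value, not an upstream preset):

import torch
from torchvision.models import mobilenet_v2

model = mobilenet_v2(width_mult=0.5)         # channels scaled by 0.5, rounded to multiples of 8
logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])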
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + model = MobileNetV2(**kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls['mobilenet_v2'], + progress=progress) + model.load_state_dict(state_dict) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv3.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv3.py new file mode 100644 index 0000000000000000000000000000000000000000..3ac517d0ced22fc61596d3554d77fbef3a6dc64b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/mobilenetv3.py @@ -0,0 +1,278 @@ +import torch + +from functools import partial +from torch import nn, Tensor +from torch.nn import functional as F +from typing import Any, Callable, Dict, List, Optional, Sequence + +from torchvision.models.utils import load_state_dict_from_url +from torchvision.models.mobilenetv2 import _make_divisible, ConvBNActivation + + +__all__ = ["MobileNetV3", "mobilenet_v3_large", "mobilenet_v3_small"] + + +model_urls = { + "mobilenet_v3_large": "https://download.pytorch.org/models/mobilenet_v3_large-8738ca79.pth", + "mobilenet_v3_small": "https://download.pytorch.org/models/mobilenet_v3_small-047dcff4.pth", +} + + +class SqueezeExcitation(nn.Module): + # Implemented as described at Figure 4 of the MobileNetV3 paper + def __init__(self, input_channels: int, squeeze_factor: int = 4): + super().__init__() + squeeze_channels = _make_divisible(input_channels // squeeze_factor, 8) + self.fc1 = nn.Conv2d(input_channels, squeeze_channels, 1) + self.relu = nn.ReLU(inplace=True) + self.fc2 = nn.Conv2d(squeeze_channels, input_channels, 1) + + def _scale(self, input: Tensor, inplace: bool) -> Tensor: + scale = F.adaptive_avg_pool2d(input, 1) + scale = self.fc1(scale) + scale = self.relu(scale) + scale = self.fc2(scale) + return F.hardsigmoid(scale, inplace=inplace) + + def forward(self, input: Tensor) -> Tensor: + scale = self._scale(input, True) + return scale * input + + +class InvertedResidualConfig: + # Stores information listed at Tables 1 and 2 of the MobileNetV3 paper + def __init__(self, input_channels: int, kernel: int, expanded_channels: int, out_channels: int, use_se: bool, + activation: str, stride: int, dilation: int, width_mult: float): + self.input_channels = self.adjust_channels(input_channels, width_mult) + self.kernel = kernel + self.expanded_channels = self.adjust_channels(expanded_channels, width_mult) + self.out_channels = self.adjust_channels(out_channels, width_mult) + self.use_se = use_se + self.use_hs = activation == "HS" + self.stride = stride + self.dilation = dilation + + @staticmethod + def adjust_channels(channels: int, width_mult: float): + return _make_divisible(channels * width_mult, 8) + + +class InvertedResidual(nn.Module): + # Implemented as described at section 5 of MobileNetV3 paper + def __init__(self, cnf: InvertedResidualConfig, norm_layer: Callable[..., nn.Module], + se_layer: Callable[..., nn.Module] = SqueezeExcitation): + super().__init__() + if not (1 <= cnf.stride <= 2): + raise ValueError('illegal stride value') + + self.use_res_connect = cnf.stride == 1 and cnf.input_channels == cnf.out_channels + + layers: List[nn.Module] = [] + activation_layer = nn.Hardswish if cnf.use_hs else nn.ReLU + + # expand + if cnf.expanded_channels != cnf.input_channels: + layers.append(ConvBNActivation(cnf.input_channels, cnf.expanded_channels, 
kernel_size=1, + norm_layer=norm_layer, activation_layer=activation_layer)) + + # depthwise + stride = 1 if cnf.dilation > 1 else cnf.stride + layers.append(ConvBNActivation(cnf.expanded_channels, cnf.expanded_channels, kernel_size=cnf.kernel, + stride=stride, dilation=cnf.dilation, groups=cnf.expanded_channels, + norm_layer=norm_layer, activation_layer=activation_layer)) + if cnf.use_se: + layers.append(se_layer(cnf.expanded_channels)) + + # project + layers.append(ConvBNActivation(cnf.expanded_channels, cnf.out_channels, kernel_size=1, norm_layer=norm_layer, + activation_layer=nn.Identity)) + + self.block = nn.Sequential(*layers) + self.out_channels = cnf.out_channels + self._is_cn = cnf.stride > 1 + + def forward(self, input: Tensor) -> Tensor: + result = self.block(input) + if self.use_res_connect: + result += input + return result + + +class MobileNetV3(nn.Module): + + def __init__( + self, + inverted_residual_setting: List[InvertedResidualConfig], + last_channel: int, + num_classes: int = 1000, + block: Optional[Callable[..., nn.Module]] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None, + **kwargs: Any + ) -> None: + """ + MobileNet V3 main class + + Args: + inverted_residual_setting (List[InvertedResidualConfig]): Network structure + last_channel (int): The number of channels on the penultimate layer + num_classes (int): Number of classes + block (Optional[Callable[..., nn.Module]]): Module specifying inverted residual building block for mobilenet + norm_layer (Optional[Callable[..., nn.Module]]): Module specifying the normalization layer to use + """ + super().__init__() + + if not inverted_residual_setting: + raise ValueError("The inverted_residual_setting should not be empty") + elif not (isinstance(inverted_residual_setting, Sequence) and + all([isinstance(s, InvertedResidualConfig) for s in inverted_residual_setting])): + raise TypeError("The inverted_residual_setting should be List[InvertedResidualConfig]") + + if block is None: + block = InvertedResidual + + if norm_layer is None: + norm_layer = partial(nn.BatchNorm2d, eps=0.001, momentum=0.01) + + layers: List[nn.Module] = [] + + # building first layer + firstconv_output_channels = inverted_residual_setting[0].input_channels + layers.append(ConvBNActivation(3, firstconv_output_channels, kernel_size=3, stride=2, norm_layer=norm_layer, + activation_layer=nn.Hardswish)) + + # building inverted residual blocks + for cnf in inverted_residual_setting: + layers.append(block(cnf, norm_layer)) + + # building last several layers + lastconv_input_channels = inverted_residual_setting[-1].out_channels + lastconv_output_channels = 6 * lastconv_input_channels + layers.append(ConvBNActivation(lastconv_input_channels, lastconv_output_channels, kernel_size=1, + norm_layer=norm_layer, activation_layer=nn.Hardswish)) + + self.features = nn.Sequential(*layers) + self.avgpool = nn.AdaptiveAvgPool2d(1) + self.classifier = nn.Sequential( + nn.Linear(lastconv_output_channels, last_channel), + nn.Hardswish(inplace=True), + nn.Dropout(p=0.2, inplace=True), + nn.Linear(last_channel, num_classes), + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out') + if m.bias is not None: + nn.init.zeros_(m.bias) + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.ones_(m.weight) + nn.init.zeros_(m.bias) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.zeros_(m.bias) + + def _forward_impl(self, x: Tensor) -> Tensor: + x = self.features(x) + + x = 
self.avgpool(x) + x = torch.flatten(x, 1) + + x = self.classifier(x) + + return x + + def forward(self, x: Tensor) -> Tensor: + return self._forward_impl(x) + + +def _mobilenet_v3_conf(arch: str, width_mult: float = 1.0, reduced_tail: bool = False, dilated: bool = False, + **kwargs: Any): + reduce_divider = 2 if reduced_tail else 1 + dilation = 2 if dilated else 1 + + bneck_conf = partial(InvertedResidualConfig, width_mult=width_mult) + adjust_channels = partial(InvertedResidualConfig.adjust_channels, width_mult=width_mult) + + if arch == "mobilenet_v3_large": + inverted_residual_setting = [ + bneck_conf(16, 3, 16, 16, False, "RE", 1, 1), + bneck_conf(16, 3, 64, 24, False, "RE", 2, 1), # C1 + bneck_conf(24, 3, 72, 24, False, "RE", 1, 1), + bneck_conf(24, 5, 72, 40, True, "RE", 2, 1), # C2 + bneck_conf(40, 5, 120, 40, True, "RE", 1, 1), + bneck_conf(40, 5, 120, 40, True, "RE", 1, 1), + bneck_conf(40, 3, 240, 80, False, "HS", 2, 1), # C3 + bneck_conf(80, 3, 200, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 184, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 184, 80, False, "HS", 1, 1), + bneck_conf(80, 3, 480, 112, True, "HS", 1, 1), + bneck_conf(112, 3, 672, 112, True, "HS", 1, 1), + bneck_conf(112, 5, 672, 160 // reduce_divider, True, "HS", 2, dilation), # C4 + bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation), + bneck_conf(160 // reduce_divider, 5, 960 // reduce_divider, 160 // reduce_divider, True, "HS", 1, dilation), + ] + last_channel = adjust_channels(1280 // reduce_divider) # C5 + elif arch == "mobilenet_v3_small": + inverted_residual_setting = [ + bneck_conf(16, 3, 16, 16, True, "RE", 2, 1), # C1 + bneck_conf(16, 3, 72, 24, False, "RE", 2, 1), # C2 + bneck_conf(24, 3, 88, 24, False, "RE", 1, 1), + bneck_conf(24, 5, 96, 40, True, "HS", 2, 1), # C3 + bneck_conf(40, 5, 240, 40, True, "HS", 1, 1), + bneck_conf(40, 5, 240, 40, True, "HS", 1, 1), + bneck_conf(40, 5, 120, 48, True, "HS", 1, 1), + bneck_conf(48, 5, 144, 48, True, "HS", 1, 1), + bneck_conf(48, 5, 288, 96 // reduce_divider, True, "HS", 2, dilation), # C4 + bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation), + bneck_conf(96 // reduce_divider, 5, 576 // reduce_divider, 96 // reduce_divider, True, "HS", 1, dilation), + ] + last_channel = adjust_channels(1024 // reduce_divider) # C5 + else: + raise ValueError("Unsupported model type {}".format(arch)) + + return inverted_residual_setting, last_channel + + +def _mobilenet_v3_model( + arch: str, + inverted_residual_setting: List[InvertedResidualConfig], + last_channel: int, + pretrained: bool, + progress: bool, + **kwargs: Any +): + model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs) + if pretrained: + if model_urls.get(arch, None) is None: + raise ValueError("No checkpoint is available for model type {}".format(arch)) + state_dict = load_state_dict_from_url(model_urls[arch], progress=progress) + model.load_state_dict(state_dict) + return model + + +def mobilenet_v3_large(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3: + """ + Constructs a large MobileNetV3 architecture from + `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_. 
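A minimal construction sketch using the configuration helper above (illustration only; _mobilenet_v3_conf is a private helper, and the public constructors below are the intended entry points):

import torch
from torchvision.models.mobilenetv3 import MobileNetV3, _mobilenet_v3_conf

setting, last_channel = _mobilenet_v3_conf("mobilenet_v3_small", width_mult=1.0)
model = MobileNetV3(setting, last_channel, num_classes=1000)
logits = model(torch.randn(1, 3, 224, 224))  # -> torch.Size([1, 1000])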
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + arch = "mobilenet_v3_large" + inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs) + return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, **kwargs) + + +def mobilenet_v3_small(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> MobileNetV3: + """ + Constructs a small MobileNetV3 architecture from + `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + arch = "mobilenet_v3_small" + inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs) + return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..deae997a21911432f43b8acf70e8f33dce41c2e4 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/__init__.py @@ -0,0 +1,5 @@ +from .mobilenet import * +from .resnet import * +from .googlenet import * +from .inception import * +from .shufflenetv2 import * diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/googlenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/googlenet.py new file mode 100644 index 0000000000000000000000000000000000000000..3a74ccd542c9ceccb7ac5fb20a5a26c4a62a0066 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/googlenet.py @@ -0,0 +1,166 @@ +import warnings +import torch +import torch.nn as nn +from torch.nn import functional as F + +from torchvision.models.utils import load_state_dict_from_url +from torchvision.models.googlenet import ( + GoogLeNetOutputs, BasicConv2d, Inception, InceptionAux, GoogLeNet, model_urls) + +from .utils import _replace_relu, quantize_model + + +__all__ = ['QuantizableGoogLeNet', 'googlenet'] + +quant_model_urls = { + # fp32 GoogLeNet ported from TensorFlow, with weights quantized in PyTorch + 'googlenet_fbgemm': 'https://download.pytorch.org/models/quantized/googlenet_fbgemm-c00238cf.pth', +} + + +def googlenet(pretrained=False, progress=True, quantize=False, **kwargs): + r"""GoogLeNet (Inception v1) model architecture from + `"Going Deeper with Convolutions" <http://arxiv.org/abs/1409.4842>`_. + + Note that quantize = True returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + aux_logits (bool): If True, adds two auxiliary branches that can improve training. + Default: *False* when pretrained is True otherwise *True* + transform_input (bool): If True, preprocesses the input according to the method with which it + was trained on ImageNet. 
Default: *False* + """ + if pretrained: + if 'transform_input' not in kwargs: + kwargs['transform_input'] = True + if 'aux_logits' not in kwargs: + kwargs['aux_logits'] = False + if kwargs['aux_logits']: + warnings.warn('auxiliary heads in the pretrained googlenet model are NOT pretrained, ' + 'so make sure to train them') + original_aux_logits = kwargs['aux_logits'] + kwargs['aux_logits'] = True + kwargs['init_weights'] = False + + model = QuantizableGoogLeNet(**kwargs) + _replace_relu(model) + + if quantize: + # TODO use pretrained as a string to specify the backend + backend = 'fbgemm' + quantize_model(model, backend) + else: + assert pretrained in [True, False] + + if pretrained: + if quantize: + model_url = quant_model_urls['googlenet' + '_' + backend] + else: + model_url = model_urls['googlenet'] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + + model.load_state_dict(state_dict) + + if not original_aux_logits: + model.aux_logits = False + model.aux1 = None + model.aux2 = None + return model + + +class QuantizableBasicConv2d(BasicConv2d): + + def __init__(self, *args, **kwargs): + super(QuantizableBasicConv2d, self).__init__(*args, **kwargs) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def fuse_model(self): + torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True) + + +class QuantizableInception(Inception): + + def __init__(self, *args, **kwargs): + super(QuantizableInception, self).__init__( + conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.cat = nn.quantized.FloatFunctional() + + def forward(self, x): + outputs = self._forward(x) + return self.cat.cat(outputs, 1) + + +class QuantizableInceptionAux(InceptionAux): + + def __init__(self, *args, **kwargs): + super(QuantizableInceptionAux, self).__init__( + conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.relu = nn.ReLU() + self.dropout = nn.Dropout(0.7) + + def forward(self, x): + # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14 + x = F.adaptive_avg_pool2d(x, (4, 4)) + # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4 + x = self.conv(x) + # N x 128 x 4 x 4 + x = torch.flatten(x, 1) + # N x 2048 + x = self.relu(self.fc1(x)) + # N x 1024 + x = self.dropout(x) + # N x 1024 + x = self.fc2(x) + # N x 1000 (num_classes) + + return x + + +class QuantizableGoogLeNet(GoogLeNet): + + def __init__(self, *args, **kwargs): + super(QuantizableGoogLeNet, self).__init__( + blocks=[QuantizableBasicConv2d, QuantizableInception, QuantizableInceptionAux], + *args, + **kwargs + ) + self.quant = torch.quantization.QuantStub() + self.dequant = torch.quantization.DeQuantStub() + + def forward(self, x): + x = self._transform_input(x) + x = self.quant(x) + x, aux1, aux2 = self._forward(x) + x = self.dequant(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted QuantizableGoogleNet always returns GoogleNetOutputs Tuple") + return GoogLeNetOutputs(x, aux2, aux1) + else: + return self.eager_outputs(x, aux2, aux1) + + def fuse_model(self): + r"""Fuse conv/bn/relu modules in googlenet model + + Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization. + Model is modified in place. 
Note that this operation does not change numerics + and the model after modification is in floating point + """ + + for m in self.modules(): + if type(m) == QuantizableBasicConv2d: + m.fuse_model() diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/inception.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/inception.py new file mode 100644 index 0000000000000000000000000000000000000000..1475cad5eb45d4167d3693758c2a85e9accdd760 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/inception.py @@ -0,0 +1,221 @@ +import warnings + +import torch +import torch.nn as nn +import torch.nn.functional as F +from torchvision.models import inception as inception_module +from torchvision.models.inception import InceptionOutputs +from torchvision.models.utils import load_state_dict_from_url +from .utils import _replace_relu, quantize_model + + +__all__ = [ + "QuantizableInception3", + "inception_v3", +] + + +quant_model_urls = { + # fp32 weights ported from TensorFlow, quantized in PyTorch + "inception_v3_google_fbgemm": + "https://download.pytorch.org/models/quantized/inception_v3_google_fbgemm-71447a44.pth" +} + + +def inception_v3(pretrained=False, progress=True, quantize=False, **kwargs): + r"""Inception v3 model architecture from + `"Rethinking the Inception Architecture for Computer Vision" <http://arxiv.org/abs/1512.00567>`_. + + .. note:: + **Important**: In contrast to the other models the inception_v3 expects tensors with a size of + N x 3 x 299 x 299, so ensure your images are sized accordingly. + + Note that quantize = True returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + aux_logits (bool): If True, add an auxiliary branch that can improve training. + Default: *True* + transform_input (bool): If True, preprocesses the input according to the method with which it + was trained on ImageNet. 
Default: *False* + """ + if pretrained: + if "transform_input" not in kwargs: + kwargs["transform_input"] = True + if "aux_logits" in kwargs: + original_aux_logits = kwargs["aux_logits"] + kwargs["aux_logits"] = True + else: + original_aux_logits = False + + model = QuantizableInception3(**kwargs) + _replace_relu(model) + + if quantize: + # TODO use pretrained as a string to specify the backend + backend = 'fbgemm' + quantize_model(model, backend) + else: + assert pretrained in [True, False] + + if pretrained: + if quantize: + if not original_aux_logits: + model.aux_logits = False + model.AuxLogits = None + model_url = quant_model_urls['inception_v3_google' + '_' + backend] + else: + model_url = inception_module.model_urls['inception_v3_google'] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + + model.load_state_dict(state_dict) + + if not quantize: + if not original_aux_logits: + model.aux_logits = False + model.AuxLogits = None + return model + + +class QuantizableBasicConv2d(inception_module.BasicConv2d): + def __init__(self, *args, **kwargs): + super(QuantizableBasicConv2d, self).__init__(*args, **kwargs) + self.relu = nn.ReLU() + + def forward(self, x): + x = self.conv(x) + x = self.bn(x) + x = self.relu(x) + return x + + def fuse_model(self): + torch.quantization.fuse_modules(self, ["conv", "bn", "relu"], inplace=True) + + +class QuantizableInceptionA(inception_module.InceptionA): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionA, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.myop = nn.quantized.FloatFunctional() + + def forward(self, x): + outputs = self._forward(x) + return self.myop.cat(outputs, 1) + + +class QuantizableInceptionB(inception_module.InceptionB): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionB, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.myop = nn.quantized.FloatFunctional() + + def forward(self, x): + outputs = self._forward(x) + return self.myop.cat(outputs, 1) + + +class QuantizableInceptionC(inception_module.InceptionC): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionC, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.myop = nn.quantized.FloatFunctional() + + def forward(self, x): + outputs = self._forward(x) + return self.myop.cat(outputs, 1) + + +class QuantizableInceptionD(inception_module.InceptionD): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionD, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.myop = nn.quantized.FloatFunctional() + + def forward(self, x): + outputs = self._forward(x) + return self.myop.cat(outputs, 1) + + +class QuantizableInceptionE(inception_module.InceptionE): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionE, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + self.myop1 = nn.quantized.FloatFunctional() + self.myop2 = nn.quantized.FloatFunctional() + self.myop3 = nn.quantized.FloatFunctional() + + def _forward(self, x): + branch1x1 = self.branch1x1(x) + + branch3x3 = self.branch3x3_1(x) + branch3x3 = [self.branch3x3_2a(branch3x3), self.branch3x3_2b(branch3x3)] + branch3x3 = self.myop1.cat(branch3x3, 1) + + branch3x3dbl = self.branch3x3dbl_1(x) + branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl) + branch3x3dbl = [ + self.branch3x3dbl_3a(branch3x3dbl), + self.branch3x3dbl_3b(branch3x3dbl), + ] + branch3x3dbl = self.myop2.cat(branch3x3dbl, 1) + + branch_pool = F.avg_pool2d(x, kernel_size=3, 
stride=1, padding=1) + branch_pool = self.branch_pool(branch_pool) + + outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool] + return outputs + + def forward(self, x): + outputs = self._forward(x) + return self.myop3.cat(outputs, 1) + + +class QuantizableInceptionAux(inception_module.InceptionAux): + def __init__(self, *args, **kwargs): + super(QuantizableInceptionAux, self).__init__(conv_block=QuantizableBasicConv2d, *args, **kwargs) + + +class QuantizableInception3(inception_module.Inception3): + def __init__(self, num_classes=1000, aux_logits=True, transform_input=False): + super(QuantizableInception3, self).__init__( + num_classes=num_classes, + aux_logits=aux_logits, + transform_input=transform_input, + inception_blocks=[ + QuantizableBasicConv2d, + QuantizableInceptionA, + QuantizableInceptionB, + QuantizableInceptionC, + QuantizableInceptionD, + QuantizableInceptionE, + QuantizableInceptionAux + ] + ) + self.quant = torch.quantization.QuantStub() + self.dequant = torch.quantization.DeQuantStub() + + def forward(self, x): + x = self._transform_input(x) + x = self.quant(x) + x, aux = self._forward(x) + x = self.dequant(x) + aux_defined = self.training and self.aux_logits + if torch.jit.is_scripting(): + if not aux_defined: + warnings.warn("Scripted QuantizableInception3 always returns QuantizableInception3 Tuple") + return InceptionOutputs(x, aux) + else: + return self.eager_outputs(x, aux) + + def fuse_model(self): + r"""Fuse conv/bn/relu modules in inception model + + Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization. + Model is modified in place. Note that this operation does not change numerics + and the model after modification is in floating point + """ + + for m in self.modules(): + if type(m) == QuantizableBasicConv2d: + m.fuse_model() diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenet.py new file mode 100644 index 0000000000000000000000000000000000000000..8f2c42db6407364b3ea0b614b4e66dc52e347b12 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenet.py @@ -0,0 +1,4 @@ +from .mobilenetv2 import QuantizableMobileNetV2, mobilenet_v2, __all__ as mv2_all +from .mobilenetv3 import QuantizableMobileNetV3, mobilenet_v3_large, __all__ as mv3_all + +__all__ = mv2_all + mv3_all diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv2.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..72c522a2e460b9f2f10336739019ada923775f8a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv2.py @@ -0,0 +1,94 @@ +from torch import nn +from torchvision.models.utils import load_state_dict_from_url +from torchvision.models.mobilenetv2 import InvertedResidual, ConvBNReLU, MobileNetV2, model_urls +from torch.quantization import QuantStub, DeQuantStub, fuse_modules +from .utils import _replace_relu, quantize_model + + +__all__ = ['QuantizableMobileNetV2', 'mobilenet_v2'] + +quant_model_urls = { + 'mobilenet_v2_qnnpack': + 'https://download.pytorch.org/models/quantized/mobilenet_v2_qnnpack_37f702c5.pth' +} + + +class QuantizableInvertedResidual(InvertedResidual): + def __init__(self, *args, **kwargs): + super(QuantizableInvertedResidual, self).__init__(*args, **kwargs) + self.skip_add = 
nn.quantized.FloatFunctional() + + def forward(self, x): + if self.use_res_connect: + return self.skip_add.add(x, self.conv(x)) + else: + return self.conv(x) + + def fuse_model(self): + for idx in range(len(self.conv)): + if type(self.conv[idx]) == nn.Conv2d: + fuse_modules(self.conv, [str(idx), str(idx + 1)], inplace=True) + + +class QuantizableMobileNetV2(MobileNetV2): + def __init__(self, *args, **kwargs): + """ + MobileNet V2 main class + + Args: + Inherits args from floating point MobileNetV2 + """ + super(QuantizableMobileNetV2, self).__init__(*args, **kwargs) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self._forward_impl(x) + x = self.dequant(x) + return x + + def fuse_model(self): + for m in self.modules(): + if type(m) == ConvBNReLU: + fuse_modules(m, ['0', '1', '2'], inplace=True) + if type(m) == QuantizableInvertedResidual: + m.fuse_model() + + +def mobilenet_v2(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a MobileNetV2 architecture from + `"MobileNetV2: Inverted Residuals and Linear Bottlenecks" + <https://arxiv.org/abs/1801.04381>`_. + + Note that quantize = True returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet. + progress (bool): If True, displays a progress bar of the download to stderr + quantize(bool): If True, returns a quantized model, else returns a float model + """ + model = QuantizableMobileNetV2(block=QuantizableInvertedResidual, **kwargs) + _replace_relu(model) + + if quantize: + # TODO use pretrained as a string to specify the backend + backend = 'qnnpack' + quantize_model(model, backend) + else: + assert pretrained in [True, False] + + if pretrained: + if quantize: + model_url = quant_model_urls['mobilenet_v2_' + backend] + else: + model_url = model_urls['mobilenet_v2'] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + + model.load_state_dict(state_dict) + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv3.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv3.py new file mode 100644 index 0000000000000000000000000000000000000000..fe434f8b29724e6bfe869336dd53eb2cc012c7e1 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/mobilenetv3.py @@ -0,0 +1,131 @@ +import torch +from torch import nn, Tensor +from torchvision.models.utils import load_state_dict_from_url +from torchvision.models.mobilenetv3 import InvertedResidual, InvertedResidualConfig, ConvBNActivation, MobileNetV3,\ + SqueezeExcitation, model_urls, _mobilenet_v3_conf +from torch.quantization import QuantStub, DeQuantStub, fuse_modules +from typing import Any, List, Optional +from .utils import _replace_relu + + +__all__ = ['QuantizableMobileNetV3', 'mobilenet_v3_large'] + +quant_model_urls = { + 'mobilenet_v3_large_qnnpack': + "https://download.pytorch.org/models/quantized/mobilenet_v3_large_qnnpack-5bcacf28.pth", +} + + +class QuantizableSqueezeExcitation(SqueezeExcitation): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.skip_mul = nn.quantized.FloatFunctional() + + def forward(self, input: Tensor) -> Tensor: + return self.skip_mul.mul(self._scale(input, False), input) + + def fuse_model(self): + fuse_modules(self, ['fc1', 'relu'], inplace=True) 
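+# The elementwise ops in these quantizable blocks go through nn.quantized.FloatFunctional
+# (skip_mul above, skip_add in QuantizableInvertedResidual below) because eager-mode
+# quantization cannot attach observers to the bare `*` / `+` tensor operators;
+# FloatFunctional gives each such op its own output scale and zero-point. The SE block
+# fuses only ['fc1', 'relu']: fc2 has no adjacent ReLU, so there is no conv+relu pattern to fuse.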
+ + +class QuantizableInvertedResidual(InvertedResidual): + def __init__(self, *args, **kwargs): + super().__init__(*args, se_layer=QuantizableSqueezeExcitation, **kwargs) + self.skip_add = nn.quantized.FloatFunctional() + + def forward(self, x): + if self.use_res_connect: + return self.skip_add.add(x, self.block(x)) + else: + return self.block(x) + + +class QuantizableMobileNetV3(MobileNetV3): + def __init__(self, *args, **kwargs): + """ + MobileNet V3 main class + + Args: + Inherits args from floating point MobileNetV3 + """ + super().__init__(*args, **kwargs) + self.quant = QuantStub() + self.dequant = DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self._forward_impl(x) + x = self.dequant(x) + return x + + def fuse_model(self): + for m in self.modules(): + if type(m) == ConvBNActivation: + modules_to_fuse = ['0', '1'] + if type(m[2]) == nn.ReLU: + modules_to_fuse.append('2') + fuse_modules(m, modules_to_fuse, inplace=True) + elif type(m) == QuantizableSqueezeExcitation: + m.fuse_model() + + +def _load_weights( + arch: str, + model: QuantizableMobileNetV3, + model_url: Optional[str], + progress: bool, +): + if model_url is None: + raise ValueError("No checkpoint is available for {}".format(arch)) + state_dict = load_state_dict_from_url(model_url, progress=progress) + model.load_state_dict(state_dict) + + +def _mobilenet_v3_model( + arch: str, + inverted_residual_setting: List[InvertedResidualConfig], + last_channel: int, + pretrained: bool, + progress: bool, + quantize: bool, + **kwargs: Any +): + model = QuantizableMobileNetV3(inverted_residual_setting, last_channel, block=QuantizableInvertedResidual, **kwargs) + _replace_relu(model) + + if quantize: + backend = 'qnnpack' + + model.fuse_model() + model.qconfig = torch.quantization.get_default_qat_qconfig(backend) + torch.quantization.prepare_qat(model, inplace=True) + + if pretrained: + _load_weights(arch, model, quant_model_urls.get(arch + '_' + backend, None), progress) + + torch.quantization.convert(model, inplace=True) + model.eval() + else: + if pretrained: + _load_weights(arch, model, model_urls.get(arch, None), progress) + + return model + + +def mobilenet_v3_large(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a MobileNetV3 Large architecture from + `"Searching for MobileNetV3" <https://arxiv.org/abs/1905.02244>`_. + + Note that quantize = True returns a quantized model with 8 bit + weights. Quantized models only support inference and run on CPUs. + GPU inference is not yet supported + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet. 
+ progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, returns a quantized model, else returns a float model + """ + arch = "mobilenet_v3_large" + inverted_residual_setting, last_channel = _mobilenet_v3_conf(arch, **kwargs) + return _mobilenet_v3_model(arch, inverted_residual_setting, last_channel, pretrained, progress, quantize, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/resnet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..f5ac3d0a48670d3a3d321159ac9a03e2e6f99bd0 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/resnet.py @@ -0,0 +1,176 @@ +import torch +from torchvision.models.resnet import Bottleneck, BasicBlock, ResNet, model_urls +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url +from torch.quantization import fuse_modules +from .utils import _replace_relu, quantize_model + +__all__ = ['QuantizableResNet', 'resnet18', 'resnet50', + 'resnext101_32x8d'] + + +quant_model_urls = { + 'resnet18_fbgemm': + 'https://download.pytorch.org/models/quantized/resnet18_fbgemm_16fa66dd.pth', + 'resnet50_fbgemm': + 'https://download.pytorch.org/models/quantized/resnet50_fbgemm_bf931d71.pth', + 'resnext101_32x8d_fbgemm': + 'https://download.pytorch.org/models/quantized/resnext101_32x8_fbgemm_09835ccf.pth', +} + + +class QuantizableBasicBlock(BasicBlock): + def __init__(self, *args, **kwargs): + super(QuantizableBasicBlock, self).__init__(*args, **kwargs) + self.add_relu = torch.nn.quantized.FloatFunctional() + + def forward(self, x): + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out = self.add_relu.add_relu(out, identity) + + return out + + def fuse_model(self): + torch.quantization.fuse_modules(self, [['conv1', 'bn1', 'relu'], + ['conv2', 'bn2']], inplace=True) + if self.downsample: + torch.quantization.fuse_modules(self.downsample, ['0', '1'], inplace=True) + + +class QuantizableBottleneck(Bottleneck): + def __init__(self, *args, **kwargs): + super(QuantizableBottleneck, self).__init__(*args, **kwargs) + self.skip_add_relu = nn.quantized.FloatFunctional() + self.relu1 = nn.ReLU(inplace=False) + self.relu2 = nn.ReLU(inplace=False) + + def forward(self, x): + identity = x + out = self.conv1(x) + out = self.bn1(out) + out = self.relu1(out) + out = self.conv2(out) + out = self.bn2(out) + out = self.relu2(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + out = self.skip_add_relu.add_relu(out, identity) + + return out + + def fuse_model(self): + fuse_modules(self, [['conv1', 'bn1', 'relu1'], + ['conv2', 'bn2', 'relu2'], + ['conv3', 'bn3']], inplace=True) + if self.downsample: + torch.quantization.fuse_modules(self.downsample, ['0', '1'], inplace=True) + + +class QuantizableResNet(ResNet): + + def __init__(self, *args, **kwargs): + super(QuantizableResNet, self).__init__(*args, **kwargs) + + self.quant = torch.quantization.QuantStub() + self.dequant = torch.quantization.DeQuantStub() + + def forward(self, x): + x = self.quant(x) + # Ensure scriptability + # super(QuantizableResNet,self).forward(x) + # is not scriptable + x = self._forward_impl(x) + x = self.dequant(x) + return x + + def 
fuse_model(self): + r"""Fuse conv/bn/relu modules in resnet models + + Fuse conv+bn+relu/ Conv+relu/conv+Bn modules to prepare for quantization. + Model is modified in place. Note that this operation does not change numerics + and the model after modification is in floating point + """ + + fuse_modules(self, ['conv1', 'bn1', 'relu'], inplace=True) + for m in self.modules(): + if type(m) == QuantizableBottleneck or type(m) == QuantizableBasicBlock: + m.fuse_model() + + +def _resnet(arch, block, layers, pretrained, progress, quantize, **kwargs): + model = QuantizableResNet(block, layers, **kwargs) + _replace_relu(model) + if quantize: + # TODO use pretrained as a string to specify the backend + backend = 'fbgemm' + quantize_model(model, backend) + else: + assert pretrained in [True, False] + + if pretrained: + if quantize: + model_url = quant_model_urls[arch + '_' + backend] + else: + model_url = model_urls[arch] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + + model.load_state_dict(state_dict) + return model + + +def resnet18(pretrained=False, progress=True, quantize=False, **kwargs): + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _resnet('resnet18', QuantizableBasicBlock, [2, 2, 2, 2], pretrained, progress, + quantize, **kwargs) + + +def resnet50(pretrained=False, progress=True, quantize=False, **kwargs): + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _resnet('resnet50', QuantizableBottleneck, [3, 4, 6, 3], pretrained, progress, + quantize, **kwargs) + + +def resnext101_32x8d(pretrained=False, progress=True, quantize=False, **kwargs): + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_ + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', QuantizableBottleneck, [3, 4, 23, 3], + pretrained, progress, quantize, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/shufflenetv2.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/shufflenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..3f779db3817bc022ac793500633cf0330bf973a8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/shufflenetv2.py @@ -0,0 +1,158 @@ +import torch +import torch.nn as nn +from torchvision.models.utils import load_state_dict_from_url +import torchvision.models.shufflenetv2 +import sys +from .utils import _replace_relu, quantize_model + +shufflenetv2 = sys.modules['torchvision.models.shufflenetv2'] + +__all__ = [ + 'QuantizableShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', + 
'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0' +] + +quant_model_urls = { + 'shufflenetv2_x0.5_fbgemm': None, + 'shufflenetv2_x1.0_fbgemm': + 'https://download.pytorch.org/models/quantized/shufflenetv2_x1_fbgemm-db332c57.pth', + 'shufflenetv2_x1.5_fbgemm': None, + 'shufflenetv2_x2.0_fbgemm': None, +} + + +class QuantizableInvertedResidual(shufflenetv2.InvertedResidual): + def __init__(self, *args, **kwargs): + super(QuantizableInvertedResidual, self).__init__(*args, **kwargs) + self.cat = nn.quantized.FloatFunctional() + + def forward(self, x): + if self.stride == 1: + x1, x2 = x.chunk(2, dim=1) + out = self.cat.cat((x1, self.branch2(x2)), dim=1) + else: + out = self.cat.cat((self.branch1(x), self.branch2(x)), dim=1) + + out = shufflenetv2.channel_shuffle(out, 2) + + return out + + +class QuantizableShuffleNetV2(shufflenetv2.ShuffleNetV2): + def __init__(self, *args, **kwargs): + super(QuantizableShuffleNetV2, self).__init__(*args, inverted_residual=QuantizableInvertedResidual, **kwargs) + self.quant = torch.quantization.QuantStub() + self.dequant = torch.quantization.DeQuantStub() + + def forward(self, x): + x = self.quant(x) + x = self._forward_impl(x) + x = self.dequant(x) + return x + + def fuse_model(self): + r"""Fuse conv/bn/relu modules in shufflenetv2 model + + Fuse conv+bn+relu/ conv+relu/conv+bn modules to prepare for quantization. + Model is modified in place. Note that this operation does not change numerics + and the model after modification is in floating point + """ + + for name, m in self._modules.items(): + if name in ["conv1", "conv5"]: + torch.quantization.fuse_modules(m, [["0", "1", "2"]], inplace=True) + for m in self.modules(): + if type(m) == QuantizableInvertedResidual: + if len(m.branch1._modules.items()) > 0: + torch.quantization.fuse_modules( + m.branch1, [["0", "1"], ["2", "3", "4"]], inplace=True + ) + torch.quantization.fuse_modules( + m.branch2, + [["0", "1", "2"], ["3", "4"], ["5", "6", "7"]], + inplace=True, + ) + + +def _shufflenetv2(arch, pretrained, progress, quantize, *args, **kwargs): + model = QuantizableShuffleNetV2(*args, **kwargs) + _replace_relu(model) + + if quantize: + # TODO use pretrained as a string to specify the backend + backend = 'fbgemm' + quantize_model(model, backend) + else: + assert pretrained in [True, False] + + if pretrained: + if quantize: + model_url = quant_model_urls[arch + '_' + backend] + else: + model_url = shufflenetv2.model_urls[arch] + + state_dict = load_state_dict_from_url(model_url, + progress=progress) + + model.load_state_dict(state_dict) + return model + + +def shufflenet_v2_x0_5(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a ShuffleNetV2 with 0.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress, quantize, + [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs) + + +def shufflenet_v2_x1_0(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a ShuffleNetV2 with 1.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress, quantize, + [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs) + + +def shufflenet_v2_x1_5(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a ShuffleNetV2 with 1.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress, quantize, + [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs) + + +def shufflenet_v2_x2_0(pretrained=False, progress=True, quantize=False, **kwargs): + """ + Constructs a ShuffleNetV2 with 2.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + quantize (bool): If True, return a quantized version of the model + """ + return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress, quantize, + [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bf23c9a9332f349e6d7ffbb922347fcd9b4560b7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/quantization/utils.py @@ -0,0 +1,40 @@ +import torch +from torch import nn + + +def _replace_relu(module): + reassign = {} + for name, mod in module.named_children(): + _replace_relu(mod) + # Checking for explicit type instead of instance + # as we only want to replace modules of the exact type + # not inherited classes + if type(mod) == nn.ReLU or type(mod) == nn.ReLU6: + reassign[name] = nn.ReLU(inplace=False) + + for key, value in reassign.items(): + module._modules[key] = value + + +def quantize_model(model, backend): + _dummy_input_data = torch.rand(1, 3, 299, 299) + if backend not in torch.backends.quantized.supported_engines: + raise RuntimeError("Quantized backend not supported ") + torch.backends.quantized.engine = backend + model.eval() + # Make sure that weight qconfig matches that of the serialized models + if backend == 'fbgemm': + model.qconfig = torch.quantization.QConfig( + activation=torch.quantization.default_observer, + weight=torch.quantization.default_per_channel_weight_observer) + elif backend == 'qnnpack': + model.qconfig = torch.quantization.QConfig( + activation=torch.quantization.default_observer, + weight=torch.quantization.default_weight_observer) + + model.fuse_model() + torch.quantization.prepare(model, inplace=True) + model(_dummy_input_data) + torch.quantization.convert(model, inplace=True) + + return diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/resnet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/resnet.py new file mode 100644 index 
0000000000000000000000000000000000000000..e772650aaaf0f712f791d2a595c382c48117883d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/resnet.py @@ -0,0 +1,389 @@ +import torch +from torch import Tensor +import torch.nn as nn +from .utils import load_state_dict_from_url +from typing import Type, Any, Callable, Union, List, Optional + + +__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', + 'resnet152', 'resnext50_32x4d', 'resnext101_32x8d', + 'wide_resnet50_2', 'wide_resnet101_2'] + + +model_urls = { + 'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth', + 'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth', + 'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth', + 'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth', + 'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth', + 'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth', + 'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth', + 'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth', + 'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth', +} + + +def conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d: + """3x3 convolution with padding""" + return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, + padding=dilation, groups=groups, bias=False, dilation=dilation) + + +def conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d: + """1x1 convolution""" + return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False) + + +class BasicBlock(nn.Module): + expansion: int = 1 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(BasicBlock, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + if groups != 1 or base_width != 64: + raise ValueError('BasicBlock only supports groups=1 and base_width=64') + if dilation > 1: + raise NotImplementedError("Dilation > 1 not supported in BasicBlock") + # Both self.conv1 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv3x3(inplanes, planes, stride) + self.bn1 = norm_layer(planes) + self.relu = nn.ReLU(inplace=True) + self.conv2 = conv3x3(planes, planes) + self.bn2 = norm_layer(planes) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2) + # while original implementation places the stride at the first 1x1 convolution(self.conv1) + # according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385. + # This variant is also known as ResNet V1.5 and improves accuracy according to + # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch. 
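+ # `expansion` is the ratio of the block's output channels to its `planes` argument:
+ # conv3 projects the 3x3 conv's `width` channels back out to planes * expansion.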
+ + expansion: int = 4 + + def __init__( + self, + inplanes: int, + planes: int, + stride: int = 1, + downsample: Optional[nn.Module] = None, + groups: int = 1, + base_width: int = 64, + dilation: int = 1, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(Bottleneck, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + width = int(planes * (base_width / 64.)) * groups + # Both self.conv2 and self.downsample layers downsample the input when stride != 1 + self.conv1 = conv1x1(inplanes, width) + self.bn1 = norm_layer(width) + self.conv2 = conv3x3(width, width, stride, groups, dilation) + self.bn2 = norm_layer(width) + self.conv3 = conv1x1(width, planes * self.expansion) + self.bn3 = norm_layer(planes * self.expansion) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x: Tensor) -> Tensor: + identity = x + + out = self.conv1(x) + out = self.bn1(out) + out = self.relu(out) + + out = self.conv2(out) + out = self.bn2(out) + out = self.relu(out) + + out = self.conv3(out) + out = self.bn3(out) + + if self.downsample is not None: + identity = self.downsample(x) + + out += identity + out = self.relu(out) + + return out + + +class ResNet(nn.Module): + + def __init__( + self, + block: Type[Union[BasicBlock, Bottleneck]], + layers: List[int], + num_classes: int = 1000, + zero_init_residual: bool = False, + groups: int = 1, + width_per_group: int = 64, + replace_stride_with_dilation: Optional[List[bool]] = None, + norm_layer: Optional[Callable[..., nn.Module]] = None + ) -> None: + super(ResNet, self).__init__() + if norm_layer is None: + norm_layer = nn.BatchNorm2d + self._norm_layer = norm_layer + + self.inplanes = 64 + self.dilation = 1 + if replace_stride_with_dilation is None: + # each element in the tuple indicates if we should replace + # the 2x2 stride with a dilated convolution instead + replace_stride_with_dilation = [False, False, False] + if len(replace_stride_with_dilation) != 3: + raise ValueError("replace_stride_with_dilation should be None " + "or a 3-element tuple, got {}".format(replace_stride_with_dilation)) + self.groups = groups + self.base_width = width_per_group + self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3, + bias=False) + self.bn1 = norm_layer(self.inplanes) + self.relu = nn.ReLU(inplace=True) + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + self.layer1 = self._make_layer(block, 64, layers[0]) + self.layer2 = self._make_layer(block, 128, layers[1], stride=2, + dilate=replace_stride_with_dilation[0]) + self.layer3 = self._make_layer(block, 256, layers[2], stride=2, + dilate=replace_stride_with_dilation[1]) + self.layer4 = self._make_layer(block, 512, layers[3], stride=2, + dilate=replace_stride_with_dilation[2]) + self.avgpool = nn.AdaptiveAvgPool2d((1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + + # Zero-initialize the last BN in each residual branch, + # so that the residual branch starts with zeros, and each residual block behaves like an identity. 
+ # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677 + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type] + elif isinstance(m, BasicBlock): + nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type] + + def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int, + stride: int = 1, dilate: bool = False) -> nn.Sequential: + norm_layer = self._norm_layer + downsample = None + previous_dilation = self.dilation + if dilate: + self.dilation *= stride + stride = 1 + if stride != 1 or self.inplanes != planes * block.expansion: + downsample = nn.Sequential( + conv1x1(self.inplanes, planes * block.expansion, stride), + norm_layer(planes * block.expansion), + ) + + layers = [] + layers.append(block(self.inplanes, planes, stride, downsample, self.groups, + self.base_width, previous_dilation, norm_layer)) + self.inplanes = planes * block.expansion + for _ in range(1, blocks): + layers.append(block(self.inplanes, planes, groups=self.groups, + base_width=self.base_width, dilation=self.dilation, + norm_layer=norm_layer)) + + return nn.Sequential(*layers) + + def _forward_impl(self, x: Tensor) -> Tensor: + # See note [TorchScript super()] + x = self.conv1(x) + x = self.bn1(x) + x = self.relu(x) + x = self.maxpool(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.fc(x) + + return x + + def forward(self, x: Tensor) -> Tensor: + return self._forward_impl(x) + + +def _resnet( + arch: str, + block: Type[Union[BasicBlock, Bottleneck]], + layers: List[int], + pretrained: bool, + progress: bool, + **kwargs: Any +) -> ResNet: + model = ResNet(block, layers, **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNet-18 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, + **kwargs) + + +def resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNet-34 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNet-50 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, + **kwargs) + + +def resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNet-101 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, + **kwargs) + + +def resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNet-152 model from + `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, + **kwargs) + + +def resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNeXt-50 32x4d model from + `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 4 + return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""ResNeXt-101 32x8d model from + `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['groups'] = 32 + kwargs['width_per_group'] = 8 + return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) + + +def wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""Wide ResNet-50-2 model from + `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3], + pretrained, progress, **kwargs) + + +def wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet: + r"""Wide ResNet-101-2 model from + `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_. + + The model is the same as ResNet except for the bottleneck number of channels + which is twice larger in every block. The number of channels in outer 1x1 + convolutions is the same, e.g. 
last block in ResNet-50 has 2048-512-2048 + channels, and in Wide ResNet-50-2 has 2048-1024-2048. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + kwargs['width_per_group'] = 64 * 2 + return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3], + pretrained, progress, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb6633d7fb57e61e6cfd305f33cd7a868875aa67 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/__init__.py @@ -0,0 +1,4 @@ +from .segmentation import * +from .fcn import * +from .deeplabv3 import * +from .lraspp import * diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..176b7490038e8c9b84ad9e0b933a2a9ecd08b608 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/_utils.py @@ -0,0 +1,33 @@ +from collections import OrderedDict + +from torch import nn +from torch.nn import functional as F + + +class _SimpleSegmentationModel(nn.Module): + __constants__ = ['aux_classifier'] + + def __init__(self, backbone, classifier, aux_classifier=None): + super(_SimpleSegmentationModel, self).__init__() + self.backbone = backbone + self.classifier = classifier + self.aux_classifier = aux_classifier + + def forward(self, x): + input_shape = x.shape[-2:] + # contract: features is a dict of tensors + features = self.backbone(x) + + result = OrderedDict() + x = features["out"] + x = self.classifier(x) + x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False) + result["out"] = x + + if self.aux_classifier is not None: + x = features["aux"] + x = self.aux_classifier(x) + x = F.interpolate(x, size=input_shape, mode='bilinear', align_corners=False) + result["aux"] = x + + return result diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/deeplabv3.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/deeplabv3.py new file mode 100644 index 0000000000000000000000000000000000000000..7acc013ccb1434f1f6e7cbc2eb93bf6ae7d25405 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/deeplabv3.py @@ -0,0 +1,93 @@ +import torch +from torch import nn +from torch.nn import functional as F + +from ._utils import _SimpleSegmentationModel + + +__all__ = ["DeepLabV3"] + + +class DeepLabV3(_SimpleSegmentationModel): + """ + Implements DeepLabV3 model from + `"Rethinking Atrous Convolution for Semantic Image Segmentation" + <https://arxiv.org/abs/1706.05587>`_. + + Args: + backbone (nn.Module): the network used to compute the features for the model. + The backbone should return an OrderedDict[Tensor], with the key being + "out" for the last feature map used, and "aux" if an auxiliary classifier + is used. + classifier (nn.Module): module that takes the "out" element returned from + the backbone and returns a dense prediction. 
+ aux_classifier (nn.Module, optional): auxiliary classifier used during training + """ + pass + + +class DeepLabHead(nn.Sequential): + def __init__(self, in_channels, num_classes): + super(DeepLabHead, self).__init__( + ASPP(in_channels, [12, 24, 36]), + nn.Conv2d(256, 256, 3, padding=1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(), + nn.Conv2d(256, num_classes, 1) + ) + + +class ASPPConv(nn.Sequential): + def __init__(self, in_channels, out_channels, dilation): + modules = [ + nn.Conv2d(in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU() + ] + super(ASPPConv, self).__init__(*modules) + + +class ASPPPooling(nn.Sequential): + def __init__(self, in_channels, out_channels): + super(ASPPPooling, self).__init__( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU()) + + def forward(self, x): + size = x.shape[-2:] + for mod in self: + x = mod(x) + return F.interpolate(x, size=size, mode='bilinear', align_corners=False) + + +class ASPP(nn.Module): + def __init__(self, in_channels, atrous_rates, out_channels=256): + super(ASPP, self).__init__() + modules = [] + modules.append(nn.Sequential( + nn.Conv2d(in_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU())) + + rates = tuple(atrous_rates) + for rate in rates: + modules.append(ASPPConv(in_channels, out_channels, rate)) + + modules.append(ASPPPooling(in_channels, out_channels)) + + self.convs = nn.ModuleList(modules) + + self.project = nn.Sequential( + nn.Conv2d(len(self.convs) * out_channels, out_channels, 1, bias=False), + nn.BatchNorm2d(out_channels), + nn.ReLU(), + nn.Dropout(0.5)) + + def forward(self, x): + res = [] + for conv in self.convs: + res.append(conv(x)) + res = torch.cat(res, dim=1) + return self.project(res) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/fcn.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/fcn.py new file mode 100644 index 0000000000000000000000000000000000000000..3c695b5316753b6e93723042729a9d8dbe9c1f1d --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/fcn.py @@ -0,0 +1,36 @@ +from torch import nn + +from ._utils import _SimpleSegmentationModel + + +__all__ = ["FCN"] + + +class FCN(_SimpleSegmentationModel): + """ + Implements a Fully-Convolutional Network for semantic segmentation. + + Args: + backbone (nn.Module): the network used to compute the features for the model. + The backbone should return an OrderedDict[Tensor], with the key being + "out" for the last feature map used, and "aux" if an auxiliary classifier + is used. + classifier (nn.Module): module that takes the "out" element returned from + the backbone and returns a dense prediction. 
+ aux_classifier (nn.Module, optional): auxiliary classifier used during training + """ + pass + + +class FCNHead(nn.Sequential): + def __init__(self, in_channels, channels): + inter_channels = in_channels // 4 + layers = [ + nn.Conv2d(in_channels, inter_channels, 3, padding=1, bias=False), + nn.BatchNorm2d(inter_channels), + nn.ReLU(), + nn.Dropout(0.1), + nn.Conv2d(inter_channels, channels, 1) + ] + + super(FCNHead, self).__init__(*layers) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/lraspp.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/lraspp.py new file mode 100644 index 0000000000000000000000000000000000000000..44cd9b1e77307aae2c44895acab85b5832b6f348 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/lraspp.py @@ -0,0 +1,69 @@ +from collections import OrderedDict + +from torch import nn, Tensor +from torch.nn import functional as F +from typing import Dict + + +__all__ = ["LRASPP"] + + +class LRASPP(nn.Module): + """ + Implements a Lite R-ASPP Network for semantic segmentation from + `"Searching for MobileNetV3" + <https://arxiv.org/abs/1905.02244>`_. + + Args: + backbone (nn.Module): the network used to compute the features for the model. + The backbone should return an OrderedDict[Tensor], with the key being + "high" for the high level feature map and "low" for the low level feature map. + low_channels (int): the number of channels of the low level features. + high_channels (int): the number of channels of the high level features. + num_classes (int): number of output classes of the model (including the background). + inter_channels (int, optional): the number of channels for intermediate computations. + """ + + def __init__(self, backbone, low_channels, high_channels, num_classes, inter_channels=128): + super().__init__() + self.backbone = backbone + self.classifier = LRASPPHead(low_channels, high_channels, num_classes, inter_channels) + + def forward(self, input): + features = self.backbone(input) + out = self.classifier(features) + out = F.interpolate(out, size=input.shape[-2:], mode='bilinear', align_corners=False) + + result = OrderedDict() + result["out"] = out + + return result + + +class LRASPPHead(nn.Module): + + def __init__(self, low_channels, high_channels, num_classes, inter_channels): + super().__init__() + self.cbr = nn.Sequential( + nn.Conv2d(high_channels, inter_channels, 1, bias=False), + nn.BatchNorm2d(inter_channels), + nn.ReLU(inplace=True) + ) + self.scale = nn.Sequential( + nn.AdaptiveAvgPool2d(1), + nn.Conv2d(high_channels, inter_channels, 1, bias=False), + nn.Sigmoid(), + ) + self.low_classifier = nn.Conv2d(low_channels, num_classes, 1) + self.high_classifier = nn.Conv2d(inter_channels, num_classes, 1) + + def forward(self, input: Dict[str, Tensor]) -> Tensor: + low = input["low"] + high = input["high"] + + x = self.cbr(high) + s = self.scale(high) + x = x * s + x = F.interpolate(x, size=low.shape[-2:], mode='bilinear', align_corners=False) + + return self.low_classifier(low) + self.high_classifier(x) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/segmentation.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..4f328974543357ccbafee669010fffb0df7f24c7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/segmentation/segmentation.py @@ -0,0 +1,194 @@ +from .._utils import 
IntermediateLayerGetter +from ..utils import load_state_dict_from_url +from .. import mobilenetv3 +from .. import resnet +from .deeplabv3 import DeepLabHead, DeepLabV3 +from .fcn import FCN, FCNHead +from .lraspp import LRASPP + + +__all__ = ['fcn_resnet50', 'fcn_resnet101', 'deeplabv3_resnet50', 'deeplabv3_resnet101', + 'deeplabv3_mobilenet_v3_large', 'lraspp_mobilenet_v3_large'] + + +model_urls = { + 'fcn_resnet50_coco': 'https://download.pytorch.org/models/fcn_resnet50_coco-1167a1af.pth', + 'fcn_resnet101_coco': 'https://download.pytorch.org/models/fcn_resnet101_coco-7ecb50ca.pth', + 'deeplabv3_resnet50_coco': 'https://download.pytorch.org/models/deeplabv3_resnet50_coco-cd0a2569.pth', + 'deeplabv3_resnet101_coco': 'https://download.pytorch.org/models/deeplabv3_resnet101_coco-586e9e4e.pth', + 'deeplabv3_mobilenet_v3_large_coco': + 'https://download.pytorch.org/models/deeplabv3_mobilenet_v3_large-fc3c493d.pth', + 'lraspp_mobilenet_v3_large_coco': 'https://download.pytorch.org/models/lraspp_mobilenet_v3_large-d234d4ea.pth', +} + + +def _segm_model(name, backbone_name, num_classes, aux, pretrained_backbone=True): + if 'resnet' in backbone_name: + backbone = resnet.__dict__[backbone_name]( + pretrained=pretrained_backbone, + replace_stride_with_dilation=[False, True, True]) + out_layer = 'layer4' + out_inplanes = 2048 + aux_layer = 'layer3' + aux_inplanes = 1024 + elif 'mobilenet_v3' in backbone_name: + backbone = mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features + + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. + stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + out_pos = stage_indices[-1] # use C5 which has output_stride = 16 + out_layer = str(out_pos) + out_inplanes = backbone[out_pos].out_channels + aux_pos = stage_indices[-4] # use C2 here which has output_stride = 8 + aux_layer = str(aux_pos) + aux_inplanes = backbone[aux_pos].out_channels + else: + raise NotImplementedError('backbone {} is not supported as of now'.format(backbone_name)) + + return_layers = {out_layer: 'out'} + if aux: + return_layers[aux_layer] = 'aux' + backbone = IntermediateLayerGetter(backbone, return_layers=return_layers) + + aux_classifier = None + if aux: + aux_classifier = FCNHead(aux_inplanes, num_classes) + + model_map = { + 'deeplabv3': (DeepLabHead, DeepLabV3), + 'fcn': (FCNHead, FCN), + } + classifier = model_map[name][0](out_inplanes, num_classes) + base_model = model_map[name][1] + + model = base_model(backbone, classifier, aux_classifier) + return model + + +def _load_model(arch_type, backbone, pretrained, progress, num_classes, aux_loss, **kwargs): + if pretrained: + aux_loss = True + kwargs["pretrained_backbone"] = False + model = _segm_model(arch_type, backbone, num_classes, aux_loss, **kwargs) + if pretrained: + _load_weights(model, arch_type, backbone, progress) + return model + + +def _load_weights(model, arch_type, backbone, progress): + arch = arch_type + '_' + backbone + '_coco' + model_url = model_urls.get(arch, None) + if model_url is None: + raise NotImplementedError('pretrained {} is not supported as of now'.format(arch)) + else: + state_dict = load_state_dict_from_url(model_url, progress=progress) + model.load_state_dict(state_dict) + + +def _segm_lraspp_mobilenetv3(backbone_name, num_classes, pretrained_backbone=True): + backbone = 
mobilenetv3.__dict__[backbone_name](pretrained=pretrained_backbone, dilated=True).features + + # Gather the indices of blocks which are strided. These are the locations of C1, ..., Cn-1 blocks. + # The first and last blocks are always included because they are the C0 (conv1) and Cn. + stage_indices = [0] + [i for i, b in enumerate(backbone) if getattr(b, "_is_cn", False)] + [len(backbone) - 1] + low_pos = stage_indices[-4] # use C2 here which has output_stride = 8 + high_pos = stage_indices[-1] # use C5 which has output_stride = 16 + low_channels = backbone[low_pos].out_channels + high_channels = backbone[high_pos].out_channels + + backbone = IntermediateLayerGetter(backbone, return_layers={str(low_pos): 'low', str(high_pos): 'high'}) + + model = LRASPP(backbone, low_channels, high_channels, num_classes) + return model + + +def fcn_resnet50(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a Fully-Convolutional Network model with a ResNet-50 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('fcn', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def fcn_resnet101(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a Fully-Convolutional Network model with a ResNet-101 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('fcn', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def deeplabv3_resnet50(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a ResNet-50 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('deeplabv3', 'resnet50', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def deeplabv3_resnet101(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a ResNet-101 backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): The number of classes + aux_loss (bool): If True, include an auxiliary classifier + """ + return _load_model('deeplabv3', 'resnet101', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def deeplabv3_mobilenet_v3_large(pretrained=False, progress=True, + num_classes=21, aux_loss=None, **kwargs): + """Constructs a DeepLabV3 model with a MobileNetV3-Large backbone. 
+ + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + aux_loss (bool): If True, it uses an auxiliary loss + """ + return _load_model('deeplabv3', 'mobilenet_v3_large', pretrained, progress, num_classes, aux_loss, **kwargs) + + +def lraspp_mobilenet_v3_large(pretrained=False, progress=True, num_classes=21, **kwargs): + """Constructs a Lite R-ASPP Network model with a MobileNetV3-Large backbone. + + Args: + pretrained (bool): If True, returns a model pre-trained on COCO train2017 which + contains the same classes as Pascal VOC + progress (bool): If True, displays a progress bar of the download to stderr + num_classes (int): number of output classes of the model (including the background) + """ + if kwargs.pop("aux_loss", False): + raise NotImplementedError('This model does not use auxiliary loss') + + backbone_name = 'mobilenet_v3_large' + model = _segm_lraspp_mobilenetv3(backbone_name, num_classes, **kwargs) + + if pretrained: + _load_weights(model, 'lraspp', backbone_name, progress) + + return model diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/shufflenetv2.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/shufflenetv2.py new file mode 100644 index 0000000000000000000000000000000000000000..9a4333eb10bb7b62e9e05165eb4c8bb6df0b3ac9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/shufflenetv2.py @@ -0,0 +1,231 @@ +import torch +from torch import Tensor +import torch.nn as nn +from .utils import load_state_dict_from_url +from typing import Callable, Any, List + + +__all__ = [ + 'ShuffleNetV2', 'shufflenet_v2_x0_5', 'shufflenet_v2_x1_0', + 'shufflenet_v2_x1_5', 'shufflenet_v2_x2_0' +] + +model_urls = { + 'shufflenetv2_x0.5': 'https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth', + 'shufflenetv2_x1.0': 'https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth', + 'shufflenetv2_x1.5': None, + 'shufflenetv2_x2.0': None, +} + + +def channel_shuffle(x: Tensor, groups: int) -> Tensor: + batchsize, num_channels, height, width = x.size() + channels_per_group = num_channels // groups + + # reshape + x = x.view(batchsize, groups, + channels_per_group, height, width) + + x = torch.transpose(x, 1, 2).contiguous() + + # flatten + x = x.view(batchsize, -1, height, width) + + return x + + +class InvertedResidual(nn.Module): + def __init__( + self, + inp: int, + oup: int, + stride: int + ) -> None: + super(InvertedResidual, self).__init__() + + if not (1 <= stride <= 3): + raise ValueError('illegal stride value') + self.stride = stride + + branch_features = oup // 2 + assert (self.stride != 1) or (inp == branch_features << 1) + + if self.stride > 1: + self.branch1 = nn.Sequential( + self.depthwise_conv(inp, inp, kernel_size=3, stride=self.stride, padding=1), + nn.BatchNorm2d(inp), + nn.Conv2d(inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + ) + else: + self.branch1 = nn.Sequential() + + self.branch2 = nn.Sequential( + nn.Conv2d(inp if (self.stride > 1) else branch_features, + branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + self.depthwise_conv(branch_features, branch_features, kernel_size=3, stride=self.stride, 
padding=1), + nn.BatchNorm2d(branch_features), + nn.Conv2d(branch_features, branch_features, kernel_size=1, stride=1, padding=0, bias=False), + nn.BatchNorm2d(branch_features), + nn.ReLU(inplace=True), + ) + + @staticmethod + def depthwise_conv( + i: int, + o: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + bias: bool = False + ) -> nn.Conv2d: + return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i) + + def forward(self, x: Tensor) -> Tensor: + if self.stride == 1: + x1, x2 = x.chunk(2, dim=1) + out = torch.cat((x1, self.branch2(x2)), dim=1) + else: + out = torch.cat((self.branch1(x), self.branch2(x)), dim=1) + + out = channel_shuffle(out, 2) + + return out + + +class ShuffleNetV2(nn.Module): + def __init__( + self, + stages_repeats: List[int], + stages_out_channels: List[int], + num_classes: int = 1000, + inverted_residual: Callable[..., nn.Module] = InvertedResidual + ) -> None: + super(ShuffleNetV2, self).__init__() + + if len(stages_repeats) != 3: + raise ValueError('expected stages_repeats as list of 3 positive ints') + if len(stages_out_channels) != 5: + raise ValueError('expected stages_out_channels as list of 5 positive ints') + self._stage_out_channels = stages_out_channels + + input_channels = 3 + output_channels = self._stage_out_channels[0] + self.conv1 = nn.Sequential( + nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False), + nn.BatchNorm2d(output_channels), + nn.ReLU(inplace=True), + ) + input_channels = output_channels + + self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1) + + # Static annotations for mypy + self.stage2: nn.Sequential + self.stage3: nn.Sequential + self.stage4: nn.Sequential + stage_names = ['stage{}'.format(i) for i in [2, 3, 4]] + for name, repeats, output_channels in zip( + stage_names, stages_repeats, self._stage_out_channels[1:]): + seq = [inverted_residual(input_channels, output_channels, 2)] + for i in range(repeats - 1): + seq.append(inverted_residual(output_channels, output_channels, 1)) + setattr(self, name, nn.Sequential(*seq)) + input_channels = output_channels + + output_channels = self._stage_out_channels[-1] + self.conv5 = nn.Sequential( + nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False), + nn.BatchNorm2d(output_channels), + nn.ReLU(inplace=True), + ) + + self.fc = nn.Linear(output_channels, num_classes) + + def _forward_impl(self, x: Tensor) -> Tensor: + # See note [TorchScript super()] + x = self.conv1(x) + x = self.maxpool(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.stage4(x) + x = self.conv5(x) + x = x.mean([2, 3]) # globalpool + x = self.fc(x) + return x + + def forward(self, x: Tensor) -> Tensor: + return self._forward_impl(x) + + +def _shufflenetv2(arch: str, pretrained: bool, progress: bool, *args: Any, **kwargs: Any) -> ShuffleNetV2: + model = ShuffleNetV2(*args, **kwargs) + + if pretrained: + model_url = model_urls[arch] + if model_url is None: + raise NotImplementedError('pretrained {} is not supported as of now'.format(arch)) + else: + state_dict = load_state_dict_from_url(model_url, progress=progress) + model.load_state_dict(state_dict) + + return model + + +def shufflenet_v2_x0_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2: + """ + Constructs a ShuffleNetV2 with 0.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. 
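    A short usage sketch (added here for illustration; random weights, ImageNet-sized input assumed), importing straight from this module::

        >>> import torch
        >>> from torchvision.models.shufflenetv2 import shufflenet_v2_x0_5
        >>> model = shufflenet_v2_x0_5(pretrained=False).eval()
        >>> with torch.no_grad():
        ...     logits = model(torch.rand(1, 3, 224, 224))
        >>> logits.shape    # 1000 ImageNet classes by default
        torch.Size([1, 1000])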
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x0.5', pretrained, progress, + [4, 8, 4], [24, 48, 96, 192, 1024], **kwargs) + + +def shufflenet_v2_x1_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2: + """ + Constructs a ShuffleNetV2 with 1.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x1.0', pretrained, progress, + [4, 8, 4], [24, 116, 232, 464, 1024], **kwargs) + + +def shufflenet_v2_x1_5(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2: + """ + Constructs a ShuffleNetV2 with 1.5x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x1.5', pretrained, progress, + [4, 8, 4], [24, 176, 352, 704, 1024], **kwargs) + + +def shufflenet_v2_x2_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ShuffleNetV2: + """ + Constructs a ShuffleNetV2 with 2.0x output channels, as described in + `"ShuffleNet V2: Practical Guidelines for Efficient CNN Architecture Design" + <https://arxiv.org/abs/1807.11164>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _shufflenetv2('shufflenetv2_x2.0', pretrained, progress, + [4, 8, 4], [24, 244, 488, 976, 2048], **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/squeezenet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/squeezenet.py new file mode 100644 index 0000000000000000000000000000000000000000..7830e4b70ef4fda0db303bef3ea3f7d8ae6ce551 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/squeezenet.py @@ -0,0 +1,147 @@ +import torch +import torch.nn as nn +import torch.nn.init as init +from .utils import load_state_dict_from_url +from typing import Any + +__all__ = ['SqueezeNet', 'squeezenet1_0', 'squeezenet1_1'] + +model_urls = { + 'squeezenet1_0': 'https://download.pytorch.org/models/squeezenet1_0-b66bff10.pth', + 'squeezenet1_1': 'https://download.pytorch.org/models/squeezenet1_1-b8a52dc0.pth', +} + + +class Fire(nn.Module): + + def __init__( + self, + inplanes: int, + squeeze_planes: int, + expand1x1_planes: int, + expand3x3_planes: int + ) -> None: + super(Fire, self).__init__() + self.inplanes = inplanes + self.squeeze = nn.Conv2d(inplanes, squeeze_planes, kernel_size=1) + self.squeeze_activation = nn.ReLU(inplace=True) + self.expand1x1 = nn.Conv2d(squeeze_planes, expand1x1_planes, + kernel_size=1) + self.expand1x1_activation = nn.ReLU(inplace=True) + self.expand3x3 = nn.Conv2d(squeeze_planes, expand3x3_planes, + kernel_size=3, padding=1) + self.expand3x3_activation = nn.ReLU(inplace=True) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.squeeze_activation(self.squeeze(x)) + return torch.cat([ + 
self.expand1x1_activation(self.expand1x1(x)), + self.expand3x3_activation(self.expand3x3(x)) + ], 1) + + +class SqueezeNet(nn.Module): + + def __init__( + self, + version: str = '1_0', + num_classes: int = 1000 + ) -> None: + super(SqueezeNet, self).__init__() + self.num_classes = num_classes + if version == '1_0': + self.features = nn.Sequential( + nn.Conv2d(3, 96, kernel_size=7, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(96, 16, 64, 64), + Fire(128, 16, 64, 64), + Fire(128, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 32, 128, 128), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(512, 64, 256, 256), + ) + elif version == '1_1': + self.features = nn.Sequential( + nn.Conv2d(3, 64, kernel_size=3, stride=2), + nn.ReLU(inplace=True), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(64, 16, 64, 64), + Fire(128, 16, 64, 64), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(128, 32, 128, 128), + Fire(256, 32, 128, 128), + nn.MaxPool2d(kernel_size=3, stride=2, ceil_mode=True), + Fire(256, 48, 192, 192), + Fire(384, 48, 192, 192), + Fire(384, 64, 256, 256), + Fire(512, 64, 256, 256), + ) + else: + # FIXME: Is this needed? SqueezeNet should only be called from the + # FIXME: squeezenet1_x() functions + # FIXME: This checking is not done for the other models + raise ValueError("Unsupported SqueezeNet version {version}:" + "1_0 or 1_1 expected".format(version=version)) + + # Final convolution is initialized differently from the rest + final_conv = nn.Conv2d(512, self.num_classes, kernel_size=1) + self.classifier = nn.Sequential( + nn.Dropout(p=0.5), + final_conv, + nn.ReLU(inplace=True), + nn.AdaptiveAvgPool2d((1, 1)) + ) + + for m in self.modules(): + if isinstance(m, nn.Conv2d): + if m is final_conv: + init.normal_(m.weight, mean=0.0, std=0.01) + else: + init.kaiming_uniform_(m.weight) + if m.bias is not None: + init.constant_(m.bias, 0) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.classifier(x) + return torch.flatten(x, 1) + + +def _squeezenet(version: str, pretrained: bool, progress: bool, **kwargs: Any) -> SqueezeNet: + model = SqueezeNet(version, **kwargs) + if pretrained: + arch = 'squeezenet' + version + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def squeezenet1_0(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> SqueezeNet: + r"""SqueezeNet model architecture from the `"SqueezeNet: AlexNet-level + accuracy with 50x fewer parameters and <0.5MB model size" + <https://arxiv.org/abs/1602.07360>`_ paper. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _squeezenet('1_0', pretrained, progress, **kwargs) + + +def squeezenet1_1(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> SqueezeNet: + r"""SqueezeNet 1.1 model from the `official SqueezeNet repo + <https://github.com/DeepScale/SqueezeNet/tree/master/SqueezeNet_v1.1>`_. + SqueezeNet 1.1 has 2.4x less computation and slightly fewer parameters + than SqueezeNet 1.0, without sacrificing accuracy. 
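    Both versions are assembled from the ``Fire`` module defined above; a small illustrative sketch of its channel arithmetic (a squeeze 1x1 conv followed by parallel 1x1 and 3x3 expand convs whose outputs are concatenated, so the output has ``expand1x1_planes + expand3x3_planes`` channels and the spatial size is preserved)::

        >>> import torch
        >>> from torchvision.models.squeezenet import Fire
        >>> fire = Fire(inplanes=96, squeeze_planes=16, expand1x1_planes=64, expand3x3_planes=64)
        >>> fire(torch.rand(1, 96, 54, 54)).shape   # 64 + 64 expanded channels
        torch.Size([1, 128, 54, 54])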
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _squeezenet('1_1', pretrained, progress, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..638ef07cd85ea5135920ed1c7364e32297a21030 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/utils.py @@ -0,0 +1,4 @@ +try: + from torch.hub import load_state_dict_from_url +except ImportError: + from torch.utils.model_zoo import load_url as load_state_dict_from_url diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/vgg.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/vgg.py new file mode 100644 index 0000000000000000000000000000000000000000..20da5c18a575e67a5e19a3d719a626ee2deb3ada --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/vgg.py @@ -0,0 +1,190 @@ +import torch +import torch.nn as nn +from .utils import load_state_dict_from_url +from typing import Union, List, Dict, Any, cast + + +__all__ = [ + 'VGG', 'vgg11', 'vgg11_bn', 'vgg13', 'vgg13_bn', 'vgg16', 'vgg16_bn', + 'vgg19_bn', 'vgg19', +] + + +model_urls = { + 'vgg11': 'https://download.pytorch.org/models/vgg11-8a719046.pth', + 'vgg13': 'https://download.pytorch.org/models/vgg13-19584684.pth', + 'vgg16': 'https://download.pytorch.org/models/vgg16-397923af.pth', + 'vgg19': 'https://download.pytorch.org/models/vgg19-dcbb9e9d.pth', + 'vgg11_bn': 'https://download.pytorch.org/models/vgg11_bn-6002323d.pth', + 'vgg13_bn': 'https://download.pytorch.org/models/vgg13_bn-abd245e5.pth', + 'vgg16_bn': 'https://download.pytorch.org/models/vgg16_bn-6c64b313.pth', + 'vgg19_bn': 'https://download.pytorch.org/models/vgg19_bn-c79401a0.pth', +} + + +class VGG(nn.Module): + + def __init__( + self, + features: nn.Module, + num_classes: int = 1000, + init_weights: bool = True + ) -> None: + super(VGG, self).__init__() + self.features = features + self.avgpool = nn.AdaptiveAvgPool2d((7, 7)) + self.classifier = nn.Sequential( + nn.Linear(512 * 7 * 7, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, 4096), + nn.ReLU(True), + nn.Dropout(), + nn.Linear(4096, num_classes), + ) + if init_weights: + self._initialize_weights() + + def forward(self, x: torch.Tensor) -> torch.Tensor: + x = self.features(x) + x = self.avgpool(x) + x = torch.flatten(x, 1) + x = self.classifier(x) + return x + + def _initialize_weights(self) -> None: + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm2d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def make_layers(cfg: List[Union[str, int]], batch_norm: bool = False) -> nn.Sequential: + layers: List[nn.Module] = [] + in_channels = 3 + for v in cfg: + if v == 'M': + layers += [nn.MaxPool2d(kernel_size=2, stride=2)] + else: + v = cast(int, v) + conv2d = nn.Conv2d(in_channels, v, kernel_size=3, padding=1) + if batch_norm: + layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU(inplace=True)] + else: + layers += [conv2d, nn.ReLU(inplace=True)] + in_channels = v + return nn.Sequential(*layers) + + +cfgs: Dict[str, 
List[Union[str, int]]] = { + 'A': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'B': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'], + 'D': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'], + 'E': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'], +} + + +def _vgg(arch: str, cfg: str, batch_norm: bool, pretrained: bool, progress: bool, **kwargs: Any) -> VGG: + if pretrained: + kwargs['init_weights'] = False + model = VGG(make_layers(cfgs[cfg], batch_norm=batch_norm), **kwargs) + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def vgg11(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") from + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11', 'A', False, pretrained, progress, **kwargs) + + +def vgg11_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 11-layer model (configuration "A") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg11_bn', 'A', True, pretrained, progress, **kwargs) + + +def vgg13(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13', 'B', False, pretrained, progress, **kwargs) + + +def vgg13_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 13-layer model (configuration "B") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg13_bn', 'B', True, pretrained, progress, **kwargs) + + +def vgg16(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16', 'D', False, pretrained, progress, **kwargs) + + +def vgg16_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 16-layer model (configuration "D") with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. 
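    A brief sketch (for illustration only) relating the ``cfgs`` table above to the resulting feature extractor: configuration "D" contains five ``'M'`` max-pool stages, so a 224x224 input is downsampled by a factor of 32::

        >>> import torch
        >>> from torchvision.models.vgg import vgg16_bn
        >>> model = vgg16_bn(pretrained=False).eval()
        >>> model.features(torch.rand(1, 3, 224, 224)).shape
        torch.Size([1, 512, 7, 7])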
+ + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg16_bn', 'D', True, pretrained, progress, **kwargs) + + +def vgg19(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration "E") + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19', 'E', False, pretrained, progress, **kwargs) + + +def vgg19_bn(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> VGG: + r"""VGG 19-layer model (configuration 'E') with batch normalization + `"Very Deep Convolutional Networks For Large-Scale Image Recognition" <https://arxiv.org/pdf/1409.1556.pdf>`_. + + Args: + pretrained (bool): If True, returns a model pre-trained on ImageNet + progress (bool): If True, displays a progress bar of the download to stderr + """ + return _vgg('vgg19_bn', 'E', True, pretrained, progress, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/README.md b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/README.md new file mode 100644 index 0000000000000000000000000000000000000000..23dd673d053a0c53694269a37afca4263484b48e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/README.md @@ -0,0 +1,60 @@ +## Video classification models + +Starting with version `0.4.0` we have introduced support for basic video tasks and video classification modelling. +At the moment, our pretraining consists of base implementation of popular resnet-based video models [0], together with their +basic variant pre-trained on Kinetics400 [1]. Although this is a standard benchmark pre-training, we are always considering what is the best for the community. + +Additional documentation can be found [here](https://pytorch.org/docs/stable/torchvision/models.html#video-classification). + +### Kinetics400 dataset pretraining parameters + +See reference training script [here](https://github.com/pytorch/vision/blob/master/references/video_classification/train.py): + +- input size: [3, 16, 112, 112] +- input space: RGB +- input range: [0, 1] +- mean: [0.43216, 0.394666, 0.37645] +- std: [0.22803, 0.22145, 0.216989] +- number of classes: 400 + +Input data augmentations at training time (with optional parameters): + +0. ToTensor +1. Resize (128, 171) +2. Random horizontal flip (0.5) +3. Normalization (mean, std, see values above) +4. Random Crop (112, 112) + +Input data augmentations at validation time (with optional parameters): + +0. ToTensor +1. Resize (128, 171) +2. Normalization (mean, std, see values above) +3. 
Center Crop (112, 112) + +This translates in the following set of command-line arguments (please note that learning rate and batch size end up being scaled by the number of GPUs; all our models were trained on 8 nodes with 8 V100 GPUs each for a total of 64 GPUs): +``` +# number of frames per clip +--clip_len 16 \ +# allow for temporal jittering +--clips_per_video 5 \ +--batch-size 24 \ +--epochs 45 \ +--lr 0.01 \ +# we use 10 epochs for linear warmup +--lr-warmup-epochs 10 \ +# learning rate is decayed at 20, 30, and 40 epoch by a factor of 10 +--lr-milestones 20, 30, 40 \ +--lr-gamma 0.1 +``` + +### Additional video modelling resources + +- [Video Model Zoo](https://github.com/facebookresearch/VMZ) +- [PySlowFast](https://github.com/facebookresearch/SlowFast) + +### References + +[0] _D. Tran, H. Wang, L. Torresani, J. Ray, Y. LeCun and M. Paluri_: A Closer Look at Spatiotemporal Convolutions for Action Recognition. _CVPR 2018_ ([paper](https://research.fb.com/wp-content/uploads/2018/04/a-closer-look-at-spatiotemporal-convolutions-for-action-recognition.pdf)) + +[1] _W. Kay, J. Carreira, K. Simonyan, B. Zhang, C. Hillier, S. Vijayanarasimhan, F. Viola, T. Green, T. Back, P. Natsev, M. Suleyman, A. Zisserman_: The Kinetics Human Action Video Dataset ([paper](https://arxiv.org/abs/1705.06950)) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b792ca6ecf7cbe1d51c5c1dd72f1f98328fda8b9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/__init__.py @@ -0,0 +1 @@ +from .resnet import * diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/resnet.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/resnet.py new file mode 100644 index 0000000000000000000000000000000000000000..e60d27c18b2c8983255ff9979fdf683f43c8d8db --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/models/video/resnet.py @@ -0,0 +1,340 @@ +import torch.nn as nn + +from ..utils import load_state_dict_from_url + + +__all__ = ['r3d_18', 'mc3_18', 'r2plus1d_18'] + +model_urls = { + 'r3d_18': 'https://download.pytorch.org/models/r3d_18-b3b3357e.pth', + 'mc3_18': 'https://download.pytorch.org/models/mc3_18-a90a0ba3.pth', + 'r2plus1d_18': 'https://download.pytorch.org/models/r2plus1d_18-91a641e6.pth', +} + + +class Conv3DSimple(nn.Conv3d): + def __init__(self, + in_planes, + out_planes, + midplanes=None, + stride=1, + padding=1): + + super(Conv3DSimple, self).__init__( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=(3, 3, 3), + stride=stride, + padding=padding, + bias=False) + + @staticmethod + def get_downsample_stride(stride): + return stride, stride, stride + + +class Conv2Plus1D(nn.Sequential): + + def __init__(self, + in_planes, + out_planes, + midplanes, + stride=1, + padding=1): + super(Conv2Plus1D, self).__init__( + nn.Conv3d(in_planes, midplanes, kernel_size=(1, 3, 3), + stride=(1, stride, stride), padding=(0, padding, padding), + bias=False), + nn.BatchNorm3d(midplanes), + nn.ReLU(inplace=True), + nn.Conv3d(midplanes, out_planes, kernel_size=(3, 1, 1), + stride=(stride, 1, 1), padding=(padding, 0, 0), + bias=False)) + + @staticmethod + def get_downsample_stride(stride): + return stride, stride, stride + + +class Conv3DNoTemporal(nn.Conv3d): + + def __init__(self, + in_planes, + out_planes, + midplanes=None, + stride=1, + 
padding=1): + + super(Conv3DNoTemporal, self).__init__( + in_channels=in_planes, + out_channels=out_planes, + kernel_size=(1, 3, 3), + stride=(1, stride, stride), + padding=(0, padding, padding), + bias=False) + + @staticmethod + def get_downsample_stride(stride): + return 1, stride, stride + + +class BasicBlock(nn.Module): + + expansion = 1 + + def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None): + midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) + + super(BasicBlock, self).__init__() + self.conv1 = nn.Sequential( + conv_builder(inplanes, planes, midplanes, stride), + nn.BatchNorm3d(planes), + nn.ReLU(inplace=True) + ) + self.conv2 = nn.Sequential( + conv_builder(planes, planes, midplanes), + nn.BatchNorm3d(planes) + ) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.conv2(out) + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class Bottleneck(nn.Module): + expansion = 4 + + def __init__(self, inplanes, planes, conv_builder, stride=1, downsample=None): + + super(Bottleneck, self).__init__() + midplanes = (inplanes * planes * 3 * 3 * 3) // (inplanes * 3 * 3 + 3 * planes) + + # 1x1x1 + self.conv1 = nn.Sequential( + nn.Conv3d(inplanes, planes, kernel_size=1, bias=False), + nn.BatchNorm3d(planes), + nn.ReLU(inplace=True) + ) + # Second kernel + self.conv2 = nn.Sequential( + conv_builder(planes, planes, midplanes, stride), + nn.BatchNorm3d(planes), + nn.ReLU(inplace=True) + ) + + # 1x1x1 + self.conv3 = nn.Sequential( + nn.Conv3d(planes, planes * self.expansion, kernel_size=1, bias=False), + nn.BatchNorm3d(planes * self.expansion) + ) + self.relu = nn.ReLU(inplace=True) + self.downsample = downsample + self.stride = stride + + def forward(self, x): + residual = x + + out = self.conv1(x) + out = self.conv2(out) + out = self.conv3(out) + + if self.downsample is not None: + residual = self.downsample(x) + + out += residual + out = self.relu(out) + + return out + + +class BasicStem(nn.Sequential): + """The default conv-batchnorm-relu stem + """ + def __init__(self): + super(BasicStem, self).__init__( + nn.Conv3d(3, 64, kernel_size=(3, 7, 7), stride=(1, 2, 2), + padding=(1, 3, 3), bias=False), + nn.BatchNorm3d(64), + nn.ReLU(inplace=True)) + + +class R2Plus1dStem(nn.Sequential): + """R(2+1)D stem is different than the default one as it uses separated 3D convolution + """ + def __init__(self): + super(R2Plus1dStem, self).__init__( + nn.Conv3d(3, 45, kernel_size=(1, 7, 7), + stride=(1, 2, 2), padding=(0, 3, 3), + bias=False), + nn.BatchNorm3d(45), + nn.ReLU(inplace=True), + nn.Conv3d(45, 64, kernel_size=(3, 1, 1), + stride=(1, 1, 1), padding=(1, 0, 0), + bias=False), + nn.BatchNorm3d(64), + nn.ReLU(inplace=True)) + + +class VideoResNet(nn.Module): + + def __init__(self, block, conv_makers, layers, + stem, num_classes=400, + zero_init_residual=False): + """Generic resnet video generator. + + Args: + block (nn.Module): resnet building block + conv_makers (list(functions)): generator function for each layer + layers (List[int]): number of blocks per layer + stem (nn.Module, optional): Resnet stem, if None, defaults to conv-bn-relu. Defaults to None. + num_classes (int, optional): Dimension of the final FC layer. Defaults to 400. + zero_init_residual (bool, optional): Zero init bottleneck residual BN. Defaults to False. 
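        Example (an illustrative hand-built R3D-18 configuration; it mirrors the ``r3d_18`` factory defined later in this file and imports module-level names that are not in ``__all__``; clip layout is (batch, channels, frames, height, width))::

            >>> import torch
            >>> from torchvision.models.video.resnet import VideoResNet, BasicBlock, Conv3DSimple, BasicStem
            >>> model = VideoResNet(block=BasicBlock, conv_makers=[Conv3DSimple] * 4,
            ...                     layers=[2, 2, 2, 2], stem=BasicStem).eval()
            >>> with torch.no_grad():
            ...     out = model(torch.rand(1, 3, 16, 112, 112))
            >>> out.shape    # 400 Kinetics classes by default
            torch.Size([1, 400])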
+ """ + super(VideoResNet, self).__init__() + self.inplanes = 64 + + self.stem = stem() + + self.layer1 = self._make_layer(block, conv_makers[0], 64, layers[0], stride=1) + self.layer2 = self._make_layer(block, conv_makers[1], 128, layers[1], stride=2) + self.layer3 = self._make_layer(block, conv_makers[2], 256, layers[2], stride=2) + self.layer4 = self._make_layer(block, conv_makers[3], 512, layers[3], stride=2) + + self.avgpool = nn.AdaptiveAvgPool3d((1, 1, 1)) + self.fc = nn.Linear(512 * block.expansion, num_classes) + + # init weights + self._initialize_weights() + + if zero_init_residual: + for m in self.modules(): + if isinstance(m, Bottleneck): + nn.init.constant_(m.bn3.weight, 0) + + def forward(self, x): + x = self.stem(x) + + x = self.layer1(x) + x = self.layer2(x) + x = self.layer3(x) + x = self.layer4(x) + + x = self.avgpool(x) + # Flatten the layer to fc + x = x.flatten(1) + x = self.fc(x) + + return x + + def _make_layer(self, block, conv_builder, planes, blocks, stride=1): + downsample = None + + if stride != 1 or self.inplanes != planes * block.expansion: + ds_stride = conv_builder.get_downsample_stride(stride) + downsample = nn.Sequential( + nn.Conv3d(self.inplanes, planes * block.expansion, + kernel_size=1, stride=ds_stride, bias=False), + nn.BatchNorm3d(planes * block.expansion) + ) + layers = [] + layers.append(block(self.inplanes, planes, conv_builder, stride, downsample)) + + self.inplanes = planes * block.expansion + for i in range(1, blocks): + layers.append(block(self.inplanes, planes, conv_builder)) + + return nn.Sequential(*layers) + + def _initialize_weights(self): + for m in self.modules(): + if isinstance(m, nn.Conv3d): + nn.init.kaiming_normal_(m.weight, mode='fan_out', + nonlinearity='relu') + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.BatchNorm3d): + nn.init.constant_(m.weight, 1) + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.Linear): + nn.init.normal_(m.weight, 0, 0.01) + nn.init.constant_(m.bias, 0) + + +def _video_resnet(arch, pretrained=False, progress=True, **kwargs): + model = VideoResNet(**kwargs) + + if pretrained: + state_dict = load_state_dict_from_url(model_urls[arch], + progress=progress) + model.load_state_dict(state_dict) + return model + + +def r3d_18(pretrained=False, progress=True, **kwargs): + """Construct 18 layer Resnet3D model as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: R3D-18 network + """ + + return _video_resnet('r3d_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv3DSimple] * 4, + layers=[2, 2, 2, 2], + stem=BasicStem, **kwargs) + + +def mc3_18(pretrained=False, progress=True, **kwargs): + """Constructor for 18 layer Mixed Convolution network as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: MC3 Network definition + """ + return _video_resnet('mc3_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv3DSimple] + [Conv3DNoTemporal] * 3, + layers=[2, 2, 2, 2], + stem=BasicStem, **kwargs) + + +def r2plus1d_18(pretrained=False, progress=True, **kwargs): + """Constructor for the 18 layer deep R(2+1)D network as in + https://arxiv.org/abs/1711.11248 + + Args: + pretrained (bool): If True, returns a model 
pre-trained on Kinetics-400 + progress (bool): If True, displays a progress bar of the download to stderr + + Returns: + nn.Module: R(2+1)D-18 network + """ + return _video_resnet('r2plus1d_18', + pretrained, progress, + block=BasicBlock, + conv_makers=[Conv2Plus1D] * 4, + layers=[2, 2, 2, 2], + stem=R2Plus1dStem, **kwargs) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..0ec189dbc2a864840516b70ebf057cdabae5dfb7 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/__init__.py @@ -0,0 +1,24 @@ +from .boxes import nms, batched_nms, remove_small_boxes, clip_boxes_to_image, box_area, box_iou, generalized_box_iou +from .boxes import box_convert +from .deform_conv import deform_conv2d, DeformConv2d +from .roi_align import roi_align, RoIAlign +from .roi_pool import roi_pool, RoIPool +from .ps_roi_align import ps_roi_align, PSRoIAlign +from .ps_roi_pool import ps_roi_pool, PSRoIPool +from .poolers import MultiScaleRoIAlign +from .feature_pyramid_network import FeaturePyramidNetwork +from .focal_loss import sigmoid_focal_loss + +from ._register_onnx_ops import _register_custom_op + +_register_custom_op() + + +__all__ = [ + 'deform_conv2d', 'DeformConv2d', 'nms', 'batched_nms', 'remove_small_boxes', + 'clip_boxes_to_image', 'box_convert', + 'box_area', 'box_iou', 'generalized_box_iou', 'roi_align', 'RoIAlign', 'roi_pool', + 'RoIPool', 'ps_roi_align', 'PSRoIAlign', 'ps_roi_pool', + 'PSRoIPool', 'MultiScaleRoIAlign', 'FeaturePyramidNetwork', + 'sigmoid_focal_loss' +] diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_box_convert.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_box_convert.py new file mode 100644 index 0000000000000000000000000000000000000000..5e0520fc48aad07b13af5de30969378cfaebbe50 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_box_convert.py @@ -0,0 +1,81 @@ +import torch +from torch import Tensor + + +def _box_cxcywh_to_xyxy(boxes: Tensor) -> Tensor: + """ + Converts bounding boxes from (cx, cy, w, h) format to (x1, y1, x2, y2) format. + (cx, cy) refers to center of bounding box + (w, h) are width and height of bounding box + Args: + boxes (Tensor[N, 4]): boxes in (cx, cy, w, h) format which will be converted. + + Returns: + boxes (Tensor(N, 4)): boxes in (x1, y1, x2, y2) format. + """ + # We need to change all 4 of them so some temporary variable is needed. + cx, cy, w, h = boxes.unbind(-1) + x1 = cx - 0.5 * w + y1 = cy - 0.5 * h + x2 = cx + 0.5 * w + y2 = cy + 0.5 * h + + boxes = torch.stack((x1, y1, x2, y2), dim=-1) + + return boxes + + +def _box_xyxy_to_cxcywh(boxes: Tensor) -> Tensor: + """ + Converts bounding boxes from (x1, y1, x2, y2) format to (cx, cy, w, h) format. + (x1, y1) refer to top left of bounding box + (x2, y2) refer to bottom right of bounding box + Args: + boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format which will be converted. + + Returns: + boxes (Tensor(N, 4)): boxes in (cx, cy, w, h) format. + """ + x1, y1, x2, y2 = boxes.unbind(-1) + cx = (x1 + x2) / 2 + cy = (y1 + y2) / 2 + w = x2 - x1 + h = y2 - y1 + + boxes = torch.stack((cx, cy, w, h), dim=-1) + + return boxes + + +def _box_xywh_to_xyxy(boxes: Tensor) -> Tensor: + """ + Converts bounding boxes from (x, y, w, h) format to (x1, y1, x2, y2) format. + (x, y) refers to top left of bouding box. + (w, h) refers to width and height of box. 
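    A worked example (added for illustration) through the public ``box_convert`` wrapper defined in ``boxes.py`` below, using a 10x20 box anchored at (5, 5)::

        >>> import torch
        >>> from torchvision.ops import box_convert
        >>> boxes_xywh = torch.tensor([[5.0, 5.0, 10.0, 20.0]])
        >>> box_convert(boxes_xywh, in_fmt='xywh', out_fmt='xyxy')   # x2 = x + w, y2 = y + h
        tensor([[ 5.,  5., 15., 25.]])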
+ Args: + boxes (Tensor[N, 4]): boxes in (x, y, w, h) which will be converted. + + Returns: + boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) format. + """ + x, y, w, h = boxes.unbind(-1) + boxes = torch.stack([x, y, x + w, y + h], dim=-1) + return boxes + + +def _box_xyxy_to_xywh(boxes: Tensor) -> Tensor: + """ + Converts bounding boxes from (x1, y1, x2, y2) format to (x, y, w, h) format. + (x1, y1) refer to top left of bounding box + (x2, y2) refer to bottom right of bounding box + Args: + boxes (Tensor[N, 4]): boxes in (x1, y1, x2, y2) which will be converted. + + Returns: + boxes (Tensor[N, 4]): boxes in (x, y, w, h) format. + """ + x1, y1, x2, y2 = boxes.unbind(-1) + w = x2 - x1 # x2 - x1 + h = y2 - y1 # y2 - y1 + boxes = torch.stack((x1, y1, w, h), dim=-1) + return boxes diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_register_onnx_ops.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_register_onnx_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..8e8ed33180392dfee67744d7bb1d47dd753ba424 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_register_onnx_ops.py @@ -0,0 +1,50 @@ +import sys +import torch +import warnings + +_onnx_opset_version = 11 + + +def _register_custom_op(): + from torch.onnx.symbolic_helper import parse_args, scalar_type_to_onnx, scalar_type_to_pytorch_type, \ + cast_pytorch_to_onnx + from torch.onnx.symbolic_opset9 import select, unsqueeze, squeeze, _cast_Long, reshape + + @parse_args('v', 'v', 'f') + def symbolic_multi_label_nms(g, boxes, scores, iou_threshold): + boxes = unsqueeze(g, boxes, 0) + scores = unsqueeze(g, unsqueeze(g, scores, 0), 0) + max_output_per_class = g.op('Constant', value_t=torch.tensor([sys.maxsize], dtype=torch.long)) + iou_threshold = g.op('Constant', value_t=torch.tensor([iou_threshold], dtype=torch.float)) + nms_out = g.op('NonMaxSuppression', boxes, scores, max_output_per_class, iou_threshold) + return squeeze(g, select(g, nms_out, 1, g.op('Constant', value_t=torch.tensor([2], dtype=torch.long))), 1) + + @parse_args('v', 'v', 'f', 'i', 'i', 'i', 'i') + def roi_align(g, input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio, aligned): + batch_indices = _cast_Long(g, squeeze(g, select(g, rois, 1, g.op('Constant', + value_t=torch.tensor([0], dtype=torch.long))), 1), False) + rois = select(g, rois, 1, g.op('Constant', value_t=torch.tensor([1, 2, 3, 4], dtype=torch.long))) + if aligned: + warnings.warn("ONNX export of ROIAlign with aligned=True does not match PyTorch when using malformed boxes," + " ONNX forces ROIs to be 1x1 or larger.") + scale = torch.tensor(0.5 / spatial_scale).to(dtype=torch.float) + rois = g.op("Sub", rois, scale) + + # ONNX doesn't support negative sampling_ratio + if sampling_ratio < 0: + warnings.warn("ONNX doesn't support negative sampling ratio," + "therefore is is set to 0 in order to be exported.") + sampling_ratio = 0 + return g.op('RoiAlign', input, rois, batch_indices, spatial_scale_f=spatial_scale, + output_height_i=pooled_height, output_width_i=pooled_width, sampling_ratio_i=sampling_ratio) + + @parse_args('v', 'v', 'f', 'i', 'i') + def roi_pool(g, input, rois, spatial_scale, pooled_height, pooled_width): + roi_pool = g.op('MaxRoiPool', input, rois, + pooled_shape_i=(pooled_height, pooled_width), spatial_scale_f=spatial_scale) + return roi_pool, None + + from torch.onnx import register_custom_op_symbolic + register_custom_op_symbolic('torchvision::nms', symbolic_multi_label_nms, _onnx_opset_version) + 
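    # The register_custom_op_symbolic calls map torchvision's custom ops onto
    # standard ONNX nodes (torchvision::nms -> NonMaxSuppression above,
    # torchvision::roi_align -> RoiAlign and torchvision::roi_pool -> MaxRoiPool
    # below), so models that use these ops can be exported with
    # torch.onnx.export at opset version 11.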
register_custom_op_symbolic('torchvision::roi_align', roi_align, _onnx_opset_version) + register_custom_op_symbolic('torchvision::roi_pool', roi_pool, _onnx_opset_version) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bb6287ad61624044300a8416a4abdacd14655639 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/_utils.py @@ -0,0 +1,36 @@ +import torch +from torch import Tensor +from typing import List + + +def _cat(tensors: List[Tensor], dim: int = 0) -> Tensor: + """ + Efficient version of torch.cat that avoids a copy if there is only a single element in a list + """ + # TODO add back the assert + # assert isinstance(tensors, (list, tuple)) + if len(tensors) == 1: + return tensors[0] + return torch.cat(tensors, dim) + + +def convert_boxes_to_roi_format(boxes: List[Tensor]) -> Tensor: + concat_boxes = _cat([b for b in boxes], dim=0) + temp = [] + for i, b in enumerate(boxes): + temp.append(torch.full_like(b[:, :1], i)) + ids = _cat(temp, dim=0) + rois = torch.cat([ids, concat_boxes], dim=1) + return rois + + +def check_roi_boxes_shape(boxes: Tensor): + if isinstance(boxes, (list, tuple)): + for _tensor in boxes: + assert _tensor.size(1) == 4, \ + 'The shape of the tensor in the boxes list is not correct as List[Tensor[L, 4]]' + elif isinstance(boxes, torch.Tensor): + assert boxes.size(1) == 5, 'The boxes tensor shape is not correct as Tensor[K, 5]' + else: + assert False, 'boxes is expected to be a Tensor[L, 5] or a List[Tensor[K, 4]]' + return diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/boxes.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/boxes.py new file mode 100644 index 0000000000000000000000000000000000000000..c1f176f4da939075b732e97be701723aa98cd5ad --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/boxes.py @@ -0,0 +1,299 @@ +import torch +from torch import Tensor +from typing import Tuple +from ._box_convert import _box_cxcywh_to_xyxy, _box_xyxy_to_cxcywh, _box_xywh_to_xyxy, _box_xyxy_to_xywh +import torchvision +from torchvision.extension import _assert_has_ops + + +def nms(boxes: Tensor, scores: Tensor, iou_threshold: float) -> Tensor: + """ + Performs non-maximum suppression (NMS) on the boxes according + to their intersection-over-union (IoU). + + NMS iteratively removes lower scoring boxes which have an + IoU greater than iou_threshold with another (higher scoring) + box. + + If multiple boxes have the exact same score and satisfy the IoU + criterion with respect to a reference box, the selected box is + not guaranteed to be the same between CPU and GPU. This is similar + to the behavior of argsort in PyTorch when repeated values are present. + + Args: + boxes (Tensor[N, 4])): boxes to perform NMS on. They + are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and + ``0 <= y1 < y2``. 
+ scores (Tensor[N]): scores for each one of the boxes + iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold + + Returns: + Tensor: int64 tensor with the indices of the elements that have been kept + by NMS, sorted in decreasing order of scores + """ + _assert_has_ops() + return torch.ops.torchvision.nms(boxes, scores, iou_threshold) + + +def batched_nms( + boxes: Tensor, + scores: Tensor, + idxs: Tensor, + iou_threshold: float, +) -> Tensor: + """ + Performs non-maximum suppression in a batched fashion. + + Each index value correspond to a category, and NMS + will not be applied between elements of different categories. + + Args: + boxes (Tensor[N, 4]): boxes where NMS will be performed. They + are expected to be in ``(x1, y1, x2, y2)`` format with ``0 <= x1 < x2`` and + ``0 <= y1 < y2``. + scores (Tensor[N]): scores for each one of the boxes + idxs (Tensor[N]): indices of the categories for each one of the boxes. + iou_threshold (float): discards all overlapping boxes with IoU > iou_threshold + + Returns: + Tensor: int64 tensor with the indices of the elements that have been kept by NMS, sorted + in decreasing order of scores + """ + # Benchmarks that drove the following thresholds are at + # https://github.com/pytorch/vision/issues/1311#issuecomment-781329339 + # Ideally for GPU we'd use a higher threshold + if boxes.numel() > 4_000 and not torchvision._is_tracing(): + return _batched_nms_vanilla(boxes, scores, idxs, iou_threshold) + else: + return _batched_nms_coordinate_trick(boxes, scores, idxs, iou_threshold) + + +@torch.jit._script_if_tracing +def _batched_nms_coordinate_trick( + boxes: Tensor, + scores: Tensor, + idxs: Tensor, + iou_threshold: float, +) -> Tensor: + # strategy: in order to perform NMS independently per class, + # we add an offset to all the boxes. The offset is dependent + # only on the class idx, and is large enough so that boxes + # from different classes do not overlap + if boxes.numel() == 0: + return torch.empty((0,), dtype=torch.int64, device=boxes.device) + max_coordinate = boxes.max() + offsets = idxs.to(boxes) * (max_coordinate + torch.tensor(1).to(boxes)) + boxes_for_nms = boxes + offsets[:, None] + keep = nms(boxes_for_nms, scores, iou_threshold) + return keep + + +@torch.jit._script_if_tracing +def _batched_nms_vanilla( + boxes: Tensor, + scores: Tensor, + idxs: Tensor, + iou_threshold: float, +) -> Tensor: + # Based on Detectron2 implementation, just manually call nms() on each class independently + keep_mask = torch.zeros_like(scores, dtype=torch.bool) + for class_id in torch.unique(idxs): + curr_indices = torch.where(idxs == class_id)[0] + curr_keep_indices = nms(boxes[curr_indices], scores[curr_indices], iou_threshold) + keep_mask[curr_indices[curr_keep_indices]] = True + keep_indices = torch.where(keep_mask)[0] + return keep_indices[scores[keep_indices].sort(descending=True)[1]] + + +def remove_small_boxes(boxes: Tensor, min_size: float) -> Tensor: + """ + Remove boxes which contains at least one side smaller than min_size. + + Args: + boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format + with ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 
+ min_size (float): minimum size + + Returns: + Tensor[K]: indices of the boxes that have both sides + larger than min_size + """ + ws, hs = boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1] + keep = (ws >= min_size) & (hs >= min_size) + keep = torch.where(keep)[0] + return keep + + +def clip_boxes_to_image(boxes: Tensor, size: Tuple[int, int]) -> Tensor: + """ + Clip boxes so that they lie inside an image of size `size`. + + Args: + boxes (Tensor[N, 4]): boxes in ``(x1, y1, x2, y2)`` format + with ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + size (Tuple[height, width]): size of the image + + Returns: + Tensor[N, 4]: clipped boxes + """ + dim = boxes.dim() + boxes_x = boxes[..., 0::2] + boxes_y = boxes[..., 1::2] + height, width = size + + if torchvision._is_tracing(): + boxes_x = torch.max(boxes_x, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) + boxes_x = torch.min(boxes_x, torch.tensor(width, dtype=boxes.dtype, device=boxes.device)) + boxes_y = torch.max(boxes_y, torch.tensor(0, dtype=boxes.dtype, device=boxes.device)) + boxes_y = torch.min(boxes_y, torch.tensor(height, dtype=boxes.dtype, device=boxes.device)) + else: + boxes_x = boxes_x.clamp(min=0, max=width) + boxes_y = boxes_y.clamp(min=0, max=height) + + clipped_boxes = torch.stack((boxes_x, boxes_y), dim=dim) + return clipped_boxes.reshape(boxes.shape) + + +def box_convert(boxes: Tensor, in_fmt: str, out_fmt: str) -> Tensor: + """ + Converts boxes from given in_fmt to out_fmt. + Supported in_fmt and out_fmt are: + + 'xyxy': boxes are represented via corners, x1, y1 being top left and x2, y2 being bottom right. + This is the format that torchvision utilities expect. + + 'xywh' : boxes are represented via corner, width and height, x1, y2 being top left, w, h being width and height. + + 'cxcywh' : boxes are represented via centre, width and height, cx, cy being center of box, w, h + being width and height. + + Args: + boxes (Tensor[N, 4]): boxes which will be converted. + in_fmt (str): Input format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh']. + out_fmt (str): Output format of given boxes. Supported formats are ['xyxy', 'xywh', 'cxcywh'] + + Returns: + Tensor[N, 4]: Boxes into converted format. + """ + + allowed_fmts = ("xyxy", "xywh", "cxcywh") + if in_fmt not in allowed_fmts or out_fmt not in allowed_fmts: + raise ValueError("Unsupported Bounding Box Conversions for given in_fmt and out_fmt") + + if in_fmt == out_fmt: + return boxes.clone() + + if in_fmt != 'xyxy' and out_fmt != 'xyxy': + # convert to xyxy and change in_fmt xyxy + if in_fmt == "xywh": + boxes = _box_xywh_to_xyxy(boxes) + elif in_fmt == "cxcywh": + boxes = _box_cxcywh_to_xyxy(boxes) + in_fmt = 'xyxy' + + if in_fmt == "xyxy": + if out_fmt == "xywh": + boxes = _box_xyxy_to_xywh(boxes) + elif out_fmt == "cxcywh": + boxes = _box_xyxy_to_cxcywh(boxes) + elif out_fmt == "xyxy": + if in_fmt == "xywh": + boxes = _box_xywh_to_xyxy(boxes) + elif in_fmt == "cxcywh": + boxes = _box_cxcywh_to_xyxy(boxes) + return boxes + + +def _upcast(t: Tensor) -> Tensor: + # Protects from numerical overflows in multiplications by upcasting to the equivalent higher type + if t.is_floating_point(): + return t if t.dtype in (torch.float32, torch.float64) else t.float() + else: + return t if t.dtype in (torch.int32, torch.int64) else t.int() + + +def box_area(boxes: Tensor) -> Tensor: + """ + Computes the area of a set of bounding boxes, which are specified by their + (x1, y1, x2, y2) coordinates. 
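    Example (an illustrative check with values that are easy to verify by hand)::

        >>> import torch
        >>> from torchvision.ops import box_area, box_iou
        >>> a = torch.tensor([[0., 0., 10., 10.]])
        >>> b = torch.tensor([[5., 5., 15., 15.]])
        >>> box_area(a)
        tensor([100.])
        >>> box_iou(a, b)   # intersection 25 over union 100 + 100 - 25 = 175
        tensor([[0.1429]])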
+ + Args: + boxes (Tensor[N, 4]): boxes for which the area will be computed. They + are expected to be in (x1, y1, x2, y2) format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + + Returns: + Tensor[N]: the area for each box + """ + boxes = _upcast(boxes) + return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1]) + + +# implementation from https://github.com/kuangliu/torchcv/blob/master/torchcv/utils/box.py +# with slight modifications +def _box_inter_union(boxes1: Tensor, boxes2: Tensor) -> Tuple[Tensor, Tensor]: + area1 = box_area(boxes1) + area2 = box_area(boxes2) + + lt = torch.max(boxes1[:, None, :2], boxes2[:, :2]) # [N,M,2] + rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:]) # [N,M,2] + + wh = _upcast(rb - lt).clamp(min=0) # [N,M,2] + inter = wh[:, :, 0] * wh[:, :, 1] # [N,M] + + union = area1[:, None] + area2 - inter + + return inter, union + + +def box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: + """ + Return intersection-over-union (Jaccard index) between two sets of boxes. + + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise IoU values for every element in boxes1 and boxes2 + """ + inter, union = _box_inter_union(boxes1, boxes2) + iou = inter / union + return iou + + +# Implementation adapted from https://github.com/facebookresearch/detr/blob/master/util/box_ops.py +def generalized_box_iou(boxes1: Tensor, boxes2: Tensor) -> Tensor: + """ + Return generalized intersection-over-union (Jaccard index) between two sets of boxes. + + Both sets of boxes are expected to be in ``(x1, y1, x2, y2)`` format with + ``0 <= x1 < x2`` and ``0 <= y1 < y2``. 
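    Example (added for illustration) contrasting IoU and generalized IoU for two disjoint boxes; the enclosing-box term makes GIoU negative when the boxes do not overlap::

        >>> import torch
        >>> from torchvision.ops import box_iou, generalized_box_iou
        >>> a = torch.tensor([[0., 0., 2., 2.]])
        >>> b = torch.tensor([[4., 0., 6., 2.]])
        >>> box_iou(a, b)
        tensor([[0.]])
        >>> generalized_box_iou(a, b)   # 0 - (12 - 8) / 12, enclosing box has area 12
        tensor([[-0.3333]])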
+ + Args: + boxes1 (Tensor[N, 4]): first set of boxes + boxes2 (Tensor[M, 4]): second set of boxes + + Returns: + Tensor[N, M]: the NxM matrix containing the pairwise generalized IoU values + for every element in boxes1 and boxes2 + """ + + # degenerate boxes gives inf / nan results + # so do an early check + assert (boxes1[:, 2:] >= boxes1[:, :2]).all() + assert (boxes2[:, 2:] >= boxes2[:, :2]).all() + + inter, union = _box_inter_union(boxes1, boxes2) + iou = inter / union + + lti = torch.min(boxes1[:, None, :2], boxes2[:, :2]) + rbi = torch.max(boxes1[:, None, 2:], boxes2[:, 2:]) + + whi = _upcast(rbi - lti).clamp(min=0) # [N,M,2] + areai = whi[:, :, 0] * whi[:, :, 1] + + return iou - (areai - union) / areai diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/deform_conv.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/deform_conv.py new file mode 100644 index 0000000000000000000000000000000000000000..7dceee96f2783faafd55500dabe024f3c98b5c29 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/deform_conv.py @@ -0,0 +1,177 @@ +import math + +import torch +from torch import nn, Tensor +from torch.nn import init +from torch.nn.parameter import Parameter +from torch.nn.modules.utils import _pair +from typing import Optional, Tuple +from torchvision.extension import _assert_has_ops + + +def deform_conv2d( + input: Tensor, + offset: Tensor, + weight: Tensor, + bias: Optional[Tensor] = None, + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + dilation: Tuple[int, int] = (1, 1), + mask: Optional[Tensor] = None, +) -> Tensor: + r""" + Performs Deformable Convolution v2, described in + `Deformable ConvNets v2: More Deformable, Better Results + <https://arxiv.org/abs/1811.11168>`__ if :attr:`mask` is not ``None`` and + Performs Deformable Convolution, described in + `Deformable Convolutional Networks + <https://arxiv.org/abs/1703.06211>`__ if :attr:`mask` is ``None``. + + Args: + input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor + offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, out_height, out_width]): + offsets to be applied for each position in the convolution kernel. + weight (Tensor[out_channels, in_channels // groups, kernel_height, kernel_width]): convolution weights, + split into groups of size (in_channels // groups) + bias (Tensor[out_channels]): optional bias of shape (out_channels,). Default: None + stride (int or Tuple[int, int]): distance between convolution centers. Default: 1 + padding (int or Tuple[int, int]): height/width of padding of zeroes around + each image. Default: 0 + dilation (int or Tuple[int, int]): the spacing between kernel elements. Default: 1 + mask (Tensor[batch_size, offset_groups * kernel_height * kernel_width, out_height, out_width]): + masks to be applied for each position in the convolution kernel. Default: None + + Returns: + Tensor[batch_sz, out_channels, out_h, out_w]: result of convolution + + Examples:: + >>> input = torch.rand(4, 3, 10, 10) + >>> kh, kw = 3, 3 + >>> weight = torch.rand(5, 3, kh, kw) + >>> # offset and mask should have the same spatial size as the output + >>> # of the convolution. 
In this case, for an input of 10, stride of 1 + >>> # and kernel size of 3, without padding, the output size is 8 + >>> offset = torch.rand(4, 2 * kh * kw, 8, 8) + >>> mask = torch.rand(4, kh * kw, 8, 8) + >>> out = deform_conv2d(input, offset, weight, mask=mask) + >>> print(out.shape) + >>> # returns + >>> torch.Size([4, 5, 8, 8]) + """ + + _assert_has_ops() + out_channels = weight.shape[0] + + use_mask = mask is not None + + if mask is None: + mask = torch.zeros((input.shape[0], 0), device=input.device, dtype=input.dtype) + + if bias is None: + bias = torch.zeros(out_channels, device=input.device, dtype=input.dtype) + + stride_h, stride_w = _pair(stride) + pad_h, pad_w = _pair(padding) + dil_h, dil_w = _pair(dilation) + weights_h, weights_w = weight.shape[-2:] + _, n_in_channels, in_h, in_w = input.shape + + n_offset_grps = offset.shape[1] // (2 * weights_h * weights_w) + n_weight_grps = n_in_channels // weight.shape[1] + + if n_offset_grps == 0: + raise RuntimeError( + "the shape of the offset tensor at dimension 1 is not valid. It should " + "be a multiple of 2 * weight.size[2] * weight.size[3].\n" + "Got offset.shape[1]={}, while 2 * weight.size[2] * weight.size[3]={}".format( + offset.shape[1], 2 * weights_h * weights_w)) + + return torch.ops.torchvision.deform_conv2d( + input, + weight, + offset, + mask, + bias, + stride_h, stride_w, + pad_h, pad_w, + dil_h, dil_w, + n_weight_grps, + n_offset_grps, + use_mask,) + + +class DeformConv2d(nn.Module): + """ + See :func:`deform_conv2d`. + """ + + def __init__( + self, + in_channels: int, + out_channels: int, + kernel_size: int, + stride: int = 1, + padding: int = 0, + dilation: int = 1, + groups: int = 1, + bias: bool = True, + ): + super(DeformConv2d, self).__init__() + + if in_channels % groups != 0: + raise ValueError('in_channels must be divisible by groups') + if out_channels % groups != 0: + raise ValueError('out_channels must be divisible by groups') + + self.in_channels = in_channels + self.out_channels = out_channels + self.kernel_size = _pair(kernel_size) + self.stride = _pair(stride) + self.padding = _pair(padding) + self.dilation = _pair(dilation) + self.groups = groups + + self.weight = Parameter(torch.empty(out_channels, in_channels // groups, + self.kernel_size[0], self.kernel_size[1])) + + if bias: + self.bias = Parameter(torch.empty(out_channels)) + else: + self.register_parameter('bias', None) + + self.reset_parameters() + + def reset_parameters(self) -> None: + init.kaiming_uniform_(self.weight, a=math.sqrt(5)) + + if self.bias is not None: + fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight) + bound = 1 / math.sqrt(fan_in) + init.uniform_(self.bias, -bound, bound) + + def forward(self, input: Tensor, offset: Tensor, mask: Tensor = None) -> Tensor: + """ + Args: + input (Tensor[batch_size, in_channels, in_height, in_width]): input tensor + offset (Tensor[batch_size, 2 * offset_groups * kernel_height * kernel_width, + out_height, out_width]): offsets to be applied for each position in the + convolution kernel. + mask (Tensor[batch_size, offset_groups * kernel_height * kernel_width, + out_height, out_width]): masks to be applied for each position in the + convolution kernel. 
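        Example (a minimal sketch, not part of the original docstring: the offset field is predicted by an ordinary convolution chosen here for illustration, and must have ``2 * kernel_h * kernel_w`` channels per offset group)::

            >>> import torch
            >>> from torchvision.ops import DeformConv2d
            >>> conv = DeformConv2d(in_channels=3, out_channels=8, kernel_size=3, padding=1)
            >>> offset_pred = torch.nn.Conv2d(3, 2 * 3 * 3, kernel_size=3, padding=1)
            >>> x = torch.rand(1, 3, 32, 32)
            >>> conv(x, offset_pred(x)).shape   # padding=1 keeps the spatial size
            torch.Size([1, 8, 32, 32])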
+ """ + return deform_conv2d(input, offset, self.weight, self.bias, stride=self.stride, + padding=self.padding, dilation=self.dilation, mask=mask) + + def __repr__(self) -> str: + s = self.__class__.__name__ + '(' + s += '{in_channels}' + s += ', {out_channels}' + s += ', kernel_size={kernel_size}' + s += ', stride={stride}' + s += ', padding={padding}' if self.padding != (0, 0) else '' + s += ', dilation={dilation}' if self.dilation != (1, 1) else '' + s += ', groups={groups}' if self.groups != 1 else '' + s += ', bias=False' if self.bias is None else '' + s += ')' + return s.format(**self.__dict__) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/feature_pyramid_network.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/feature_pyramid_network.py new file mode 100644 index 0000000000000000000000000000000000000000..7d72769ab070f99e4055490c7082defe9f034137 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/feature_pyramid_network.py @@ -0,0 +1,204 @@ +from collections import OrderedDict + +import torch.nn.functional as F +from torch import nn, Tensor + +from typing import Tuple, List, Dict, Optional + + +class ExtraFPNBlock(nn.Module): + """ + Base class for the extra block in the FPN. + + Args: + results (List[Tensor]): the result of the FPN + x (List[Tensor]): the original feature maps + names (List[str]): the names for each one of the + original feature maps + + Returns: + results (List[Tensor]): the extended set of results + of the FPN + names (List[str]): the extended set of names for the results + """ + def forward( + self, + results: List[Tensor], + x: List[Tensor], + names: List[str], + ) -> Tuple[List[Tensor], List[str]]: + pass + + +class FeaturePyramidNetwork(nn.Module): + """ + Module that adds a FPN from on top of a set of feature maps. This is based on + `"Feature Pyramid Network for Object Detection" <https://arxiv.org/abs/1612.03144>`_. + + The feature maps are currently supposed to be in increasing depth + order. + + The input to the model is expected to be an OrderedDict[Tensor], containing + the feature maps on top of which the FPN will be added. + + Args: + in_channels_list (list[int]): number of channels for each feature map that + is passed to the module + out_channels (int): number of channels of the FPN representation + extra_blocks (ExtraFPNBlock or None): if provided, extra operations will + be performed. 
It is expected to take the fpn features, the original + features and the names of the original features as input, and returns + a new list of feature maps and their corresponding names + + Examples:: + + >>> m = torchvision.ops.FeaturePyramidNetwork([10, 20, 30], 5) + >>> # get some dummy data + >>> x = OrderedDict() + >>> x['feat0'] = torch.rand(1, 10, 64, 64) + >>> x['feat2'] = torch.rand(1, 20, 16, 16) + >>> x['feat3'] = torch.rand(1, 30, 8, 8) + >>> # compute the FPN on top of x + >>> output = m(x) + >>> print([(k, v.shape) for k, v in output.items()]) + >>> # returns + >>> [('feat0', torch.Size([1, 5, 64, 64])), + >>> ('feat2', torch.Size([1, 5, 16, 16])), + >>> ('feat3', torch.Size([1, 5, 8, 8]))] + + """ + def __init__( + self, + in_channels_list: List[int], + out_channels: int, + extra_blocks: Optional[ExtraFPNBlock] = None, + ): + super(FeaturePyramidNetwork, self).__init__() + self.inner_blocks = nn.ModuleList() + self.layer_blocks = nn.ModuleList() + for in_channels in in_channels_list: + if in_channels == 0: + raise ValueError("in_channels=0 is currently not supported") + inner_block_module = nn.Conv2d(in_channels, out_channels, 1) + layer_block_module = nn.Conv2d(out_channels, out_channels, 3, padding=1) + self.inner_blocks.append(inner_block_module) + self.layer_blocks.append(layer_block_module) + + # initialize parameters now to avoid modifying the initialization of top_blocks + for m in self.modules(): + if isinstance(m, nn.Conv2d): + nn.init.kaiming_uniform_(m.weight, a=1) + nn.init.constant_(m.bias, 0) + + if extra_blocks is not None: + assert isinstance(extra_blocks, ExtraFPNBlock) + self.extra_blocks = extra_blocks + + def get_result_from_inner_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.inner_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.inner_blocks) + if idx < 0: + idx += num_blocks + i = 0 + out = x + for module in self.inner_blocks: + if i == idx: + out = module(x) + i += 1 + return out + + def get_result_from_layer_blocks(self, x: Tensor, idx: int) -> Tensor: + """ + This is equivalent to self.layer_blocks[idx](x), + but torchscript doesn't support this yet + """ + num_blocks = len(self.layer_blocks) + if idx < 0: + idx += num_blocks + i = 0 + out = x + for module in self.layer_blocks: + if i == idx: + out = module(x) + i += 1 + return out + + def forward(self, x: Dict[str, Tensor]) -> Dict[str, Tensor]: + """ + Computes the FPN for a set of feature maps. + + Args: + x (OrderedDict[Tensor]): feature maps for each feature level. + + Returns: + results (OrderedDict[Tensor]): feature maps after FPN layers. + They are ordered from highest resolution first. 
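# Illustrative sketch (not part of the patched file): the FeaturePyramidNetwork above with a
# LastLevelMaxPool extra block, which appends an extra "pool" level to the returned dict.
# Assumes torchvision v0.10 is importable; channel counts and spatial sizes are arbitrary.
import torch
from collections import OrderedDict
from torchvision.ops.feature_pyramid_network import FeaturePyramidNetwork, LastLevelMaxPool

fpn = FeaturePyramidNetwork([10, 20, 30], out_channels=5, extra_blocks=LastLevelMaxPool())
feats = OrderedDict()
feats['feat0'] = torch.rand(1, 10, 64, 64)
feats['feat1'] = torch.rand(1, 20, 32, 32)
feats['feat2'] = torch.rand(1, 30, 16, 16)
out = fpn(feats)
print([(k, v.shape) for k, v in out.items()])
# the extra ('pool', torch.Size([1, 5, 8, 8])) entry is appended by LastLevelMaxPool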
+ """ + # unpack OrderedDict into two lists for easier handling + names = list(x.keys()) + x = list(x.values()) + + last_inner = self.get_result_from_inner_blocks(x[-1], -1) + results = [] + results.append(self.get_result_from_layer_blocks(last_inner, -1)) + + for idx in range(len(x) - 2, -1, -1): + inner_lateral = self.get_result_from_inner_blocks(x[idx], idx) + feat_shape = inner_lateral.shape[-2:] + inner_top_down = F.interpolate(last_inner, size=feat_shape, mode="nearest") + last_inner = inner_lateral + inner_top_down + results.insert(0, self.get_result_from_layer_blocks(last_inner, idx)) + + if self.extra_blocks is not None: + results, names = self.extra_blocks(results, x, names) + + # make it back an OrderedDict + out = OrderedDict([(k, v) for k, v in zip(names, results)]) + + return out + + +class LastLevelMaxPool(ExtraFPNBlock): + """ + Applies a max_pool2d on top of the last feature map + """ + def forward( + self, + x: List[Tensor], + y: List[Tensor], + names: List[str], + ) -> Tuple[List[Tensor], List[str]]: + names.append("pool") + x.append(F.max_pool2d(x[-1], 1, 2, 0)) + return x, names + + +class LastLevelP6P7(ExtraFPNBlock): + """ + This module is used in RetinaNet to generate extra layers, P6 and P7. + """ + def __init__(self, in_channels: int, out_channels: int): + super(LastLevelP6P7, self).__init__() + self.p6 = nn.Conv2d(in_channels, out_channels, 3, 2, 1) + self.p7 = nn.Conv2d(out_channels, out_channels, 3, 2, 1) + for module in [self.p6, self.p7]: + nn.init.kaiming_uniform_(module.weight, a=1) + nn.init.constant_(module.bias, 0) + self.use_P5 = in_channels == out_channels + + def forward( + self, + p: List[Tensor], + c: List[Tensor], + names: List[str], + ) -> Tuple[List[Tensor], List[str]]: + p5, c5 = p[-1], c[-1] + x = p5 if self.use_P5 else c5 + p6 = self.p6(x) + p7 = self.p7(F.relu(p6)) + p.extend([p6, p7]) + names.extend(["p6", "p7"]) + return p, names diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/focal_loss.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/focal_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..de18f30c83a1e493c9b22f9f2a25487577eec6bf --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/focal_loss.py @@ -0,0 +1,49 @@ +import torch +import torch.nn.functional as F + + +def sigmoid_focal_loss( + inputs: torch.Tensor, + targets: torch.Tensor, + alpha: float = 0.25, + gamma: float = 2, + reduction: str = "none", +): + """ + Original implementation from https://github.com/facebookresearch/fvcore/blob/master/fvcore/nn/focal_loss.py . + Loss used in RetinaNet for dense detection: https://arxiv.org/abs/1708.02002. + + Args: + inputs: A float tensor of arbitrary shape. + The predictions for each example. + targets: A float tensor with the same shape as inputs. Stores the binary + classification label for each element in inputs + (0 for the negative class and 1 for the positive class). + alpha: (optional) Weighting factor in range (0,1) to balance + positive vs negative examples or -1 for ignore. Default = 0.25 + gamma: Exponent of the modulating factor (1 - p_t) to + balance easy vs hard examples. + reduction: 'none' | 'mean' | 'sum' + 'none': No reduction will be applied to the output. + 'mean': The output will be averaged. + 'sum': The output will be summed. + Returns: + Loss tensor with the reduction option applied. 
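# Illustrative sketch (not part of the patched file): a minimal sigmoid_focal_loss call on
# random logits and binary targets. Assumes torchvision v0.10 is importable; shapes and
# hyper-parameters are arbitrary.
import torch
from torchvision.ops.focal_loss import sigmoid_focal_loss

logits = torch.randn(8, 4)                      # raw, pre-sigmoid predictions
targets = torch.randint(0, 2, (8, 4)).float()   # binary labels of the same shape
loss = sigmoid_focal_loss(logits, targets, alpha=0.25, gamma=2.0, reduction="mean")
print(loss.item())                              # scalar loss value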
+ """ + p = torch.sigmoid(inputs) + ce_loss = F.binary_cross_entropy_with_logits( + inputs, targets, reduction="none" + ) + p_t = p * targets + (1 - p) * (1 - targets) + loss = ce_loss * ((1 - p_t) ** gamma) + + if alpha >= 0: + alpha_t = alpha * targets + (1 - alpha) * (1 - targets) + loss = alpha_t * loss + + if reduction == "mean": + loss = loss.mean() + elif reduction == "sum": + loss = loss.sum() + + return loss diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/misc.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/misc.py new file mode 100644 index 0000000000000000000000000000000000000000..7e43caa78d6a6af89dcc4a3ee1c16ed3623681b9 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/misc.py @@ -0,0 +1,99 @@ +""" +helper class that supports empty tensors on some nn functions. + +Ideally, add support directly in PyTorch to empty tensors in +those functions. + +This can be removed once https://github.com/pytorch/pytorch/issues/12013 +is implemented +""" + +import warnings +import torch +from torch import Tensor +from typing import List, Optional + + +class Conv2d(torch.nn.Conv2d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + "torchvision.ops.misc.Conv2d is deprecated and will be " + "removed in future versions, use torch.nn.Conv2d instead.", FutureWarning) + + +class ConvTranspose2d(torch.nn.ConvTranspose2d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + "torchvision.ops.misc.ConvTranspose2d is deprecated and will be " + "removed in future versions, use torch.nn.ConvTranspose2d instead.", FutureWarning) + + +class BatchNorm2d(torch.nn.BatchNorm2d): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + warnings.warn( + "torchvision.ops.misc.BatchNorm2d is deprecated and will be " + "removed in future versions, use torch.nn.BatchNorm2d instead.", FutureWarning) + + +interpolate = torch.nn.functional.interpolate + + +# This is not in nn +class FrozenBatchNorm2d(torch.nn.Module): + """ + BatchNorm2d where the batch statistics and the affine parameters + are fixed + """ + + def __init__( + self, + num_features: int, + eps: float = 1e-5, + n: Optional[int] = None, + ): + # n=None for backward-compatibility + if n is not None: + warnings.warn("`n` argument is deprecated and has been renamed `num_features`", + DeprecationWarning) + num_features = n + super(FrozenBatchNorm2d, self).__init__() + self.eps = eps + self.register_buffer("weight", torch.ones(num_features)) + self.register_buffer("bias", torch.zeros(num_features)) + self.register_buffer("running_mean", torch.zeros(num_features)) + self.register_buffer("running_var", torch.ones(num_features)) + + def _load_from_state_dict( + self, + state_dict: dict, + prefix: str, + local_metadata: dict, + strict: bool, + missing_keys: List[str], + unexpected_keys: List[str], + error_msgs: List[str], + ): + num_batches_tracked_key = prefix + 'num_batches_tracked' + if num_batches_tracked_key in state_dict: + del state_dict[num_batches_tracked_key] + + super(FrozenBatchNorm2d, self)._load_from_state_dict( + state_dict, prefix, local_metadata, strict, + missing_keys, unexpected_keys, error_msgs) + + def forward(self, x: Tensor) -> Tensor: + # move reshapes to the beginning + # to make it fuser-friendly + w = self.weight.reshape(1, -1, 1, 1) + b = self.bias.reshape(1, -1, 1, 1) + rv = self.running_var.reshape(1, -1, 1, 1) + rm = self.running_mean.reshape(1, -1, 1, 1) + scale = w * (rv + 
self.eps).rsqrt() + bias = b - rm * scale + return x * scale + bias + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.weight.shape[0]}, eps={self.eps})" diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/poolers.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/poolers.py new file mode 100644 index 0000000000000000000000000000000000000000..f4ff289299b5a44f7d58c208de224c2f0c1d4928 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/poolers.py @@ -0,0 +1,277 @@ +import torch +from torch import nn, Tensor + +import torchvision +from torchvision.ops import roi_align +from torchvision.ops.boxes import box_area + +from typing import Optional, List, Dict, Tuple, Union + + +# copying result_idx_in_level to a specific index in result[] +# is not supported by ONNX tracing yet. +# _onnx_merge_levels() is an implementation supported by ONNX +# that merges the levels to the right indices +@torch.jit.unused +def _onnx_merge_levels(levels: Tensor, unmerged_results: List[Tensor]) -> Tensor: + first_result = unmerged_results[0] + dtype, device = first_result.dtype, first_result.device + res = torch.zeros((levels.size(0), first_result.size(1), + first_result.size(2), first_result.size(3)), + dtype=dtype, device=device) + for level in range(len(unmerged_results)): + index = torch.where(levels == level)[0].view(-1, 1, 1, 1) + index = index.expand(index.size(0), + unmerged_results[level].size(1), + unmerged_results[level].size(2), + unmerged_results[level].size(3)) + res = res.scatter(0, index, unmerged_results[level]) + return res + + +# TODO: (eellison) T54974082 https://github.com/pytorch/pytorch/issues/26744/pytorch/issues/26744 +def initLevelMapper( + k_min: int, + k_max: int, + canonical_scale: int = 224, + canonical_level: int = 4, + eps: float = 1e-6, +): + return LevelMapper(k_min, k_max, canonical_scale, canonical_level, eps) + + +class LevelMapper(object): + """Determine which FPN level each RoI in a set of RoIs should map to based + on the heuristic in the FPN paper. + + Args: + k_min (int) + k_max (int) + canonical_scale (int) + canonical_level (int) + eps (float) + """ + + def __init__( + self, + k_min: int, + k_max: int, + canonical_scale: int = 224, + canonical_level: int = 4, + eps: float = 1e-6, + ): + self.k_min = k_min + self.k_max = k_max + self.s0 = canonical_scale + self.lvl0 = canonical_level + self.eps = eps + + def __call__(self, boxlists: List[Tensor]) -> Tensor: + """ + Args: + boxlists (list[BoxList]) + """ + # Compute level ids + s = torch.sqrt(torch.cat([box_area(boxlist) for boxlist in boxlists])) + + # Eqn.(1) in FPN paper + target_lvls = torch.floor(self.lvl0 + torch.log2(s / self.s0) + torch.tensor(self.eps, dtype=s.dtype)) + target_lvls = torch.clamp(target_lvls, min=self.k_min, max=self.k_max) + return (target_lvls.to(torch.int64) - self.k_min).to(torch.int64) + + +class MultiScaleRoIAlign(nn.Module): + """ + Multi-scale RoIAlign pooling, which is useful for detection with or without FPN. + + It infers the scale of the pooling via the heuristics specified in eq. 1 + of the `Feature Pyramid Network paper <https://arxiv.org/abs/1612.03144>`_. + They keyword-only parameters ``canonical_scale`` and ``canonical_level`` + correspond respectively to ``224`` and ``k0=4`` in eq. 1, and + have the following meaning: ``canonical_level`` is the target level of the pyramid from + which to pool a region of interest with ``w x h = canonical_scale x canonical_scale``. 
+ + Args: + featmap_names (List[str]): the names of the feature maps that will be used + for the pooling. + output_size (List[Tuple[int, int]] or List[int]): output size for the pooled region + sampling_ratio (int): sampling ratio for ROIAlign + canonical_scale (int, optional): canonical_scale for LevelMapper + canonical_level (int, optional): canonical_level for LevelMapper + + Examples:: + + >>> m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat3'], 3, 2) + >>> i = OrderedDict() + >>> i['feat1'] = torch.rand(1, 5, 64, 64) + >>> i['feat2'] = torch.rand(1, 5, 32, 32) # this feature won't be used in the pooling + >>> i['feat3'] = torch.rand(1, 5, 16, 16) + >>> # create some random bounding boxes + >>> boxes = torch.rand(6, 4) * 256; boxes[:, 2:] += boxes[:, :2] + >>> # original image size, before computing the feature maps + >>> image_sizes = [(512, 512)] + >>> output = m(i, [boxes], image_sizes) + >>> print(output.shape) + >>> torch.Size([6, 5, 3, 3]) + + """ + + __annotations__ = { + 'scales': Optional[List[float]], + 'map_levels': Optional[LevelMapper] + } + + def __init__( + self, + featmap_names: List[str], + output_size: Union[int, Tuple[int], List[int]], + sampling_ratio: int, + *, + canonical_scale: int = 224, + canonical_level: int = 4, + ): + super(MultiScaleRoIAlign, self).__init__() + if isinstance(output_size, int): + output_size = (output_size, output_size) + self.featmap_names = featmap_names + self.sampling_ratio = sampling_ratio + self.output_size = tuple(output_size) + self.scales = None + self.map_levels = None + self.canonical_scale = canonical_scale + self.canonical_level = canonical_level + + def convert_to_roi_format(self, boxes: List[Tensor]) -> Tensor: + concat_boxes = torch.cat(boxes, dim=0) + device, dtype = concat_boxes.device, concat_boxes.dtype + ids = torch.cat( + [ + torch.full_like(b[:, :1], i, dtype=dtype, layout=torch.strided, device=device) + for i, b in enumerate(boxes) + ], + dim=0, + ) + rois = torch.cat([ids, concat_boxes], dim=1) + return rois + + def infer_scale(self, feature: Tensor, original_size: List[int]) -> float: + # assumption: the scale is of the form 2 ** (-k), with k integer + size = feature.shape[-2:] + possible_scales: List[float] = [] + for s1, s2 in zip(size, original_size): + approx_scale = float(s1) / float(s2) + scale = 2 ** float(torch.tensor(approx_scale).log2().round()) + possible_scales.append(scale) + assert possible_scales[0] == possible_scales[1] + return possible_scales[0] + + def setup_scales( + self, + features: List[Tensor], + image_shapes: List[Tuple[int, int]], + ) -> None: + assert len(image_shapes) != 0 + max_x = 0 + max_y = 0 + for shape in image_shapes: + max_x = max(shape[0], max_x) + max_y = max(shape[1], max_y) + original_input_shape = (max_x, max_y) + + scales = [self.infer_scale(feat, original_input_shape) for feat in features] + # get the levels in the feature map by leveraging the fact that the network always + # downsamples by a factor of 2 at each level. + lvl_min = -torch.log2(torch.tensor(scales[0], dtype=torch.float32)).item() + lvl_max = -torch.log2(torch.tensor(scales[-1], dtype=torch.float32)).item() + self.scales = scales + self.map_levels = initLevelMapper( + int(lvl_min), + int(lvl_max), + canonical_scale=self.canonical_scale, + canonical_level=self.canonical_level, + ) + + def forward( + self, + x: Dict[str, Tensor], + boxes: List[Tensor], + image_shapes: List[Tuple[int, int]], + ) -> Tensor: + """ + Args: + x (OrderedDict[Tensor]): feature maps for each level. 
They are assumed to have + all the same number of channels, but they can have different sizes. + boxes (List[Tensor[N, 4]]): boxes to be used to perform the pooling operation, in + (x1, y1, x2, y2) format and in the image reference size, not the feature map + reference. The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + image_shapes (List[Tuple[height, width]]): the sizes of each image before they + have been fed to a CNN to obtain feature maps. This allows us to infer the + scale factor for each one of the levels to be pooled. + Returns: + result (Tensor) + """ + x_filtered = [] + for k, v in x.items(): + if k in self.featmap_names: + x_filtered.append(v) + num_levels = len(x_filtered) + rois = self.convert_to_roi_format(boxes) + if self.scales is None: + self.setup_scales(x_filtered, image_shapes) + + scales = self.scales + assert scales is not None + + if num_levels == 1: + return roi_align( + x_filtered[0], rois, + output_size=self.output_size, + spatial_scale=scales[0], + sampling_ratio=self.sampling_ratio + ) + + mapper = self.map_levels + assert mapper is not None + + levels = mapper(boxes) + + num_rois = len(rois) + num_channels = x_filtered[0].shape[1] + + dtype, device = x_filtered[0].dtype, x_filtered[0].device + result = torch.zeros( + (num_rois, num_channels,) + self.output_size, + dtype=dtype, + device=device, + ) + + tracing_results = [] + for level, (per_level_feature, scale) in enumerate(zip(x_filtered, scales)): + idx_in_level = torch.where(levels == level)[0] + rois_per_level = rois[idx_in_level] + + result_idx_in_level = roi_align( + per_level_feature, rois_per_level, + output_size=self.output_size, + spatial_scale=scale, sampling_ratio=self.sampling_ratio) + + if torchvision._is_tracing(): + tracing_results.append(result_idx_in_level.to(dtype)) + else: + # result and result_idx_in_level's dtypes are based on dtypes of different + # elements in x_filtered. x_filtered contains tensors output by different + # layers. When autocast is active, it may choose different dtypes for + # different layers' outputs. Therefore, we defensively match result's dtype + # before copying elements from result_idx_in_level in the following op. + # We need to cast manually (can't rely on autocast to cast for us) because + # the op acts on result in-place, and autocast only affects out-of-place ops. 
+ result[idx_in_level] = result_idx_in_level.to(result.dtype) + + if torchvision._is_tracing(): + result = _onnx_merge_levels(levels, tracing_results) + + return result + + def __repr__(self) -> str: + return (f"{self.__class__.__name__}(featmap_names={self.featmap_names}, " + f"output_size={self.output_size}, sampling_ratio={self.sampling_ratio})") diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_align.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..d42353e2b0dd2b8a01bfd90219a9d4b6688c88bc --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_align.py @@ -0,0 +1,82 @@ +import torch +from torch import nn, Tensor + +from torch.nn.modules.utils import _pair + +from torchvision.extension import _assert_has_ops +from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape + + +def ps_roi_align( + input: Tensor, + boxes: Tensor, + output_size: int, + spatial_scale: float = 1.0, + sampling_ratio: int = -1, +) -> Tensor: + """ + Performs Position-Sensitive Region of Interest (RoI) Align operator + mentioned in Light-Head R-CNN. + + Args: + input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element + contains ``C`` feature maps of dimensions ``H x W``. + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. + The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + If a single Tensor is passed, then the first column should + contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. + If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i + in the batch. + output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling + is performed, as (height, width). + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + sampling_ratio (int): number of sampling points in the interpolation grid + used to compute the output value of each pooled output bin. If > 0, + then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If + <= 0, then an adaptive number of grid points are used (computed as + ``ceil(roi_width / output_width)``, and likewise for height). Default: -1 + + Returns: + Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.ps_roi_align(input, rois, spatial_scale, + output_size[0], + output_size[1], + sampling_ratio) + return output + + +class PSRoIAlign(nn.Module): + """ + See :func:`ps_roi_align`. 
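# Illustrative sketch (not part of the patched file): ps_roi_align on a single feature map.
# Assumes torchvision v0.10 with its compiled ops. The channel count (18) must be divisible
# by output_size[0] * output_size[1] (3 * 3 = 9), giving 18 / 9 = 2 output channels per RoI.
import torch
from torchvision.ops.ps_roi_align import ps_roi_align

feat = torch.rand(1, 18, 32, 32)
rois = torch.tensor([[0.0, 4.0, 4.0, 20.0, 20.0]])   # (batch_index, x1, y1, x2, y2)
out = ps_roi_align(feat, rois, output_size=3, spatial_scale=1.0, sampling_ratio=2)
print(out.shape)                                     # torch.Size([1, 2, 3, 3])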
+ """ + def __init__( + self, + output_size: int, + spatial_scale: float, + sampling_ratio: int, + ): + super(PSRoIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return ps_roi_align(input, rois, self.output_size, self.spatial_scale, + self.sampling_ratio) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ', sampling_ratio=' + str(self.sampling_ratio) + tmpstr += ')' + return tmpstr diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_pool.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..d0331e557fddcb0a1917a8c61079e8d488bad7b8 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/ps_roi_pool.py @@ -0,0 +1,67 @@ +import torch +from torch import nn, Tensor + +from torch.nn.modules.utils import _pair + +from torchvision.extension import _assert_has_ops +from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape + + +def ps_roi_pool( + input: Tensor, + boxes: Tensor, + output_size: int, + spatial_scale: float = 1.0, +) -> Tensor: + """ + Performs Position-Sensitive Region of Interest (RoI) Pool operator + described in R-FCN + + Args: + input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element + contains ``C`` feature maps of dimensions ``H x W``. + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. + The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + If a single Tensor is passed, then the first column should + contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. + If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i + in the batch. + output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling + is performed, as (height, width). + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + + Returns: + Tensor[K, C / (output_size[0] * output_size[1]), output_size[0], output_size[1]]: The pooled RoIs. + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.ps_roi_pool(input, rois, spatial_scale, + output_size[0], + output_size[1]) + return output + + +class PSRoIPool(nn.Module): + """ + See :func:`ps_roi_pool`. 
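# Illustrative sketch (not part of the patched file): the PSRoIPool module with boxes given
# as a per-image list of (x1, y1, x2, y2) tensors rather than a single (K, 5) roi tensor.
# Assumes torchvision v0.10 with its compiled ops; 18 channels / (3 * 3) bins = 2 output channels.
import torch
from torchvision.ops.ps_roi_pool import PSRoIPool

pool = PSRoIPool(output_size=3, spatial_scale=1.0)
feat = torch.rand(1, 18, 32, 32)
boxes = [torch.tensor([[4.0, 4.0, 20.0, 20.0]])]     # one list entry per image in the batch
print(pool(feat, boxes).shape)                       # torch.Size([1, 2, 3, 3])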
+ """ + def __init__(self, output_size: int, spatial_scale: float): + super(PSRoIPool, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return ps_roi_pool(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ')' + return tmpstr diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_align.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_align.py new file mode 100644 index 0000000000000000000000000000000000000000..b589089aa42eaecb9ca43971a4230e7a6019a145 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_align.py @@ -0,0 +1,87 @@ +import torch +from torch import nn, Tensor + +from torch.nn.modules.utils import _pair +from torch.jit.annotations import BroadcastingList2 + +from torchvision.extension import _assert_has_ops +from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape + + +def roi_align( + input: Tensor, + boxes: Tensor, + output_size: BroadcastingList2[int], + spatial_scale: float = 1.0, + sampling_ratio: int = -1, + aligned: bool = False, +) -> Tensor: + """ + Performs Region of Interest (RoI) Align operator with average pooling, as described in Mask R-CNN. + + Args: + input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element + contains ``C`` feature maps of dimensions ``H x W``. + If the tensor is quantized, we expect a batch size of ``N == 1``. + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. + The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + If a single Tensor is passed, then the first column should + contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. + If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i + in the batch. + output_size (int or Tuple[int, int]): the size of the output (in bins or pixels) after the pooling + is performed, as (height, width). + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + sampling_ratio (int): number of sampling points in the interpolation grid + used to compute the output value of each pooled output bin. If > 0, + then exactly ``sampling_ratio x sampling_ratio`` sampling points per bin are used. If + <= 0, then an adaptive number of grid points are used (computed as + ``ceil(roi_width / output_width)``, and likewise for height). Default: -1 + aligned (bool): If False, use the legacy implementation. + If True, pixel shift the box coordinates it by -0.5 for a better alignment with the two + neighboring pixel indices. This version is used in Detectron2 + + Returns: + Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + return torch.ops.torchvision.roi_align(input, rois, spatial_scale, + output_size[0], output_size[1], + sampling_ratio, aligned) + + +class RoIAlign(nn.Module): + """ + See :func:`roi_align`. 
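# Illustrative sketch (not part of the patched file): roi_align with boxes expressed in image
# coordinates and a feature map at 1/4 resolution, hence spatial_scale=0.25. Assumes
# torchvision v0.10 with its compiled ops; aligned=True applies the half-pixel correction.
import torch
from torchvision.ops.roi_align import roi_align

feat = torch.rand(1, 16, 32, 32)                     # features for a 128x128 image
boxes = [torch.tensor([[8.0, 8.0, 64.0, 64.0]])]     # (x1, y1, x2, y2) in image coordinates
out = roi_align(feat, boxes, output_size=(7, 7), spatial_scale=0.25,
                sampling_ratio=2, aligned=True)
print(out.shape)                                     # torch.Size([1, 16, 7, 7])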
+ """ + def __init__( + self, + output_size: BroadcastingList2[int], + spatial_scale: float, + sampling_ratio: int, + aligned: bool = False, + ): + super(RoIAlign, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + self.sampling_ratio = sampling_ratio + self.aligned = aligned + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return roi_align(input, rois, self.output_size, self.spatial_scale, self.sampling_ratio, self.aligned) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ', sampling_ratio=' + str(self.sampling_ratio) + tmpstr += ', aligned=' + str(self.aligned) + tmpstr += ')' + return tmpstr diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_pool.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_pool.py new file mode 100644 index 0000000000000000000000000000000000000000..90f2dd3d173c4682cead76870dd8fd7e8c96c303 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/ops/roi_pool.py @@ -0,0 +1,66 @@ +import torch +from torch import nn, Tensor + +from torch.nn.modules.utils import _pair +from torch.jit.annotations import BroadcastingList2 + +from torchvision.extension import _assert_has_ops +from ._utils import convert_boxes_to_roi_format, check_roi_boxes_shape + + +def roi_pool( + input: Tensor, + boxes: Tensor, + output_size: BroadcastingList2[int], + spatial_scale: float = 1.0, +) -> Tensor: + """ + Performs Region of Interest (RoI) Pool operator described in Fast R-CNN + + Args: + input (Tensor[N, C, H, W]): The input tensor, i.e. a batch with ``N`` elements. Each element + contains ``C`` feature maps of dimensions ``H x W``. + boxes (Tensor[K, 5] or List[Tensor[L, 4]]): the box coordinates in (x1, y1, x2, y2) + format where the regions will be taken from. + The coordinate must satisfy ``0 <= x1 < x2`` and ``0 <= y1 < y2``. + If a single Tensor is passed, then the first column should + contain the index of the corresponding element in the batch, i.e. a number in ``[0, N - 1]``. + If a list of Tensors is passed, then each Tensor will correspond to the boxes for an element i + in the batch. + output_size (int or Tuple[int, int]): the size of the output after the cropping + is performed, as (height, width) + spatial_scale (float): a scaling factor that maps the input coordinates to + the box coordinates. Default: 1.0 + + Returns: + Tensor[K, C, output_size[0], output_size[1]]: The pooled RoIs. + """ + _assert_has_ops() + check_roi_boxes_shape(boxes) + rois = boxes + output_size = _pair(output_size) + if not isinstance(rois, torch.Tensor): + rois = convert_boxes_to_roi_format(rois) + output, _ = torch.ops.torchvision.roi_pool(input, rois, spatial_scale, + output_size[0], output_size[1]) + return output + + +class RoIPool(nn.Module): + """ + See :func:`roi_pool`. 
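# Illustrative sketch (not part of the patched file): the RoIPool module with rois passed as a
# single (K, 5) tensor whose first column is the batch index of each box. Assumes torchvision
# v0.10 with its compiled ops; the feature map is again treated as 1/4 of image resolution.
import torch
from torchvision.ops.roi_pool import RoIPool

pool = RoIPool(output_size=(7, 7), spatial_scale=0.25)
feat = torch.rand(2, 16, 32, 32)
rois = torch.tensor([[0.0, 8.0, 8.0, 64.0, 64.0],
                     [1.0, 0.0, 0.0, 32.0, 32.0]])
print(pool(feat, rois).shape)                        # torch.Size([2, 16, 7, 7])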
+ """ + def __init__(self, output_size: BroadcastingList2[int], spatial_scale: float): + super(RoIPool, self).__init__() + self.output_size = output_size + self.spatial_scale = spatial_scale + + def forward(self, input: Tensor, rois: Tensor) -> Tensor: + return roi_pool(input, rois, self.output_size, self.spatial_scale) + + def __repr__(self) -> str: + tmpstr = self.__class__.__name__ + '(' + tmpstr += 'output_size=' + str(self.output_size) + tmpstr += ', spatial_scale=' + str(self.spatial_scale) + tmpstr += ')' + return tmpstr diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/__init__.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77680a14f0d0599f4004a2ce5c299c0f5e13a0d5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/__init__.py @@ -0,0 +1,2 @@ +from .transforms import * +from .autoaugment import * diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_functional_video.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_functional_video.py new file mode 100644 index 0000000000000000000000000000000000000000..9eba0463a4f1a46439db3abe00b2e2ca5e3a526b --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_functional_video.py @@ -0,0 +1,107 @@ +import torch +import warnings + + +warnings.warn( + "The _functional_video module is deprecated. Please use the functional module instead." +) + + +def _is_tensor_video_clip(clip): + if not torch.is_tensor(clip): + raise TypeError("clip should be Tensor. Got %s" % type(clip)) + + if not clip.ndimension() == 4: + raise ValueError("clip should be 4D. Got %dD" % clip.dim()) + + return True + + +def crop(clip, i, j, h, w): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + """ + assert len(clip.size()) == 4, "clip should be a 4D tensor" + return clip[..., i:i + h, j:j + w] + + +def resize(clip, target_size, interpolation_mode): + assert len(target_size) == 2, "target size should be tuple (height, width)" + return torch.nn.functional.interpolate( + clip, size=target_size, mode=interpolation_mode, align_corners=False + ) + + +def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"): + """ + Do spatial cropping and resizing to the video clip + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + i (int): i in (i,j) i.e coordinates of the upper left corner. + j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the cropped region. + w (int): Width of the cropped region. + size (tuple(int, int)): height and width of resized clip + Returns: + clip (torch.tensor): Resized and cropped clip. 
Size is (C, T, H, W) + """ + assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" + clip = crop(clip, i, j, h, w) + clip = resize(clip, size, interpolation_mode) + return clip + + +def center_crop(clip, crop_size): + assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" + h, w = clip.size(-2), clip.size(-1) + th, tw = crop_size + assert h >= th and w >= tw, "height and width must be no smaller than crop_size" + + i = int(round((h - th) / 2.0)) + j = int(round((w - tw) / 2.0)) + return crop(clip, i, j, th, tw) + + +def to_tensor(clip): + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) + Return: + clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) + """ + _is_tensor_video_clip(clip) + if not clip.dtype == torch.uint8: + raise TypeError("clip tensor should have data type uint8. Got %s" % str(clip.dtype)) + return clip.float().permute(3, 0, 1, 2) / 255.0 + + +def normalize(clip, mean, std, inplace=False): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + mean (tuple): pixel RGB mean. Size is (3) + std (tuple): pixel standard deviation. Size is (3) + Returns: + normalized clip (torch.tensor): Size is (C, T, H, W) + """ + assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" + if not inplace: + clip = clip.clone() + mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device) + std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device) + clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None]) + return clip + + +def hflip(clip): + """ + Args: + clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W) + Returns: + flipped clip (torch.tensor): Size is (C, T, H, W) + """ + assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor" + return clip.flip((-1)) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_transforms_video.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_transforms_video.py new file mode 100644 index 0000000000000000000000000000000000000000..bfef1b440d163a1419e09c24f61b6858ac6732be --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/_transforms_video.py @@ -0,0 +1,179 @@ +#!/usr/bin/env python3 + +import numbers +import random +import warnings + +from torchvision.transforms import ( + RandomCrop, + RandomResizedCrop, +) + +from . import _functional_video as F + + +__all__ = [ + "RandomCropVideo", + "RandomResizedCropVideo", + "CenterCropVideo", + "NormalizeVideo", + "ToTensorVideo", + "RandomHorizontalFlipVideo", +] + + +warnings.warn( + "The _transforms_video module is deprecated. Please use the transforms module instead." +) + + +class RandomCropVideo(RandomCrop): + def __init__(self, size): + if isinstance(size, numbers.Number): + self.size = (int(size), int(size)) + else: + self.size = size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: randomly cropped/resized video clip. 
+ size is (C, T, OH, OW) + """ + i, j, h, w = self.get_params(clip, self.size) + return F.crop(clip, i, j, h, w) + + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size) + + +class RandomResizedCropVideo(RandomResizedCrop): + def __init__( + self, + size, + scale=(0.08, 1.0), + ratio=(3.0 / 4.0, 4.0 / 3.0), + interpolation_mode="bilinear", + ): + if isinstance(size, tuple): + assert len(size) == 2, "size should be tuple (height, width)" + self.size = size + else: + self.size = (size, size) + + self.interpolation_mode = interpolation_mode + self.scale = scale + self.ratio = ratio + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: randomly cropped/resized video clip. + size is (C, T, H, W) + """ + i, j, h, w = self.get_params(clip, self.scale, self.ratio) + return F.resized_crop(clip, i, j, h, w, self.size, self.interpolation_mode) + + def __repr__(self): + return self.__class__.__name__ + \ + '(size={0}, interpolation_mode={1}, scale={2}, ratio={3})'.format( + self.size, self.interpolation_mode, self.scale, self.ratio + ) + + +class CenterCropVideo(object): + def __init__(self, crop_size): + if isinstance(crop_size, numbers.Number): + self.crop_size = (int(crop_size), int(crop_size)) + else: + self.crop_size = crop_size + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W) + Returns: + torch.tensor: central cropping of video clip. Size is + (C, T, crop_size, crop_size) + """ + return F.center_crop(clip, self.crop_size) + + def __repr__(self): + return self.__class__.__name__ + '(crop_size={0})'.format(self.crop_size) + + +class NormalizeVideo(object): + """ + Normalize the video clip by mean subtraction and division by standard deviation + Args: + mean (3-tuple): pixel RGB mean + std (3-tuple): pixel RGB standard deviation + inplace (boolean): whether do in-place normalization + """ + + def __init__(self, mean, std, inplace=False): + self.mean = mean + self.std = std + self.inplace = inplace + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): video clip to be normalized. Size is (C, T, H, W) + """ + return F.normalize(clip, self.mean, self.std, self.inplace) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1}, inplace={2})'.format( + self.mean, self.std, self.inplace) + + +class ToTensorVideo(object): + """ + Convert tensor data type from uint8 to float, divide value by 255.0 and + permute the dimensions of clip tensor + """ + + def __init__(self): + pass + + def __call__(self, clip): + """ + Args: + clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C) + Return: + clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W) + """ + return F.to_tensor(clip) + + def __repr__(self): + return self.__class__.__name__ + + +class RandomHorizontalFlipVideo(object): + """ + Flip the video clip along the horizonal direction with a given probability + Args: + p (float): probability of the clip being flipped. 
Default value is 0.5 + """ + + def __init__(self, p=0.5): + self.p = p + + def __call__(self, clip): + """ + Args: + clip (torch.tensor): Size is (C, T, H, W) + Return: + clip (torch.tensor): Size is (C, T, H, W) + """ + if random.random() < self.p: + clip = F.hflip(clip) + return clip + + def __repr__(self): + return self.__class__.__name__ + "(p={0})".format(self.p) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/autoaugment.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/autoaugment.py new file mode 100644 index 0000000000000000000000000000000000000000..97522945d2e0c503a648b4069ca8b4b68c9521db --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/autoaugment.py @@ -0,0 +1,237 @@ +import math +import torch + +from enum import Enum +from torch import Tensor +from typing import List, Tuple, Optional + +from . import functional as F, InterpolationMode + +__all__ = ["AutoAugmentPolicy", "AutoAugment"] + + +class AutoAugmentPolicy(Enum): + """AutoAugment policies learned on different datasets. + Available policies are IMAGENET, CIFAR10 and SVHN. + """ + IMAGENET = "imagenet" + CIFAR10 = "cifar10" + SVHN = "svhn" + + +def _get_transforms(policy: AutoAugmentPolicy): + if policy == AutoAugmentPolicy.IMAGENET: + return [ + (("Posterize", 0.4, 8), ("Rotate", 0.6, 9)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + (("Posterize", 0.6, 7), ("Posterize", 0.6, 6)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Equalize", 0.4, None), ("Rotate", 0.8, 8)), + (("Solarize", 0.6, 3), ("Equalize", 0.6, None)), + (("Posterize", 0.8, 5), ("Equalize", 1.0, None)), + (("Rotate", 0.2, 3), ("Solarize", 0.6, 8)), + (("Equalize", 0.6, None), ("Posterize", 0.4, 6)), + (("Rotate", 0.8, 8), ("Color", 0.4, 0)), + (("Rotate", 0.4, 9), ("Equalize", 0.6, None)), + (("Equalize", 0.0, None), ("Equalize", 0.8, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Rotate", 0.8, 8), ("Color", 1.0, 2)), + (("Color", 0.8, 8), ("Solarize", 0.8, 7)), + (("Sharpness", 0.4, 7), ("Invert", 0.6, None)), + (("ShearX", 0.6, 5), ("Equalize", 1.0, None)), + (("Color", 0.4, 0), ("Equalize", 0.6, None)), + (("Equalize", 0.4, None), ("Solarize", 0.2, 4)), + (("Solarize", 0.6, 5), ("AutoContrast", 0.6, None)), + (("Invert", 0.6, None), ("Equalize", 1.0, None)), + (("Color", 0.6, 4), ("Contrast", 1.0, 8)), + (("Equalize", 0.8, None), ("Equalize", 0.6, None)), + ] + elif policy == AutoAugmentPolicy.CIFAR10: + return [ + (("Invert", 0.1, None), ("Contrast", 0.2, 6)), + (("Rotate", 0.7, 2), ("TranslateX", 0.3, 9)), + (("Sharpness", 0.8, 1), ("Sharpness", 0.9, 3)), + (("ShearY", 0.5, 8), ("TranslateY", 0.7, 9)), + (("AutoContrast", 0.5, None), ("Equalize", 0.9, None)), + (("ShearY", 0.2, 7), ("Posterize", 0.3, 7)), + (("Color", 0.4, 3), ("Brightness", 0.6, 7)), + (("Sharpness", 0.3, 9), ("Brightness", 0.7, 9)), + (("Equalize", 0.6, None), ("Equalize", 0.5, None)), + (("Contrast", 0.6, 7), ("Sharpness", 0.6, 5)), + (("Color", 0.7, 7), ("TranslateX", 0.5, 8)), + (("Equalize", 0.3, None), ("AutoContrast", 0.4, None)), + (("TranslateY", 0.4, 3), ("Sharpness", 0.2, 6)), + (("Brightness", 0.9, 6), ("Color", 0.2, 8)), + (("Solarize", 0.5, 2), ("Invert", 0.0, None)), + (("Equalize", 0.2, None), ("AutoContrast", 0.6, None)), + (("Equalize", 0.2, None), ("Equalize", 0.6, None)), + (("Color", 0.9, 9), ("Equalize", 0.6, None)), + (("AutoContrast", 0.8, 
None), ("Solarize", 0.2, 8)), + (("Brightness", 0.1, 3), ("Color", 0.7, 0)), + (("Solarize", 0.4, 5), ("AutoContrast", 0.9, None)), + (("TranslateY", 0.9, 9), ("TranslateY", 0.7, 9)), + (("AutoContrast", 0.9, None), ("Solarize", 0.8, 3)), + (("Equalize", 0.8, None), ("Invert", 0.1, None)), + (("TranslateY", 0.7, 9), ("AutoContrast", 0.9, None)), + ] + elif policy == AutoAugmentPolicy.SVHN: + return [ + (("ShearX", 0.9, 4), ("Invert", 0.2, None)), + (("ShearY", 0.9, 8), ("Invert", 0.7, None)), + (("Equalize", 0.6, None), ("Solarize", 0.6, 6)), + (("Invert", 0.9, None), ("Equalize", 0.6, None)), + (("Equalize", 0.6, None), ("Rotate", 0.9, 3)), + (("ShearX", 0.9, 4), ("AutoContrast", 0.8, None)), + (("ShearY", 0.9, 8), ("Invert", 0.4, None)), + (("ShearY", 0.9, 5), ("Solarize", 0.2, 6)), + (("Invert", 0.9, None), ("AutoContrast", 0.8, None)), + (("Equalize", 0.6, None), ("Rotate", 0.9, 3)), + (("ShearX", 0.9, 4), ("Solarize", 0.3, 3)), + (("ShearY", 0.8, 8), ("Invert", 0.7, None)), + (("Equalize", 0.9, None), ("TranslateY", 0.6, 6)), + (("Invert", 0.9, None), ("Equalize", 0.6, None)), + (("Contrast", 0.3, 3), ("Rotate", 0.8, 4)), + (("Invert", 0.8, None), ("TranslateY", 0.0, 2)), + (("ShearY", 0.7, 6), ("Solarize", 0.4, 8)), + (("Invert", 0.6, None), ("Rotate", 0.8, 4)), + (("ShearY", 0.3, 7), ("TranslateX", 0.9, 3)), + (("ShearX", 0.1, 6), ("Invert", 0.6, None)), + (("Solarize", 0.7, 2), ("TranslateY", 0.6, 7)), + (("ShearY", 0.8, 4), ("Invert", 0.8, None)), + (("ShearX", 0.7, 9), ("TranslateY", 0.8, 3)), + (("ShearY", 0.8, 5), ("AutoContrast", 0.7, None)), + (("ShearX", 0.7, 2), ("Invert", 0.1, None)), + ] + + +def _get_magnitudes(): + _BINS = 10 + return { + # name: (magnitudes, signed) + "ShearX": (torch.linspace(0.0, 0.3, _BINS), True), + "ShearY": (torch.linspace(0.0, 0.3, _BINS), True), + "TranslateX": (torch.linspace(0.0, 150.0 / 331.0, _BINS), True), + "TranslateY": (torch.linspace(0.0, 150.0 / 331.0, _BINS), True), + "Rotate": (torch.linspace(0.0, 30.0, _BINS), True), + "Brightness": (torch.linspace(0.0, 0.9, _BINS), True), + "Color": (torch.linspace(0.0, 0.9, _BINS), True), + "Contrast": (torch.linspace(0.0, 0.9, _BINS), True), + "Sharpness": (torch.linspace(0.0, 0.9, _BINS), True), + "Posterize": (torch.tensor([8, 8, 7, 7, 6, 6, 5, 5, 4, 4]), False), + "Solarize": (torch.linspace(256.0, 0.0, _BINS), False), + "AutoContrast": (None, None), + "Equalize": (None, None), + "Invert": (None, None), + } + + +class AutoAugment(torch.nn.Module): + r"""AutoAugment data augmentation method based on + `"AutoAugment: Learning Augmentation Strategies from Data" <https://arxiv.org/pdf/1805.09501.pdf>`_. + If the image is torch Tensor, it should be of type torch.uint8, and it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + policy (AutoAugmentPolicy): Desired policy enum defined by + :class:`torchvision.transforms.autoaugment.AutoAugmentPolicy`. Default is ``AutoAugmentPolicy.IMAGENET``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. 
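# Illustrative sketch (not part of the patched file): applying the AutoAugment transform defined
# in this file to a uint8 image tensor with the CIFAR10 policy. Assumes torchvision v0.10 is
# importable; a PIL RGB image would work the same way.
import torch
from torchvision.transforms.autoaugment import AutoAugment, AutoAugmentPolicy

augment = AutoAugment(policy=AutoAugmentPolicy.CIFAR10)
img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)   # CHW uint8 image
aug = augment(img)
print(aug.shape, aug.dtype)                                   # torch.Size([3, 32, 32]) torch.uint8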
+ """ + + def __init__(self, policy: AutoAugmentPolicy = AutoAugmentPolicy.IMAGENET, + interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Optional[List[float]] = None): + super().__init__() + self.policy = policy + self.interpolation = interpolation + self.fill = fill + + self.transforms = _get_transforms(policy) + if self.transforms is None: + raise ValueError("The provided policy {} is not recognized.".format(policy)) + self._op_meta = _get_magnitudes() + + @staticmethod + def get_params(transform_num: int) -> Tuple[int, Tensor, Tensor]: + """Get parameters for autoaugment transformation + + Returns: + params required by the autoaugment transformation + """ + policy_id = torch.randint(transform_num, (1,)).item() + probs = torch.rand((2,)) + signs = torch.randint(2, (2,)) + + return policy_id, probs, signs + + def _get_op_meta(self, name: str) -> Tuple[Optional[Tensor], Optional[bool]]: + return self._op_meta[name] + + def forward(self, img: Tensor): + """ + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: AutoAugmented image. + """ + fill = self.fill + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * F._get_image_num_channels(img) + elif fill is not None: + fill = [float(f) for f in fill] + + transform_id, probs, signs = self.get_params(len(self.transforms)) + + for i, (op_name, p, magnitude_id) in enumerate(self.transforms[transform_id]): + if probs[i] <= p: + magnitudes, signed = self._get_op_meta(op_name) + magnitude = float(magnitudes[magnitude_id].item()) \ + if magnitudes is not None and magnitude_id is not None else 0.0 + if signed is not None and signed and signs[i] == 0: + magnitude *= -1.0 + + if op_name == "ShearX": + img = F.affine(img, angle=0.0, translate=[0, 0], scale=1.0, shear=[math.degrees(magnitude), 0.0], + interpolation=self.interpolation, fill=fill) + elif op_name == "ShearY": + img = F.affine(img, angle=0.0, translate=[0, 0], scale=1.0, shear=[0.0, math.degrees(magnitude)], + interpolation=self.interpolation, fill=fill) + elif op_name == "TranslateX": + img = F.affine(img, angle=0.0, translate=[int(F._get_image_size(img)[0] * magnitude), 0], scale=1.0, + interpolation=self.interpolation, shear=[0.0, 0.0], fill=fill) + elif op_name == "TranslateY": + img = F.affine(img, angle=0.0, translate=[0, int(F._get_image_size(img)[1] * magnitude)], scale=1.0, + interpolation=self.interpolation, shear=[0.0, 0.0], fill=fill) + elif op_name == "Rotate": + img = F.rotate(img, magnitude, interpolation=self.interpolation, fill=fill) + elif op_name == "Brightness": + img = F.adjust_brightness(img, 1.0 + magnitude) + elif op_name == "Color": + img = F.adjust_saturation(img, 1.0 + magnitude) + elif op_name == "Contrast": + img = F.adjust_contrast(img, 1.0 + magnitude) + elif op_name == "Sharpness": + img = F.adjust_sharpness(img, 1.0 + magnitude) + elif op_name == "Posterize": + img = F.posterize(img, int(magnitude)) + elif op_name == "Solarize": + img = F.solarize(img, magnitude) + elif op_name == "AutoContrast": + img = F.autocontrast(img) + elif op_name == "Equalize": + img = F.equalize(img) + elif op_name == "Invert": + img = F.invert(img) + else: + raise ValueError("The provided operator {} is not recognized.".format(op_name)) + + return img + + def __repr__(self): + return self.__class__.__name__ + '(policy={}, fill={})'.format(self.policy, self.fill) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional.py 
b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional.py new file mode 100644 index 0000000000000000000000000000000000000000..6a86a000d65a8237332152c7651b26d9c259714e --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional.py @@ -0,0 +1,1363 @@ +import math +import numbers +import warnings +from enum import Enum + +import numpy as np +from PIL import Image + +import torch +from torch import Tensor +from typing import List, Tuple, Any, Optional + +try: + import accimage +except ImportError: + accimage = None + +from . import functional_pil as F_pil +from . import functional_tensor as F_t + + +class InterpolationMode(Enum): + """Interpolation modes + Available interpolation methods are ``nearest``, ``bilinear``, ``bicubic``, ``box``, ``hamming``, and ``lanczos``. + """ + NEAREST = "nearest" + BILINEAR = "bilinear" + BICUBIC = "bicubic" + # For PIL compatibility + BOX = "box" + HAMMING = "hamming" + LANCZOS = "lanczos" + + +# TODO: Once torchscript supports Enums with staticmethod +# this can be put into InterpolationMode as staticmethod +def _interpolation_modes_from_int(i: int) -> InterpolationMode: + inverse_modes_mapping = { + 0: InterpolationMode.NEAREST, + 2: InterpolationMode.BILINEAR, + 3: InterpolationMode.BICUBIC, + 4: InterpolationMode.BOX, + 5: InterpolationMode.HAMMING, + 1: InterpolationMode.LANCZOS, + } + return inverse_modes_mapping[i] + + +pil_modes_mapping = { + InterpolationMode.NEAREST: 0, + InterpolationMode.BILINEAR: 2, + InterpolationMode.BICUBIC: 3, + InterpolationMode.BOX: 4, + InterpolationMode.HAMMING: 5, + InterpolationMode.LANCZOS: 1, +} + +_is_pil_image = F_pil._is_pil_image + + +def _get_image_size(img: Tensor) -> List[int]: + """Returns image size as [w, h] + """ + if isinstance(img, torch.Tensor): + return F_t._get_image_size(img) + + return F_pil._get_image_size(img) + + +def _get_image_num_channels(img: Tensor) -> int: + """Returns number of image channels + """ + if isinstance(img, torch.Tensor): + return F_t._get_image_num_channels(img) + + return F_pil._get_image_num_channels(img) + + +@torch.jit.unused +def _is_numpy(img: Any) -> bool: + return isinstance(img, np.ndarray) + + +@torch.jit.unused +def _is_numpy_image(img: Any) -> bool: + return img.ndim in {2, 3} + + +def to_tensor(pic): + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. + This function does not support torchscript. + + See :class:`~torchvision.transforms.ToTensor` for more details. + + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not(F_pil._is_pil_image(pic) or _is_numpy(pic)): + raise TypeError('pic should be PIL Image or ndarray. Got {}'.format(type(pic))) + + if _is_numpy(pic) and not _is_numpy_image(pic): + raise ValueError('pic should be 2/3 dimensional. 
Got {} dimensions.'.format(pic.ndim)) + + default_float_dtype = torch.get_default_dtype() + + if isinstance(pic, np.ndarray): + # handle numpy array + if pic.ndim == 2: + pic = pic[:, :, None] + + img = torch.from_numpy(pic.transpose((2, 0, 1))).contiguous() + # backward compatibility + if isinstance(img, torch.ByteTensor): + return img.to(dtype=default_float_dtype).div(255) + else: + return img + + if accimage is not None and isinstance(pic, accimage.Image): + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.float32) + pic.copyto(nppic) + return torch.from_numpy(nppic).to(dtype=default_float_dtype) + + # handle PIL Image + mode_to_nptype = {'I': np.int32, 'I;16': np.int16, 'F': np.float32} + img = torch.from_numpy( + np.array(pic, mode_to_nptype.get(pic.mode, np.uint8), copy=True) + ) + + if pic.mode == '1': + img = 255 * img + img = img.view(pic.size[1], pic.size[0], len(pic.getbands())) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)).contiguous() + if isinstance(img, torch.ByteTensor): + return img.to(dtype=default_float_dtype).div(255) + else: + return img + + +def pil_to_tensor(pic): + """Convert a ``PIL Image`` to a tensor of the same type. + This function does not support torchscript. + + See :class:`~torchvision.transforms.PILToTensor` for more details. + + Args: + pic (PIL Image): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + if not F_pil._is_pil_image(pic): + raise TypeError('pic should be PIL Image. Got {}'.format(type(pic))) + + if accimage is not None and isinstance(pic, accimage.Image): + # accimage format is always uint8 internally, so always return uint8 here + nppic = np.zeros([pic.channels, pic.height, pic.width], dtype=np.uint8) + pic.copyto(nppic) + return torch.as_tensor(nppic) + + # handle PIL Image + img = torch.as_tensor(np.asarray(pic)) + img = img.view(pic.size[1], pic.size[0], len(pic.getbands())) + # put it from HWC to CHW format + img = img.permute((2, 0, 1)) + return img + + +def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + """Convert a tensor image to the given ``dtype`` and scale the values accordingly + This function does not support PIL Image. + + Args: + image (torch.Tensor): Image to be converted + dtype (torch.dtype): Desired data type of the output + + Returns: + Tensor: Converted image + + .. note:: + + When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly. + If converted back and forth, this mismatch has no effect. + + Raises: + RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as + well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to + overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range + of the integer ``dtype``. + """ + if not isinstance(image, torch.Tensor): + raise TypeError('Input img should be Tensor Image') + + return F_t.convert_image_dtype(image, dtype) + + +def to_pil_image(pic, mode=None): + """Convert a tensor or an ndarray to PIL Image. This function does not support torchscript. + + See :class:`~torchvision.transforms.ToPILImage` for more details. + + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. + mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + + .. 
_PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + + Returns: + PIL Image: Image converted to PIL Image. + """ + if not(isinstance(pic, torch.Tensor) or isinstance(pic, np.ndarray)): + raise TypeError('pic should be Tensor or ndarray. Got {}.'.format(type(pic))) + + elif isinstance(pic, torch.Tensor): + if pic.ndimension() not in {2, 3}: + raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndimension())) + + elif pic.ndimension() == 2: + # if 2D image, add channel dimension (CHW) + pic = pic.unsqueeze(0) + + # check number of channels + if pic.shape[-3] > 4: + raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-3])) + + elif isinstance(pic, np.ndarray): + if pic.ndim not in {2, 3}: + raise ValueError('pic should be 2/3 dimensional. Got {} dimensions.'.format(pic.ndim)) + + elif pic.ndim == 2: + # if 2D image, add channel dimension (HWC) + pic = np.expand_dims(pic, 2) + + # check number of channels + if pic.shape[-1] > 4: + raise ValueError('pic should not have > 4 channels. Got {} channels.'.format(pic.shape[-1])) + + npimg = pic + if isinstance(pic, torch.Tensor): + if pic.is_floating_point() and mode != 'F': + pic = pic.mul(255).byte() + npimg = np.transpose(pic.cpu().numpy(), (1, 2, 0)) + + if not isinstance(npimg, np.ndarray): + raise TypeError('Input pic must be a torch.Tensor or NumPy ndarray, ' + + 'not {}'.format(type(npimg))) + + if npimg.shape[2] == 1: + expected_mode = None + npimg = npimg[:, :, 0] + if npimg.dtype == np.uint8: + expected_mode = 'L' + elif npimg.dtype == np.int16: + expected_mode = 'I;16' + elif npimg.dtype == np.int32: + expected_mode = 'I' + elif npimg.dtype == np.float32: + expected_mode = 'F' + if mode is not None and mode != expected_mode: + raise ValueError("Incorrect mode ({}) supplied for input type {}. Should be {}" + .format(mode, np.dtype, expected_mode)) + mode = expected_mode + + elif npimg.shape[2] == 2: + permitted_2_channel_modes = ['LA'] + if mode is not None and mode not in permitted_2_channel_modes: + raise ValueError("Only modes {} are supported for 2D inputs".format(permitted_2_channel_modes)) + + if mode is None and npimg.dtype == np.uint8: + mode = 'LA' + + elif npimg.shape[2] == 4: + permitted_4_channel_modes = ['RGBA', 'CMYK', 'RGBX'] + if mode is not None and mode not in permitted_4_channel_modes: + raise ValueError("Only modes {} are supported for 4D inputs".format(permitted_4_channel_modes)) + + if mode is None and npimg.dtype == np.uint8: + mode = 'RGBA' + else: + permitted_3_channel_modes = ['RGB', 'YCbCr', 'HSV'] + if mode is not None and mode not in permitted_3_channel_modes: + raise ValueError("Only modes {} are supported for 3D inputs".format(permitted_3_channel_modes)) + if mode is None and npimg.dtype == np.uint8: + mode = 'RGB' + + if mode is None: + raise TypeError('Input type {} is not supported'.format(npimg.dtype)) + + return Image.fromarray(npimg, mode=mode) + + +def normalize(tensor: Tensor, mean: List[float], std: List[float], inplace: bool = False) -> Tensor: + """Normalize a float tensor image with mean and standard deviation. + This transform does not support PIL Image. + + .. note:: + This transform acts out of place by default, i.e., it does not mutates the input tensor. + + See :class:`~torchvision.transforms.Normalize` for more details. + + Args: + tensor (Tensor): Float tensor image of size (C, H, W) or (B, C, H, W) to be normalized. + mean (sequence): Sequence of means for each channel. 
+ std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation inplace. + + Returns: + Tensor: Normalized Tensor image. + """ + if not isinstance(tensor, torch.Tensor): + raise TypeError('Input tensor should be a torch tensor. Got {}.'.format(type(tensor))) + + if not tensor.is_floating_point(): + raise TypeError('Input tensor should be a float tensor. Got {}.'.format(tensor.dtype)) + + if tensor.ndim < 3: + raise ValueError('Expected tensor to be a tensor image of size (..., C, H, W). Got tensor.size() = ' + '{}.'.format(tensor.size())) + + if not inplace: + tensor = tensor.clone() + + dtype = tensor.dtype + mean = torch.as_tensor(mean, dtype=dtype, device=tensor.device) + std = torch.as_tensor(std, dtype=dtype, device=tensor.device) + if (std == 0).any(): + raise ValueError('std evaluated to zero after conversion to {}, leading to division by zero.'.format(dtype)) + if mean.ndim == 1: + mean = mean.view(-1, 1, 1) + if std.ndim == 1: + std = std.view(-1, 1, 1) + tensor.sub_(mean).div_(std) + return tensor + + +def resize(img: Tensor, size: List[int], interpolation: InterpolationMode = InterpolationMode.BILINEAR, + max_size: Optional[int] = None, antialias: Optional[bool] = None) -> Tensor: + r"""Resize the input image to the given size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. warning:: + The output image might be different depending on its type: when downsampling, the interpolation of PIL images + and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences + in the performance of a network. Therefore, it is preferable to train and serve a model with the same input + types. + + Args: + img (PIL Image or Tensor): Image to be resized. + size (sequence or int): Desired output size. If size is a sequence like + (h, w), the output size will be matched to this. If size is an int, + the smaller edge of the image will be matched to this number maintaining + the aspect ratio. i.e, if height > width, then image will be rescaled to + :math:`\left(\text{size} \times \frac{\text{height}}{\text{width}}, \text{size}\right)`. + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. + Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``, + ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + max_size (int, optional): The maximum allowed for the longer edge of + the resized image: if the longer edge of the image is greater + than ``max_size`` after being resized according to ``size``, then + the image is resized again so that the longer edge is equal to + ``max_size``. As a result, ``size`` might be overruled, i.e the + smaller edge may be shorter than ``size``. This is only supported + if ``size`` is an int (or a sequence of length 1 in torchscript + mode). + antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias + is always used. If ``img`` is Tensor, the flag is False by default and can be set True for + ``InterpolationMode.BILINEAR`` only mode. + + .. 
warning:: + There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor. + + Returns: + PIL Image or Tensor: Resized image. + """ + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if not isinstance(interpolation, InterpolationMode): + raise TypeError("Argument interpolation should be a InterpolationMode") + + if not isinstance(img, torch.Tensor): + if antialias is not None and not antialias: + warnings.warn( + "Anti-alias option is always applied for PIL Image input. Argument antialias is ignored." + ) + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.resize(img, size=size, interpolation=pil_interpolation, max_size=max_size) + + return F_t.resize(img, size=size, interpolation=interpolation.value, max_size=max_size, antialias=antialias) + + +def scale(*args, **kwargs): + warnings.warn("The use of the transforms.Scale transform is deprecated, " + + "please use transforms.Resize instead.") + return resize(*args, **kwargs) + + +def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor: + r"""Pad the given image on all sides with the given "pad" value. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric, + at most 3 leading dimensions for mode edge, + and an arbitrary number of leading dimensions for mode constant + + Args: + img (PIL Image or Tensor): Image to be padded. + padding (int or sequence): Padding on each border. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. note:: + In torchscript mode padding as single int is not supported, use a sequence of + length 1: ``[padding, ]``. + fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. + If a tuple of length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. + Only number is supported for torch Tensor. + Only int or str or tuple value is supported for PIL Image. + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image. + If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2 + + - reflect: pads with reflection of image without repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + + Returns: + PIL Image or Tensor: Padded image. 
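+
+    Example (a minimal sketch; the random 3-channel tensor image and the padding values below are arbitrary)::
+
+        >>> import torch
+        >>> from torchvision.transforms import functional as F
+        >>> img = torch.rand(3, 64, 64)
+        >>> # pad 4 pixels on left/right and 8 pixels on top/bottom with reflection
+        >>> F.pad(img, [4, 8], padding_mode="reflect").shape
+        torch.Size([3, 80, 72])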
+ """ + if not isinstance(img, torch.Tensor): + return F_pil.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) + + return F_t.pad(img, padding=padding, fill=fill, padding_mode=padding_mode) + + +def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + """Crop the given image at specified location and output size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + If image size is smaller than output size along any edge, image is padded with 0 and then cropped. + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. + width (int): Width of the crop box. + + Returns: + PIL Image or Tensor: Cropped image. + """ + + if not isinstance(img, torch.Tensor): + return F_pil.crop(img, top, left, height, width) + + return F_t.crop(img, top, left, height, width) + + +def center_crop(img: Tensor, output_size: List[int]) -> Tensor: + """Crops the given image at the center. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. + + Args: + img (PIL Image or Tensor): Image to be cropped. + output_size (sequence or int): (height, width) of the crop box. If int or sequence with single int, + it is used for both directions. + + Returns: + PIL Image or Tensor: Cropped image. + """ + if isinstance(output_size, numbers.Number): + output_size = (int(output_size), int(output_size)) + elif isinstance(output_size, (tuple, list)) and len(output_size) == 1: + output_size = (output_size[0], output_size[0]) + + image_width, image_height = _get_image_size(img) + crop_height, crop_width = output_size + + if crop_width > image_width or crop_height > image_height: + padding_ltrb = [ + (crop_width - image_width) // 2 if crop_width > image_width else 0, + (crop_height - image_height) // 2 if crop_height > image_height else 0, + (crop_width - image_width + 1) // 2 if crop_width > image_width else 0, + (crop_height - image_height + 1) // 2 if crop_height > image_height else 0, + ] + img = pad(img, padding_ltrb, fill=0) # PIL uses fill value 0 + image_width, image_height = _get_image_size(img) + if crop_width == image_width and crop_height == image_height: + return img + + crop_top = int(round((image_height - crop_height) / 2.)) + crop_left = int(round((image_width - crop_width) / 2.)) + return crop(img, crop_top, crop_left, crop_height, crop_width) + + +def resized_crop( + img: Tensor, top: int, left: int, height: int, width: int, size: List[int], + interpolation: InterpolationMode = InterpolationMode.BILINEAR +) -> Tensor: + """Crop the given image and resize it to desired size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + Notably used in :class:`~torchvision.transforms.RandomResizedCrop`. + + Args: + img (PIL Image or Tensor): Image to be cropped. (0,0) denotes the top left corner of the image. + top (int): Vertical component of the top left corner of the crop box. + left (int): Horizontal component of the top left corner of the crop box. + height (int): Height of the crop box. 
+ width (int): Width of the crop box. + size (sequence or int): Desired output size. Same semantics as ``resize``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. + Default is ``InterpolationMode.BILINEAR``. If input is Tensor, only ``InterpolationMode.NEAREST``, + ``InterpolationMode.BILINEAR`` and ``InterpolationMode.BICUBIC`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + + Returns: + PIL Image or Tensor: Cropped image. + """ + img = crop(img, top, left, height, width) + img = resize(img, size, interpolation) + return img + + +def hflip(img: Tensor) -> Tensor: + """Horizontally flip the given image. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of leading + dimensions. + + Returns: + PIL Image or Tensor: Horizontally flipped image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.hflip(img) + + return F_t.hflip(img) + + +def _get_perspective_coeffs( + startpoints: List[List[int]], endpoints: List[List[int]] +) -> List[float]: + """Helper function to get the coefficients (a, b, c, d, e, f, g, h) for the perspective transforms. + + In Perspective Transform each pixel (x, y) in the original image gets transformed as, + (x, y) -> ( (ax + by + c) / (gx + hy + 1), (dx + ey + f) / (gx + hy + 1) ) + + Args: + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + + Returns: + octuple (a, b, c, d, e, f, g, h) for transforming each pixel. + """ + a_matrix = torch.zeros(2 * len(startpoints), 8, dtype=torch.float) + + for i, (p1, p2) in enumerate(zip(endpoints, startpoints)): + a_matrix[2 * i, :] = torch.tensor([p1[0], p1[1], 1, 0, 0, 0, -p2[0] * p1[0], -p2[0] * p1[1]]) + a_matrix[2 * i + 1, :] = torch.tensor([0, 0, 0, p1[0], p1[1], 1, -p2[1] * p1[0], -p2[1] * p1[1]]) + + b_matrix = torch.tensor(startpoints, dtype=torch.float).view(8) + res = torch.lstsq(b_matrix, a_matrix)[0] + + output: List[float] = res.squeeze(1).tolist() + return output + + +def perspective( + img: Tensor, + startpoints: List[List[int]], + endpoints: List[List[int]], + interpolation: InterpolationMode = InterpolationMode.BILINEAR, + fill: Optional[List[float]] = None +) -> Tensor: + """Perform perspective transform of the given image. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): Image to be transformed. + startpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the original image. + endpoints (list of list of ints): List containing four lists of two integers corresponding to four corners + ``[top-left, top-right, bottom-right, bottom-left]`` of the transformed image. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. 
+ If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + + Returns: + PIL Image or Tensor: transformed Image. + """ + + coeffs = _get_perspective_coeffs(startpoints, endpoints) + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if not isinstance(interpolation, InterpolationMode): + raise TypeError("Argument interpolation should be a InterpolationMode") + + if not isinstance(img, torch.Tensor): + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.perspective(img, coeffs, interpolation=pil_interpolation, fill=fill) + + return F_t.perspective(img, coeffs, interpolation=interpolation.value, fill=fill) + + +def vflip(img: Tensor) -> Tensor: + """Vertically flip the given image. + + Args: + img (PIL Image or Tensor): Image to be flipped. If img + is a Tensor, it is expected to be in [..., H, W] format, + where ... means it can have an arbitrary number of leading + dimensions. + + Returns: + PIL Image or Tensor: Vertically flipped image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.vflip(img) + + return F_t.vflip(img) + + +def five_crop(img: Tensor, size: List[int]) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]: + """Crop the given image into four corners and the central crop. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + + Returns: + tuple: tuple (tl, tr, bl, br, center) + Corresponding top left, top right, bottom left, bottom right and center crop. 
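+
+    Example (a minimal sketch, assuming an arbitrary random tensor image)::
+
+        >>> import torch
+        >>> from torchvision.transforms import functional as F
+        >>> img = torch.rand(3, 100, 120)
+        >>> tl, tr, bl, br, center = F.five_crop(img, [32, 32])
+        >>> center.shape
+        torch.Size([3, 32, 32])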
+ """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + image_width, image_height = _get_image_size(img) + crop_height, crop_width = size + if crop_width > image_width or crop_height > image_height: + msg = "Requested crop size {} is bigger than input size {}" + raise ValueError(msg.format(size, (image_height, image_width))) + + tl = crop(img, 0, 0, crop_height, crop_width) + tr = crop(img, 0, image_width - crop_width, crop_height, crop_width) + bl = crop(img, image_height - crop_height, 0, crop_height, crop_width) + br = crop(img, image_height - crop_height, image_width - crop_width, crop_height, crop_width) + + center = center_crop(img, [crop_height, crop_width]) + + return tl, tr, bl, br, center + + +def ten_crop(img: Tensor, size: List[int], vertical_flip: bool = False) -> List[Tensor]: + """Generate ten cropped images from the given image. + Crop the given image into four corners and the central crop plus the + flipped version of these (horizontal flipping is used by default). + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. Note:: + This transform returns a tuple of images and there may be a + mismatch in the number of inputs and targets your ``Dataset`` returns. + + Args: + img (PIL Image or Tensor): Image to be cropped. + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool): Use vertical flipping instead of horizontal + + Returns: + tuple: tuple (tl, tr, bl, br, center, tl_flip, tr_flip, bl_flip, br_flip, center_flip) + Corresponding top left, top right, bottom left, bottom right and + center crop and same for the flipped image. + """ + if isinstance(size, numbers.Number): + size = (int(size), int(size)) + elif isinstance(size, (tuple, list)) and len(size) == 1: + size = (size[0], size[0]) + + if len(size) != 2: + raise ValueError("Please provide only two dimensions (h, w) for size.") + + first_five = five_crop(img, size) + + if vertical_flip: + img = vflip(img) + else: + img = hflip(img) + + second_five = five_crop(img, size) + return first_five + second_five + + +def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + """Adjust brightness of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + brightness_factor (float): How much to adjust the brightness. Can be + any non negative number. 0 gives a black image, 1 gives the + original image while 2 increases the brightness by a factor of 2. + + Returns: + PIL Image or Tensor: Brightness adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_brightness(img, brightness_factor) + + return F_t.adjust_brightness(img, brightness_factor) + + +def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + """Adjust contrast of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 3, H, W] format, + where ... 
means it can have an arbitrary number of leading dimensions. + contrast_factor (float): How much to adjust the contrast. Can be any + non negative number. 0 gives a solid gray image, 1 gives the + original image while 2 increases the contrast by a factor of 2. + + Returns: + PIL Image or Tensor: Contrast adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_contrast(img, contrast_factor) + + return F_t.adjust_contrast(img, contrast_factor) + + +def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + """Adjust color saturation of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + saturation_factor (float): How much to adjust the saturation. 0 will + give a black and white image, 1 will give the original image while + 2 will enhance the saturation by a factor of 2. + + Returns: + PIL Image or Tensor: Saturation adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_saturation(img, saturation_factor) + + return F_t.adjust_saturation(img, saturation_factor) + + +def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + """Adjust hue of an image. + + The image hue is adjusted by converting the image to HSV and + cyclically shifting the intensities in the hue channel (H). + The image is then converted back to original image mode. + + `hue_factor` is the amount of shift in H channel and must be in the + interval `[-0.5, 0.5]`. + + See `Hue`_ for more details. + + .. _Hue: https://en.wikipedia.org/wiki/Hue + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported. + hue_factor (float): How much to shift the hue channel. Should be in + [-0.5, 0.5]. 0.5 and -0.5 give complete reversal of hue channel in + HSV space in positive and negative direction respectively. + 0 means no shift. Therefore, both -0.5 and 0.5 will give an image + with complementary colors while 0 gives the original image. + + Returns: + PIL Image or Tensor: Hue adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_hue(img, hue_factor) + + return F_t.adjust_hue(img, hue_factor) + + +def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + r"""Perform gamma correction on an image. + + Also known as Power Law Transform. Intensities in RGB mode are adjusted + based on the following equation: + + .. math:: + I_{\text{out}} = 255 \times \text{gain} \times \left(\frac{I_{\text{in}}}{255}\right)^{\gamma} + + See `Gamma Correction`_ for more details. + + .. _Gamma Correction: https://en.wikipedia.org/wiki/Gamma_correction + + Args: + img (PIL Image or Tensor): PIL Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, modes with transparency (alpha channel) are not supported. + gamma (float): Non negative real number, same as :math:`\gamma` in the equation. + gamma larger than 1 make the shadows darker, + while gamma smaller than 1 make dark regions lighter. + gain (float): The constant multiplier. 
+ Returns: + PIL Image or Tensor: Gamma correction adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_gamma(img, gamma, gain) + + return F_t.adjust_gamma(img, gamma, gain) + + +def _get_inverse_affine_matrix( + center: List[float], angle: float, translate: List[float], scale: float, shear: List[float] +) -> List[float]: + # Helper method to compute inverse matrix for affine transformation + + # As it is explained in PIL.Image.rotate + # We need compute INVERSE of affine transformation matrix: M = T * C * RSS * C^-1 + # where T is translation matrix: [1, 0, tx | 0, 1, ty | 0, 0, 1] + # C is translation matrix to keep center: [1, 0, cx | 0, 1, cy | 0, 0, 1] + # RSS is rotation with scale and shear matrix + # RSS(a, s, (sx, sy)) = + # = R(a) * S(s) * SHy(sy) * SHx(sx) + # = [ s*cos(a - sy)/cos(sy), s*(-cos(a - sy)*tan(x)/cos(y) - sin(a)), 0 ] + # [ s*sin(a + sy)/cos(sy), s*(-sin(a - sy)*tan(x)/cos(y) + cos(a)), 0 ] + # [ 0 , 0 , 1 ] + # + # where R is a rotation matrix, S is a scaling matrix, and SHx and SHy are the shears: + # SHx(s) = [1, -tan(s)] and SHy(s) = [1 , 0] + # [0, 1 ] [-tan(s), 1] + # + # Thus, the inverse is M^-1 = C * RSS^-1 * C^-1 * T^-1 + + rot = math.radians(angle) + sx, sy = [math.radians(s) for s in shear] + + cx, cy = center + tx, ty = translate + + # RSS without scaling + a = math.cos(rot - sy) / math.cos(sy) + b = -math.cos(rot - sy) * math.tan(sx) / math.cos(sy) - math.sin(rot) + c = math.sin(rot - sy) / math.cos(sy) + d = -math.sin(rot - sy) * math.tan(sx) / math.cos(sy) + math.cos(rot) + + # Inverted rotation matrix with scale and shear + # det([[a, b], [c, d]]) == 1, since det(rotation) = 1 and det(shear) = 1 + matrix = [d, -b, 0.0, -c, a, 0.0] + matrix = [x / scale for x in matrix] + + # Apply inverse of translation and of center translation: RSS^-1 * C^-1 * T^-1 + matrix[2] += matrix[0] * (-cx - tx) + matrix[1] * (-cy - ty) + matrix[5] += matrix[3] * (-cx - tx) + matrix[4] * (-cy - ty) + + # Apply center translation: C * RSS^-1 * C^-1 * T^-1 + matrix[2] += cx + matrix[5] += cy + + return matrix + + +def rotate( + img: Tensor, angle: float, interpolation: InterpolationMode = InterpolationMode.NEAREST, + expand: bool = False, center: Optional[List[int]] = None, + fill: Optional[List[float]] = None, resample: Optional[int] = None +) -> Tensor: + """Rotate the image by angle. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to be rotated. + angle (number): rotation angle value in degrees, counter-clockwise. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + expand (bool, optional): Optional expansion flag. + If true, expands the output image to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (sequence, optional): Optional center of rotation. Origin is the upper left corner. + Default is the center of the image. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. 
If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + + Returns: + PIL Image or Tensor: Rotated image. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + if resample is not None: + warnings.warn( + "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" + ) + interpolation = _interpolation_modes_from_int(resample) + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if center is not None and not isinstance(center, (list, tuple)): + raise TypeError("Argument center should be a sequence") + + if not isinstance(interpolation, InterpolationMode): + raise TypeError("Argument interpolation should be a InterpolationMode") + + if not isinstance(img, torch.Tensor): + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.rotate(img, angle=angle, interpolation=pil_interpolation, expand=expand, center=center, fill=fill) + + center_f = [0.0, 0.0] + if center is not None: + img_size = _get_image_size(img) + # Center values should be in pixel coordinates but translated such that (0, 0) corresponds to image center. + center_f = [1.0 * (c - s * 0.5) for c, s in zip(center, img_size)] + + # due to current incoherence of rotation angle direction between affine and rotate implementations + # we need to set -angle. + matrix = _get_inverse_affine_matrix(center_f, -angle, [0.0, 0.0], 1.0, [0.0, 0.0]) + return F_t.rotate(img, matrix=matrix, interpolation=interpolation.value, expand=expand, fill=fill) + + +def affine( + img: Tensor, angle: float, translate: List[int], scale: float, shear: List[float], + interpolation: InterpolationMode = InterpolationMode.NEAREST, fill: Optional[List[float]] = None, + resample: Optional[int] = None, fillcolor: Optional[List[float]] = None +) -> Tensor: + """Apply affine transformation on the image keeping image center invariant. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): image to transform. + angle (number): rotation angle in degrees between -180 and 180, clockwise direction. + translate (sequence of integers): horizontal and vertical translations (post-rotation translation) + scale (float): overall scale + shear (float or sequence): shear angle value in degrees between -180 to 180, clockwise direction. + If a sequence is specified, the first value corresponds to a shear parallel to the x axis, while + the second value corresponds to a shear parallel to the y axis. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + fill (sequence or number, optional): Pixel fill value for the area outside the transformed + image. 
If given a number, the value is used for all bands respectively. + + .. note:: + In torchscript mode single int/float value is not supported, please use a sequence + of length 1: ``[value, ]``. + fillcolor (sequence, int, float): deprecated argument and will be removed since v0.10.0. + Please use the ``fill`` parameter instead. + resample (int, optional): deprecated argument and will be removed since v0.10.0. + Please use the ``interpolation`` parameter instead. + + Returns: + PIL Image or Tensor: Transformed image. + """ + if resample is not None: + warnings.warn( + "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" + ) + interpolation = _interpolation_modes_from_int(resample) + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if fillcolor is not None: + warnings.warn( + "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead" + ) + fill = fillcolor + + if not isinstance(angle, (int, float)): + raise TypeError("Argument angle should be int or float") + + if not isinstance(translate, (list, tuple)): + raise TypeError("Argument translate should be a sequence") + + if len(translate) != 2: + raise ValueError("Argument translate should be a sequence of length 2") + + if scale <= 0.0: + raise ValueError("Argument scale should be positive") + + if not isinstance(shear, (numbers.Number, (list, tuple))): + raise TypeError("Shear should be either a single value or a sequence of two values") + + if not isinstance(interpolation, InterpolationMode): + raise TypeError("Argument interpolation should be a InterpolationMode") + + if isinstance(angle, int): + angle = float(angle) + + if isinstance(translate, tuple): + translate = list(translate) + + if isinstance(shear, numbers.Number): + shear = [shear, 0.0] + + if isinstance(shear, tuple): + shear = list(shear) + + if len(shear) == 1: + shear = [shear[0], shear[0]] + + if len(shear) != 2: + raise ValueError("Shear should be a sequence containing two values. Got {}".format(shear)) + + img_size = _get_image_size(img) + if not isinstance(img, torch.Tensor): + # center = (img_size[0] * 0.5 + 0.5, img_size[1] * 0.5 + 0.5) + # it is visually better to estimate the center without 0.5 offset + # otherwise image rotated by 90 degrees is shifted vs output image of torch.rot90 or F_t.affine + center = [img_size[0] * 0.5, img_size[1] * 0.5] + matrix = _get_inverse_affine_matrix(center, angle, translate, scale, shear) + pil_interpolation = pil_modes_mapping[interpolation] + return F_pil.affine(img, matrix=matrix, interpolation=pil_interpolation, fill=fill) + + translate_f = [1.0 * t for t in translate] + matrix = _get_inverse_affine_matrix([0.0, 0.0], angle, translate_f, scale, shear) + return F_t.affine(img, matrix=matrix, interpolation=interpolation.value, fill=fill) + + +@torch.jit.unused +def to_grayscale(img, num_output_channels=1): + """Convert PIL image of any mode (RGB, HSV, LAB, etc) to grayscale version of image. + This transform does not support torch Tensor. + + Args: + img (PIL Image): PIL Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default is 1. + + Returns: + PIL Image: Grayscale version of the image. 
+ + - if num_output_channels = 1 : returned image is single channel + - if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if isinstance(img, Image.Image): + return F_pil.to_grayscale(img, num_output_channels) + + raise TypeError("Input should be PIL Image") + + +def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + """Convert RGB image to grayscale version of image. + If the image is torch Tensor, it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions + + Note: + Please, note that this method supports only RGB images as input. For inputs in other color spaces, + please, consider using meth:`~torchvision.transforms.functional.to_grayscale` with PIL Image. + + Args: + img (PIL Image or Tensor): RGB Image to be converted to grayscale. + num_output_channels (int): number of channels of the output image. Value can be 1 or 3. Default, 1. + + Returns: + PIL Image or Tensor: Grayscale version of the image. + + - if num_output_channels = 1 : returned image is single channel + - if num_output_channels = 3 : returned image is 3 channel with r = g = b + """ + if not isinstance(img, torch.Tensor): + return F_pil.to_grayscale(img, num_output_channels) + + return F_t.rgb_to_grayscale(img, num_output_channels) + + +def erase(img: Tensor, i: int, j: int, h: int, w: int, v: Tensor, inplace: bool = False) -> Tensor: + """ Erase the input Tensor Image with given value. + This transform does not support PIL Image. + + Args: + img (Tensor Image): Tensor image of size (C, H, W) to be erased + i (int): i in (i,j) i.e coordinates of the upper left corner. + j (int): j in (i,j) i.e coordinates of the upper left corner. + h (int): Height of the erased region. + w (int): Width of the erased region. + v: Erasing value. + inplace(bool, optional): For in-place operations. By default is set False. + + Returns: + Tensor Image: Erased image. + """ + if not isinstance(img, torch.Tensor): + raise TypeError('img should be Tensor Image. Got {}'.format(type(img))) + + if not inplace: + img = img.clone() + + img[..., i:i + h, j:j + w] = v + return img + + +def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: Optional[List[float]] = None) -> Tensor: + """Performs Gaussian blurring on the image by given kernel. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + img (PIL Image or Tensor): Image to be blurred + kernel_size (sequence of ints or int): Gaussian kernel size. Can be a sequence of integers + like ``(kx, ky)`` or a single integer for square kernels. + + .. note:: + In torchscript mode kernel_size as single int is not supported, use a sequence of + length 1: ``[ksize, ]``. + sigma (sequence of floats or float, optional): Gaussian kernel standard deviation. Can be a + sequence of floats like ``(sigma_x, sigma_y)`` or a single float to define the + same sigma in both X/Y directions. If None, then it is computed using + ``kernel_size`` as ``sigma = 0.3 * ((kernel_size - 1) * 0.5 - 1) + 0.8``. + Default, None. + + .. note:: + In torchscript mode sigma as single float is + not supported, use a sequence of length 1: ``[sigma, ]``. + + Returns: + PIL Image or Tensor: Gaussian Blurred version of the image. + """ + if not isinstance(kernel_size, (int, list, tuple)): + raise TypeError('kernel_size should be int or a sequence of integers. 
Got {}'.format(type(kernel_size))) + if isinstance(kernel_size, int): + kernel_size = [kernel_size, kernel_size] + if len(kernel_size) != 2: + raise ValueError('If kernel_size is a sequence its length should be 2. Got {}'.format(len(kernel_size))) + for ksize in kernel_size: + if ksize % 2 == 0 or ksize < 0: + raise ValueError('kernel_size should have odd and positive integers. Got {}'.format(kernel_size)) + + if sigma is None: + sigma = [ksize * 0.15 + 0.35 for ksize in kernel_size] + + if sigma is not None and not isinstance(sigma, (int, float, list, tuple)): + raise TypeError('sigma should be either float or sequence of floats. Got {}'.format(type(sigma))) + if isinstance(sigma, (int, float)): + sigma = [float(sigma), float(sigma)] + if isinstance(sigma, (list, tuple)) and len(sigma) == 1: + sigma = [sigma[0], sigma[0]] + if len(sigma) != 2: + raise ValueError('If sigma is a sequence, its length should be 2. Got {}'.format(len(sigma))) + for s in sigma: + if s <= 0.: + raise ValueError('sigma should have positive values. Got {}'.format(sigma)) + + t_img = img + if not isinstance(img, torch.Tensor): + if not F_pil._is_pil_image(img): + raise TypeError('img should be PIL Image or Tensor. Got {}'.format(type(img))) + + t_img = to_tensor(img) + + output = F_t.gaussian_blur(t_img, kernel_size, sigma) + + if not isinstance(img, torch.Tensor): + output = to_pil_image(output) + return output + + +def invert(img: Tensor) -> Tensor: + """Invert the colors of an RGB/grayscale image. + + Args: + img (PIL Image or Tensor): Image to have its colors inverted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Returns: + PIL Image or Tensor: Color inverted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.invert(img) + + return F_t.invert(img) + + +def posterize(img: Tensor, bits: int) -> Tensor: + """Posterize an image by reducing the number of bits for each color channel. + + Args: + img (PIL Image or Tensor): Image to have its colors posterized. + If img is torch Tensor, it should be of type torch.uint8 and + it is expected to be in [..., 1 or 3, H, W] format, where ... means + it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + bits (int): The number of bits to keep for each channel (0-8). + Returns: + PIL Image or Tensor: Posterized image. + """ + if not (0 <= bits <= 8): + raise ValueError('The number if bits should be between 0 and 8. Got {}'.format(bits)) + + if not isinstance(img, torch.Tensor): + return F_pil.posterize(img, bits) + + return F_t.posterize(img, bits) + + +def solarize(img: Tensor, threshold: float) -> Tensor: + """Solarize an RGB/grayscale image by inverting all pixel values above a threshold. + + Args: + img (PIL Image or Tensor): Image to have its colors inverted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + threshold (float): All pixels equal or above this value are inverted. + Returns: + PIL Image or Tensor: Solarized image. 
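+
+    Example (a minimal sketch, assuming an arbitrary random ``uint8`` tensor image)::
+
+        >>> import torch
+        >>> from torchvision.transforms import functional as F
+        >>> img = torch.randint(0, 256, (3, 32, 32), dtype=torch.uint8)
+        >>> out = F.solarize(img, threshold=128)
+        >>> # pixels >= 128 are inverted, the rest are left unchanged
+        >>> torch.equal(out[img >= 128], 255 - img[img >= 128])
+        True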
+ """ + if not isinstance(img, torch.Tensor): + return F_pil.solarize(img, threshold) + + return F_t.solarize(img, threshold) + + +def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: + """Adjust the sharpness of an image. + + Args: + img (PIL Image or Tensor): Image to be adjusted. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + sharpness_factor (float): How much to adjust the sharpness. Can be + any non negative number. 0 gives a blurred image, 1 gives the + original image while 2 increases the sharpness by a factor of 2. + + Returns: + PIL Image or Tensor: Sharpness adjusted image. + """ + if not isinstance(img, torch.Tensor): + return F_pil.adjust_sharpness(img, sharpness_factor) + + return F_t.adjust_sharpness(img, sharpness_factor) + + +def autocontrast(img: Tensor) -> Tensor: + """Maximize contrast of an image by remapping its + pixels per channel so that the lowest becomes black and the lightest + becomes white. + + Args: + img (PIL Image or Tensor): Image on which autocontrast is applied. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Returns: + PIL Image or Tensor: An image that was autocontrasted. + """ + if not isinstance(img, torch.Tensor): + return F_pil.autocontrast(img) + + return F_t.autocontrast(img) + + +def equalize(img: Tensor) -> Tensor: + """Equalize the histogram of an image by applying + a non-linear mapping to the input in order to create a uniform + distribution of grayscale values in the output. + + Args: + img (PIL Image or Tensor): Image on which equalize is applied. + If img is torch Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + The tensor dtype must be ``torch.uint8`` and values are expected to be in ``[0, 255]``. + If img is PIL Image, it is expected to be in mode "P", "L" or "RGB". + + Returns: + PIL Image or Tensor: An image that was equalized. 
+ """ + if not isinstance(img, torch.Tensor): + return F_pil.equalize(img) + + return F_t.equalize(img) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_pil.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_pil.py new file mode 100644 index 0000000000000000000000000000000000000000..3829637fdb723a9d75ac63e99f7d2a1be8b754c2 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_pil.py @@ -0,0 +1,352 @@ +import numbers +from typing import Any, List, Sequence + +import numpy as np +import torch +from PIL import Image, ImageOps, ImageEnhance + +try: + import accimage +except ImportError: + accimage = None + + +@torch.jit.unused +def _is_pil_image(img: Any) -> bool: + if accimage is not None: + return isinstance(img, (Image.Image, accimage.Image)) + else: + return isinstance(img, Image.Image) + + +@torch.jit.unused +def _get_image_size(img: Any) -> List[int]: + if _is_pil_image(img): + return img.size + raise TypeError("Unexpected type {}".format(type(img))) + + +@torch.jit.unused +def _get_image_num_channels(img: Any) -> int: + if _is_pil_image(img): + return 1 if img.mode == 'L' else 3 + raise TypeError("Unexpected type {}".format(type(img))) + + +@torch.jit.unused +def hflip(img): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.transpose(Image.FLIP_LEFT_RIGHT) + + +@torch.jit.unused +def vflip(img): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.transpose(Image.FLIP_TOP_BOTTOM) + + +@torch.jit.unused +def adjust_brightness(img, brightness_factor): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Brightness(img) + img = enhancer.enhance(brightness_factor) + return img + + +@torch.jit.unused +def adjust_contrast(img, contrast_factor): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Contrast(img) + img = enhancer.enhance(contrast_factor) + return img + + +@torch.jit.unused +def adjust_saturation(img, saturation_factor): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Color(img) + img = enhancer.enhance(saturation_factor) + return img + + +@torch.jit.unused +def adjust_hue(img, hue_factor): + if not(-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 0.5].'.format(hue_factor)) + + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + input_mode = img.mode + if input_mode in {'L', '1', 'I', 'F'}: + return img + + h, s, v = img.convert('HSV').split() + + np_h = np.array(h, dtype=np.uint8) + # uint8 addition take cares of rotation across boundaries + with np.errstate(over='ignore'): + np_h += np.uint8(hue_factor * 255) + h = Image.fromarray(np_h, 'L') + + img = Image.merge('HSV', (h, s, v)).convert(input_mode) + return img + + +@torch.jit.unused +def adjust_gamma(img, gamma, gain=1): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. 
Got {}'.format(type(img))) + + if gamma < 0: + raise ValueError('Gamma should be a non-negative real number') + + input_mode = img.mode + img = img.convert('RGB') + gamma_map = [(255 + 1 - 1e-3) * gain * pow(ele / 255., gamma) for ele in range(256)] * 3 + img = img.point(gamma_map) # use PIL's point-function to accelerate this part + + img = img.convert(input_mode) + return img + + +@torch.jit.unused +def pad(img, padding, fill=0, padding_mode="constant"): + if not _is_pil_image(img): + raise TypeError("img should be PIL Image. Got {}".format(type(img))) + + if not isinstance(padding, (numbers.Number, tuple, list)): + raise TypeError("Got inappropriate padding arg") + if not isinstance(fill, (numbers.Number, str, tuple)): + raise TypeError("Got inappropriate fill arg") + if not isinstance(padding_mode, str): + raise TypeError("Got inappropriate padding_mode arg") + + if isinstance(padding, list): + padding = tuple(padding) + + if isinstance(padding, tuple) and len(padding) not in [1, 2, 4]: + raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding))) + + if isinstance(padding, tuple) and len(padding) == 1: + # Compatibility with `functional_tensor.pad` + padding = padding[0] + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + if padding_mode == "constant": + opts = _parse_fill(fill, img, name="fill") + if img.mode == "P": + palette = img.getpalette() + image = ImageOps.expand(img, border=padding, **opts) + image.putpalette(palette) + return image + + return ImageOps.expand(img, border=padding, **opts) + else: + if isinstance(padding, int): + pad_left = pad_right = pad_top = pad_bottom = padding + if isinstance(padding, tuple) and len(padding) == 2: + pad_left = pad_right = padding[0] + pad_top = pad_bottom = padding[1] + if isinstance(padding, tuple) and len(padding) == 4: + pad_left = padding[0] + pad_top = padding[1] + pad_right = padding[2] + pad_bottom = padding[3] + + p = [pad_left, pad_top, pad_right, pad_bottom] + cropping = -np.minimum(p, 0) + + if cropping.any(): + crop_left, crop_top, crop_right, crop_bottom = cropping + img = img.crop((crop_left, crop_top, img.width - crop_right, img.height - crop_bottom)) + + pad_left, pad_top, pad_right, pad_bottom = np.maximum(p, 0) + + if img.mode == 'P': + palette = img.getpalette() + img = np.asarray(img) + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) + img = Image.fromarray(img) + img.putpalette(palette) + return img + + img = np.asarray(img) + # RGB image + if len(img.shape) == 3: + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right), (0, 0)), padding_mode) + # Grayscale image + if len(img.shape) == 2: + img = np.pad(img, ((pad_top, pad_bottom), (pad_left, pad_right)), padding_mode) + + return Image.fromarray(img) + + +@torch.jit.unused +def crop(img: Image.Image, top: int, left: int, height: int, width: int) -> Image.Image: + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + return img.crop((left, top, left + width, top + height)) + + +@torch.jit.unused +def resize(img, size, interpolation=Image.BILINEAR, max_size=None): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. 
Got {}'.format(type(img))) + if not (isinstance(size, int) or (isinstance(size, Sequence) and len(size) in (1, 2))): + raise TypeError('Got inappropriate size arg: {}'.format(size)) + + if isinstance(size, Sequence) and len(size) == 1: + size = size[0] + if isinstance(size, int): + w, h = img.size + + short, long = (w, h) if w <= h else (h, w) + if short == size: + return img + + new_short, new_long = size, int(size * long / short) + + if max_size is not None: + if max_size <= size: + raise ValueError( + f"max_size = {max_size} must be strictly greater than the requested " + f"size for the smaller edge size = {size}" + ) + if new_long > max_size: + new_short, new_long = int(max_size * new_short / new_long), max_size + + new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short) + return img.resize((new_w, new_h), interpolation) + else: + if max_size is not None: + raise ValueError( + "max_size should only be passed if size specifies the length of the smaller edge, " + "i.e. size should be an int or a sequence of length 1 in torchscript mode." + ) + return img.resize(size[::-1], interpolation) + + +@torch.jit.unused +def _parse_fill(fill, img, name="fillcolor"): + # Process fill color for affine transforms + num_bands = len(img.getbands()) + if fill is None: + fill = 0 + if isinstance(fill, (int, float)) and num_bands > 1: + fill = tuple([fill] * num_bands) + if isinstance(fill, (list, tuple)): + if len(fill) != num_bands: + msg = ("The number of elements in 'fill' does not match the number of " + "bands of the image ({} != {})") + raise ValueError(msg.format(len(fill), num_bands)) + + fill = tuple(fill) + + return {name: fill} + + +@torch.jit.unused +def affine(img, matrix, interpolation=0, fill=None): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + output_size = img.size + opts = _parse_fill(fill, img) + return img.transform(output_size, Image.AFFINE, matrix, interpolation, **opts) + + +@torch.jit.unused +def rotate(img, angle, interpolation=0, expand=False, center=None, fill=None): + if not _is_pil_image(img): + raise TypeError("img should be PIL Image. Got {}".format(type(img))) + + opts = _parse_fill(fill, img) + return img.rotate(angle, interpolation, expand, center, **opts) + + +@torch.jit.unused +def perspective(img, perspective_coeffs, interpolation=Image.BICUBIC, fill=None): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + opts = _parse_fill(fill, img) + + return img.transform(img.size, Image.PERSPECTIVE, perspective_coeffs, interpolation, **opts) + + +@torch.jit.unused +def to_grayscale(img, num_output_channels): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + if num_output_channels == 1: + img = img.convert('L') + elif num_output_channels == 3: + img = img.convert('L') + np_img = np.array(img, dtype=np.uint8) + np_img = np.dstack([np_img, np_img, np_img]) + img = Image.fromarray(np_img, 'RGB') + else: + raise ValueError('num_output_channels should be either 1 or 3') + + return img + + +@torch.jit.unused +def invert(img): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + return ImageOps.invert(img) + + +@torch.jit.unused +def posterize(img, bits): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. 
Got {}'.format(type(img))) + return ImageOps.posterize(img, bits) + + +@torch.jit.unused +def solarize(img, threshold): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + return ImageOps.solarize(img, threshold) + + +@torch.jit.unused +def adjust_sharpness(img, sharpness_factor): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + + enhancer = ImageEnhance.Sharpness(img) + img = enhancer.enhance(sharpness_factor) + return img + + +@torch.jit.unused +def autocontrast(img): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + return ImageOps.autocontrast(img) + + +@torch.jit.unused +def equalize(img): + if not _is_pil_image(img): + raise TypeError('img should be PIL Image. Got {}'.format(type(img))) + return ImageOps.equalize(img) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_tensor.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_tensor.py new file mode 100644 index 0000000000000000000000000000000000000000..a0e32d4237ef17f768eefdd84a51dcc8495b504a --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/functional_tensor.py @@ -0,0 +1,966 @@ +import warnings + +import torch +from torch import Tensor +from torch.nn.functional import grid_sample, conv2d, interpolate, pad as torch_pad +from torch.jit.annotations import BroadcastingList2 +from typing import Optional, Tuple, List + + +def _is_tensor_a_torch_image(x: Tensor) -> bool: + return x.ndim >= 2 + + +def _assert_image_tensor(img): + if not _is_tensor_a_torch_image(img): + raise TypeError("Tensor is not a torch image.") + + +def _get_image_size(img: Tensor) -> List[int]: + # Returns (w, h) of tensor image + _assert_image_tensor(img) + return [img.shape[-1], img.shape[-2]] + + +def _get_image_num_channels(img: Tensor) -> int: + if img.ndim == 2: + return 1 + elif img.ndim > 2: + return img.shape[-3] + + raise TypeError("Input ndim should be 2 or more. Got {}".format(img.ndim)) + + +def _max_value(dtype: torch.dtype) -> float: + # TODO: replace this method with torch.iinfo when it gets torchscript support. + # https://github.com/pytorch/pytorch/issues/41492 + + a = torch.tensor(2, dtype=dtype) + signed = 1 if torch.tensor(0, dtype=dtype).is_signed() else 0 + bits = 1 + max_value = torch.tensor(-signed, dtype=torch.long) + while True: + next_value = a.pow(bits - signed).sub(1) + if next_value > max_value: + max_value = next_value + bits *= 2 + else: + break + return max_value.item() + + +def _assert_channels(img: Tensor, permitted: List[int]) -> None: + c = _get_image_num_channels(img) + if c not in permitted: + raise TypeError("Input image tensor permitted channel values are {}, but found {}".format(permitted, c)) + + +def convert_image_dtype(image: torch.Tensor, dtype: torch.dtype = torch.float) -> torch.Tensor: + if image.dtype == dtype: + return image + + if image.is_floating_point(): + + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + return image.to(dtype) + + # float to int + if (image.dtype == torch.float32 and dtype in (torch.int32, torch.int64)) or ( + image.dtype == torch.float64 and dtype == torch.int64 + ): + msg = f"The cast from {image.dtype} to {dtype} cannot be performed safely." 
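+            # float32 represents consecutive integers exactly only up to 2 ** 24 and
+            # float64 only up to 2 ** 53, which is smaller than the range of the target
+            # integer dtype, so the scaled values could overflow or collide.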
+ raise RuntimeError(msg) + + # https://github.com/pytorch/vision/pull/2078#issuecomment-612045321 + # For data in the range 0-1, (float * 255).to(uint) is only 255 + # when float is exactly 1.0. + # `max + 1 - epsilon` provides more evenly distributed mapping of + # ranges of floats to ints. + eps = 1e-3 + max_val = _max_value(dtype) + result = image.mul(max_val + 1.0 - eps) + return result.to(dtype) + else: + input_max = _max_value(image.dtype) + + # int to float + # TODO: replace with dtype.is_floating_point when torchscript supports it + if torch.tensor(0, dtype=dtype).is_floating_point(): + image = image.to(dtype) + return image / input_max + + output_max = _max_value(dtype) + + # int to int + if input_max > output_max: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image // factor can produce different results + factor = int((input_max + 1) // (output_max + 1)) + image = torch.div(image, factor, rounding_mode='floor') + return image.to(dtype) + else: + # factor should be forced to int for torch jit script + # otherwise factor is a float and image * factor can produce different results + factor = int((output_max + 1) // (input_max + 1)) + image = image.to(dtype) + return image * factor + + +def vflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-2) + + +def hflip(img: Tensor) -> Tensor: + _assert_image_tensor(img) + + return img.flip(-1) + + +def crop(img: Tensor, top: int, left: int, height: int, width: int) -> Tensor: + _assert_image_tensor(img) + + w, h = _get_image_size(img) + right = left + width + bottom = top + height + + if left < 0 or top < 0 or right > w or bottom > h: + padding_ltrb = [max(-left, 0), max(-top, 0), max(right - w, 0), max(bottom - h, 0)] + return pad(img[..., max(top, 0):bottom, max(left, 0):right], padding_ltrb, fill=0) + return img[..., top:bottom, left:right] + + +def rgb_to_grayscale(img: Tensor, num_output_channels: int = 1) -> Tensor: + if img.ndim < 3: + raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + _assert_channels(img, [3]) + + if num_output_channels not in (1, 3): + raise ValueError('num_output_channels should be either 1 or 3') + + r, g, b = img.unbind(dim=-3) + # This implementation closely follows the TF one: + # https://github.com/tensorflow/tensorflow/blob/v2.3.0/tensorflow/python/ops/image_ops_impl.py#L2105-L2138 + l_img = (0.2989 * r + 0.587 * g + 0.114 * b).to(img.dtype) + l_img = l_img.unsqueeze(dim=-3) + + if num_output_channels == 3: + return l_img.expand(img.shape) + + return l_img + + +def adjust_brightness(img: Tensor, brightness_factor: float) -> Tensor: + if brightness_factor < 0: + raise ValueError('brightness_factor ({}) is not non-negative.'.format(brightness_factor)) + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + return _blend(img, torch.zeros_like(img), brightness_factor) + + +def adjust_contrast(img: Tensor, contrast_factor: float) -> Tensor: + if contrast_factor < 0: + raise ValueError('contrast_factor ({}) is not non-negative.'.format(contrast_factor)) + + _assert_image_tensor(img) + + _assert_channels(img, [3]) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + mean = torch.mean(rgb_to_grayscale(img).to(dtype), dim=(-3, -2, -1), keepdim=True) + + return _blend(img, mean, contrast_factor) + + +def adjust_hue(img: Tensor, hue_factor: float) -> Tensor: + if not (-0.5 <= hue_factor <= 0.5): + raise ValueError('hue_factor ({}) is not in [-0.5, 
0.5].'.format(hue_factor)) + + if not (isinstance(img, torch.Tensor)): + raise TypeError('Input img should be Tensor image') + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + if _get_image_num_channels(img) == 1: # Match PIL behaviour + return img + + orig_dtype = img.dtype + if img.dtype == torch.uint8: + img = img.to(dtype=torch.float32) / 255.0 + + img = _rgb2hsv(img) + h, s, v = img.unbind(dim=-3) + h = (h + hue_factor) % 1.0 + img = torch.stack((h, s, v), dim=-3) + img_hue_adj = _hsv2rgb(img) + + if orig_dtype == torch.uint8: + img_hue_adj = (img_hue_adj * 255.0).to(dtype=orig_dtype) + + return img_hue_adj + + +def adjust_saturation(img: Tensor, saturation_factor: float) -> Tensor: + if saturation_factor < 0: + raise ValueError('saturation_factor ({}) is not non-negative.'.format(saturation_factor)) + + _assert_image_tensor(img) + + _assert_channels(img, [3]) + + return _blend(img, rgb_to_grayscale(img), saturation_factor) + + +def adjust_gamma(img: Tensor, gamma: float, gain: float = 1) -> Tensor: + if not isinstance(img, torch.Tensor): + raise TypeError('Input img should be a Tensor.') + + _assert_channels(img, [1, 3]) + + if gamma < 0: + raise ValueError('Gamma should be a non-negative real number') + + result = img + dtype = img.dtype + if not torch.is_floating_point(img): + result = convert_image_dtype(result, torch.float32) + + result = (gain * result ** gamma).clamp(0, 1) + + result = convert_image_dtype(result, dtype) + return result + + +def center_crop(img: Tensor, output_size: BroadcastingList2[int]) -> Tensor: + """DEPRECATED + """ + warnings.warn( + "This method is deprecated and will be removed in future releases. " + "Please, use ``F.center_crop`` instead." + ) + + _assert_image_tensor(img) + + _, image_width, image_height = img.size() + crop_height, crop_width = output_size + # crop_top = int(round((image_height - crop_height) / 2.)) + # Result can be different between python func and scripted func + # Temporary workaround: + crop_top = int((image_height - crop_height + 1) * 0.5) + # crop_left = int(round((image_width - crop_width) / 2.)) + # Result can be different between python func and scripted func + # Temporary workaround: + crop_left = int((image_width - crop_width + 1) * 0.5) + + return crop(img, crop_top, crop_left, crop_height, crop_width) + + +def five_crop(img: Tensor, size: BroadcastingList2[int]) -> List[Tensor]: + """DEPRECATED + """ + warnings.warn( + "This method is deprecated and will be removed in future releases. " + "Please, use ``F.five_crop`` instead." + ) + + _assert_image_tensor(img) + + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + + _, image_width, image_height = img.size() + crop_height, crop_width = size + if crop_width > image_width or crop_height > image_height: + msg = "Requested crop size {} is bigger than input size {}" + raise ValueError(msg.format(size, (image_height, image_width))) + + tl = crop(img, 0, 0, crop_width, crop_height) + tr = crop(img, image_width - crop_width, 0, image_width, crop_height) + bl = crop(img, 0, image_height - crop_height, crop_width, image_height) + br = crop(img, image_width - crop_width, image_height - crop_height, image_width, image_height) + center = center_crop(img, (crop_height, crop_width)) + + return [tl, tr, bl, br, center] + + +def ten_crop(img: Tensor, size: BroadcastingList2[int], vertical_flip: bool = False) -> List[Tensor]: + """DEPRECATED + """ + warnings.warn( + "This method is deprecated and will be removed in future releases. 
" + "Please, use ``F.ten_crop`` instead." + ) + + _assert_image_tensor(img) + + assert len(size) == 2, "Please provide only two dimensions (h, w) for size." + first_five = five_crop(img, size) + + if vertical_flip: + img = vflip(img) + else: + img = hflip(img) + + second_five = five_crop(img, size) + + return first_five + second_five + + +def _blend(img1: Tensor, img2: Tensor, ratio: float) -> Tensor: + ratio = float(ratio) + bound = 1.0 if img1.is_floating_point() else 255.0 + return (ratio * img1 + (1.0 - ratio) * img2).clamp(0, bound).to(img1.dtype) + + +def _rgb2hsv(img): + r, g, b = img.unbind(dim=-3) + + # Implementation is based on https://github.com/python-pillow/Pillow/blob/4174d4267616897df3746d315d5a2d0f82c656ee/ + # src/libImaging/Convert.c#L330 + maxc = torch.max(img, dim=-3).values + minc = torch.min(img, dim=-3).values + + # The algorithm erases S and H channel where `maxc = minc`. This avoids NaN + # from happening in the results, because + # + S channel has division by `maxc`, which is zero only if `maxc = minc` + # + H channel has division by `(maxc - minc)`. + # + # Instead of overwriting NaN afterwards, we just prevent it from occuring so + # we don't need to deal with it in case we save the NaN in a buffer in + # backprop, if it is ever supported, but it doesn't hurt to do so. + eqc = maxc == minc + + cr = maxc - minc + # Since `eqc => cr = 0`, replacing denominator with 1 when `eqc` is fine. + ones = torch.ones_like(maxc) + s = cr / torch.where(eqc, ones, maxc) + # Note that `eqc => maxc = minc = r = g = b`. So the following calculation + # of `h` would reduce to `bc - gc + 2 + rc - bc + 4 + rc - bc = 6` so it + # would not matter what values `rc`, `gc`, and `bc` have here, and thus + # replacing denominator with 1 when `eqc` is fine. + cr_divisor = torch.where(eqc, ones, cr) + rc = (maxc - r) / cr_divisor + gc = (maxc - g) / cr_divisor + bc = (maxc - b) / cr_divisor + + hr = (maxc == r) * (bc - gc) + hg = ((maxc == g) & (maxc != r)) * (2.0 + rc - bc) + hb = ((maxc != g) & (maxc != r)) * (4.0 + gc - rc) + h = (hr + hg + hb) + h = torch.fmod((h / 6.0 + 1.0), 1.0) + return torch.stack((h, s, maxc), dim=-3) + + +def _hsv2rgb(img): + h, s, v = img.unbind(dim=-3) + i = torch.floor(h * 6.0) + f = (h * 6.0) - i + i = i.to(dtype=torch.int32) + + p = torch.clamp((v * (1.0 - s)), 0.0, 1.0) + q = torch.clamp((v * (1.0 - s * f)), 0.0, 1.0) + t = torch.clamp((v * (1.0 - s * (1.0 - f))), 0.0, 1.0) + i = i % 6 + + mask = i.unsqueeze(dim=-3) == torch.arange(6, device=i.device).view(-1, 1, 1) + + a1 = torch.stack((v, q, p, p, t, v), dim=-3) + a2 = torch.stack((t, v, v, q, p, p), dim=-3) + a3 = torch.stack((p, p, t, v, v, q), dim=-3) + a4 = torch.stack((a1, a2, a3), dim=-4) + + return torch.einsum("...ijk, ...xijk -> ...xjk", mask.to(dtype=img.dtype), a4) + + +def _pad_symmetric(img: Tensor, padding: List[int]) -> Tensor: + # padding is left, right, top, bottom + + # crop if needed + if padding[0] < 0 or padding[1] < 0 or padding[2] < 0 or padding[3] < 0: + crop_left, crop_right, crop_top, crop_bottom = [-min(x, 0) for x in padding] + img = img[..., crop_top:img.shape[-2] - crop_bottom, crop_left:img.shape[-1] - crop_right] + padding = [max(x, 0) for x in padding] + + in_sizes = img.size() + + x_indices = [i for i in range(in_sizes[-1])] # [0, 1, 2, 3, ...] + left_indices = [i for i in range(padding[0] - 1, -1, -1)] # e.g. [3, 2, 1, 0] + right_indices = [-(i + 1) for i in range(padding[1])] # e.g. 
[-1, -2, -3] + x_indices = torch.tensor(left_indices + x_indices + right_indices, device=img.device) + + y_indices = [i for i in range(in_sizes[-2])] + top_indices = [i for i in range(padding[2] - 1, -1, -1)] + bottom_indices = [-(i + 1) for i in range(padding[3])] + y_indices = torch.tensor(top_indices + y_indices + bottom_indices, device=img.device) + + ndim = img.ndim + if ndim == 3: + return img[:, y_indices[:, None], x_indices[None, :]] + elif ndim == 4: + return img[:, :, y_indices[:, None], x_indices[None, :]] + else: + raise RuntimeError("Symmetric padding of N-D tensors are not supported yet") + + +def pad(img: Tensor, padding: List[int], fill: int = 0, padding_mode: str = "constant") -> Tensor: + _assert_image_tensor(img) + + if not isinstance(padding, (int, tuple, list)): + raise TypeError("Got inappropriate padding arg") + if not isinstance(fill, (int, float)): + raise TypeError("Got inappropriate fill arg") + if not isinstance(padding_mode, str): + raise TypeError("Got inappropriate padding_mode arg") + + if isinstance(padding, tuple): + padding = list(padding) + + if isinstance(padding, list) and len(padding) not in [1, 2, 4]: + raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding))) + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + if isinstance(padding, int): + if torch.jit.is_scripting(): + # This maybe unreachable + raise ValueError("padding can't be an int while torchscripting, set it as a list [value, ]") + pad_left = pad_right = pad_top = pad_bottom = padding + elif len(padding) == 1: + pad_left = pad_right = pad_top = pad_bottom = padding[0] + elif len(padding) == 2: + pad_left = pad_right = padding[0] + pad_top = pad_bottom = padding[1] + else: + pad_left = padding[0] + pad_top = padding[1] + pad_right = padding[2] + pad_bottom = padding[3] + + p = [pad_left, pad_right, pad_top, pad_bottom] + + if padding_mode == "edge": + # remap padding_mode str + padding_mode = "replicate" + elif padding_mode == "symmetric": + # route to another implementation + return _pad_symmetric(img, p) + + need_squeeze = False + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if (padding_mode != "constant") and img.dtype not in (torch.float32, torch.float64): + # Here we temporary cast input tensor to float + # until pytorch issue is resolved : + # https://github.com/pytorch/pytorch/issues/40763 + need_cast = True + img = img.to(torch.float32) + + img = torch_pad(img, p, mode=padding_mode, value=float(fill)) + + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + img = img.to(out_dtype) + + return img + + +def resize( + img: Tensor, + size: List[int], + interpolation: str = "bilinear", + max_size: Optional[int] = None, + antialias: Optional[bool] = None +) -> Tensor: + _assert_image_tensor(img) + + if not isinstance(size, (int, tuple, list)): + raise TypeError("Got inappropriate size arg") + if not isinstance(interpolation, str): + raise TypeError("Got inappropriate interpolation arg") + + if interpolation not in ["nearest", "bilinear", "bicubic"]: + raise ValueError("This interpolation mode is unsupported with Tensor input") + + if isinstance(size, tuple): + size = list(size) + + if isinstance(size, list): + if len(size) not in [1, 2]: + raise ValueError("Size must be an int or a 1 or 2 element tuple/list, not a " + "{} element 
tuple/list".format(len(size))) + if max_size is not None and len(size) != 1: + raise ValueError( + "max_size should only be passed if size specifies the length of the smaller edge, " + "i.e. size should be an int or a sequence of length 1 in torchscript mode." + ) + + if antialias is None: + antialias = False + + if antialias and interpolation not in ["bilinear", "bicubic"]: + raise ValueError("Antialias option is supported for bilinear and bicubic interpolation modes only") + + w, h = _get_image_size(img) + + if isinstance(size, int) or len(size) == 1: # specified size only for the smallest edge + short, long = (w, h) if w <= h else (h, w) + requested_new_short = size if isinstance(size, int) else size[0] + + if short == requested_new_short: + return img + + new_short, new_long = requested_new_short, int(requested_new_short * long / short) + + if max_size is not None: + if max_size <= requested_new_short: + raise ValueError( + f"max_size = {max_size} must be strictly greater than the requested " + f"size for the smaller edge size = {size}" + ) + if new_long > max_size: + new_short, new_long = int(max_size * new_short / new_long), max_size + + new_w, new_h = (new_short, new_long) if w <= h else (new_long, new_short) + + else: # specified both h and w + new_w, new_h = size[1], size[0] + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [torch.float32, torch.float64]) + + # Define align_corners to avoid warnings + align_corners = False if interpolation in ["bilinear", "bicubic"] else None + + if antialias: + if interpolation == "bilinear": + img = torch.ops.torchvision._interpolate_linear_aa(img, [new_h, new_w], align_corners=False) + elif interpolation == "bicubic": + img = torch.ops.torchvision._interpolate_bicubic_aa(img, [new_h, new_w], align_corners=False) + else: + img = interpolate(img, size=[new_h, new_w], mode=interpolation, align_corners=align_corners) + + if interpolation == "bicubic" and out_dtype == torch.uint8: + img = img.clamp(min=0, max=255) + + img = _cast_squeeze_out(img, need_cast=need_cast, need_squeeze=need_squeeze, out_dtype=out_dtype) + + return img + + +def _assert_grid_transform_inputs( + img: Tensor, + matrix: Optional[List[float]], + interpolation: str, + fill: Optional[List[float]], + supported_interpolation_modes: List[str], + coeffs: Optional[List[float]] = None, +): + + if not (isinstance(img, torch.Tensor)): + raise TypeError("Input img should be Tensor") + + _assert_image_tensor(img) + + if matrix is not None and not isinstance(matrix, list): + raise TypeError("Argument matrix should be a list") + + if matrix is not None and len(matrix) != 6: + raise ValueError("Argument matrix should have 6 float values") + + if coeffs is not None and len(coeffs) != 8: + raise ValueError("Argument coeffs should have 8 float values") + + if fill is not None and not isinstance(fill, (int, float, tuple, list)): + warnings.warn("Argument fill should be either int, float, tuple or list") + + # Check fill + num_channels = _get_image_num_channels(img) + if isinstance(fill, (tuple, list)) and (len(fill) > 1 and len(fill) != num_channels): + msg = ("The number of elements in 'fill' cannot broadcast to match the number of " + "channels of the image ({} != {})") + raise ValueError(msg.format(len(fill), num_channels)) + + if interpolation not in supported_interpolation_modes: + raise ValueError("Interpolation mode '{}' is unsupported with Tensor input".format(interpolation)) + + +def _cast_squeeze_in(img: Tensor, req_dtypes: List[torch.dtype]) -> Tuple[Tensor, bool, 
bool, torch.dtype]: + need_squeeze = False + # make image NCHW + if img.ndim < 4: + img = img.unsqueeze(dim=0) + need_squeeze = True + + out_dtype = img.dtype + need_cast = False + if out_dtype not in req_dtypes: + need_cast = True + req_dtype = req_dtypes[0] + img = img.to(req_dtype) + return img, need_cast, need_squeeze, out_dtype + + +def _cast_squeeze_out(img: Tensor, need_cast: bool, need_squeeze: bool, out_dtype: torch.dtype): + if need_squeeze: + img = img.squeeze(dim=0) + + if need_cast: + if out_dtype in (torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64): + # it is better to round before cast + img = torch.round(img) + img = img.to(out_dtype) + + return img + + +def _apply_grid_transform(img: Tensor, grid: Tensor, mode: str, fill: Optional[List[float]]) -> Tensor: + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [grid.dtype, ]) + + if img.shape[0] > 1: + # Apply same grid to a batch of images + grid = grid.expand(img.shape[0], grid.shape[1], grid.shape[2], grid.shape[3]) + + # Append a dummy mask for customized fill colors, should be faster than grid_sample() twice + if fill is not None: + dummy = torch.ones((img.shape[0], 1, img.shape[2], img.shape[3]), dtype=img.dtype, device=img.device) + img = torch.cat((img, dummy), dim=1) + + img = grid_sample(img, grid, mode=mode, padding_mode="zeros", align_corners=False) + + # Fill with required color + if fill is not None: + mask = img[:, -1:, :, :] # N * 1 * H * W + img = img[:, :-1, :, :] # N * C * H * W + mask = mask.expand_as(img) + len_fill = len(fill) if isinstance(fill, (tuple, list)) else 1 + fill_img = torch.tensor(fill, dtype=img.dtype, device=img.device).view(1, len_fill, 1, 1).expand_as(img) + if mode == 'nearest': + mask = mask < 0.5 + img[mask] = fill_img[mask] + else: # 'bilinear' + img = img * mask + (1.0 - mask) * fill_img + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def _gen_affine_grid( + theta: Tensor, w: int, h: int, ow: int, oh: int, +) -> Tensor: + # https://github.com/pytorch/pytorch/blob/74b65c32be68b15dc7c9e8bb62459efbfbde33d8/aten/src/ATen/native/ + # AffineGridGenerator.cpp#L18 + # Difference with AffineGridGenerator is that: + # 1) we normalize grid values after applying theta + # 2) we can normalize by other image size, such that it covers "extend" option like in PIL.Image.rotate + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=theta.dtype, device=theta.device) + x_grid = torch.linspace(-ow * 0.5 + d, ow * 0.5 + d - 1, steps=ow, device=theta.device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(-oh * 0.5 + d, oh * 0.5 + d - 1, steps=oh, device=theta.device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta = theta.transpose(1, 2) / torch.tensor([0.5 * w, 0.5 * h], dtype=theta.dtype, device=theta.device) + output_grid = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta) + return output_grid.view(1, oh, ow, 2) + + +def affine( + img: Tensor, matrix: List[float], interpolation: str = "nearest", fill: Optional[List[float]] = None +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3) + shape = img.shape + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=shape[-1], h=shape[-2], ow=shape[-1], oh=shape[-2]) + return 
_apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _compute_output_size(matrix: List[float], w: int, h: int) -> Tuple[int, int]: + + # Inspired of PIL implementation: + # https://github.com/python-pillow/Pillow/blob/11de3318867e4398057373ee9f12dcb33db7335c/src/PIL/Image.py#L2054 + + # pts are Top-Left, Top-Right, Bottom-Left, Bottom-Right points. + pts = torch.tensor([ + [-0.5 * w, -0.5 * h, 1.0], + [-0.5 * w, 0.5 * h, 1.0], + [0.5 * w, 0.5 * h, 1.0], + [0.5 * w, -0.5 * h, 1.0], + ]) + theta = torch.tensor(matrix, dtype=torch.float).reshape(1, 2, 3) + new_pts = pts.view(1, 4, 3).bmm(theta.transpose(1, 2)).view(4, 2) + min_vals, _ = new_pts.min(dim=0) + max_vals, _ = new_pts.max(dim=0) + + # Truncate precision to 1e-4 to avoid ceil of Xe-15 to 1.0 + tol = 1e-4 + cmax = torch.ceil((max_vals / tol).trunc_() * tol) + cmin = torch.floor((min_vals / tol).trunc_() * tol) + size = cmax - cmin + return int(size[0]), int(size[1]) + + +def rotate( + img: Tensor, matrix: List[float], interpolation: str = "nearest", + expand: bool = False, fill: Optional[List[float]] = None +) -> Tensor: + _assert_grid_transform_inputs(img, matrix, interpolation, fill, ["nearest", "bilinear"]) + w, h = img.shape[-1], img.shape[-2] + ow, oh = _compute_output_size(matrix, w, h) if expand else (w, h) + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + theta = torch.tensor(matrix, dtype=dtype, device=img.device).reshape(1, 2, 3) + # grid will be generated on the same device as theta and img + grid = _gen_affine_grid(theta, w=w, h=h, ow=ow, oh=oh) + + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _perspective_grid(coeffs: List[float], ow: int, oh: int, dtype: torch.dtype, device: torch.device): + # https://github.com/python-pillow/Pillow/blob/4634eafe3c695a014267eefdce830b4a825beed7/ + # src/libImaging/Geometry.c#L394 + + # + # x_out = (coeffs[0] * x + coeffs[1] * y + coeffs[2]) / (coeffs[6] * x + coeffs[7] * y + 1) + # y_out = (coeffs[3] * x + coeffs[4] * y + coeffs[5]) / (coeffs[6] * x + coeffs[7] * y + 1) + # + theta1 = torch.tensor([[ + [coeffs[0], coeffs[1], coeffs[2]], + [coeffs[3], coeffs[4], coeffs[5]] + ]], dtype=dtype, device=device) + theta2 = torch.tensor([[ + [coeffs[6], coeffs[7], 1.0], + [coeffs[6], coeffs[7], 1.0] + ]], dtype=dtype, device=device) + + d = 0.5 + base_grid = torch.empty(1, oh, ow, 3, dtype=dtype, device=device) + x_grid = torch.linspace(d, ow * 1.0 + d - 1.0, steps=ow, device=device) + base_grid[..., 0].copy_(x_grid) + y_grid = torch.linspace(d, oh * 1.0 + d - 1.0, steps=oh, device=device).unsqueeze_(-1) + base_grid[..., 1].copy_(y_grid) + base_grid[..., 2].fill_(1) + + rescaled_theta1 = theta1.transpose(1, 2) / torch.tensor([0.5 * ow, 0.5 * oh], dtype=dtype, device=device) + output_grid1 = base_grid.view(1, oh * ow, 3).bmm(rescaled_theta1) + output_grid2 = base_grid.view(1, oh * ow, 3).bmm(theta2.transpose(1, 2)) + + output_grid = output_grid1 / output_grid2 - 1.0 + return output_grid.view(1, oh, ow, 2) + + +def perspective( + img: Tensor, perspective_coeffs: List[float], interpolation: str = "bilinear", fill: Optional[List[float]] = None +) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError('Input img should be Tensor.') + + _assert_image_tensor(img) + + _assert_grid_transform_inputs( + img, + matrix=None, + interpolation=interpolation, + fill=fill, + supported_interpolation_modes=["nearest", "bilinear"], + coeffs=perspective_coeffs + ) + + ow, oh = img.shape[-1], img.shape[-2] + dtype = img.dtype if 
torch.is_floating_point(img) else torch.float32 + grid = _perspective_grid(perspective_coeffs, ow=ow, oh=oh, dtype=dtype, device=img.device) + return _apply_grid_transform(img, grid, interpolation, fill=fill) + + +def _get_gaussian_kernel1d(kernel_size: int, sigma: float) -> Tensor: + ksize_half = (kernel_size - 1) * 0.5 + + x = torch.linspace(-ksize_half, ksize_half, steps=kernel_size) + pdf = torch.exp(-0.5 * (x / sigma).pow(2)) + kernel1d = pdf / pdf.sum() + + return kernel1d + + +def _get_gaussian_kernel2d( + kernel_size: List[int], sigma: List[float], dtype: torch.dtype, device: torch.device +) -> Tensor: + kernel1d_x = _get_gaussian_kernel1d(kernel_size[0], sigma[0]).to(device, dtype=dtype) + kernel1d_y = _get_gaussian_kernel1d(kernel_size[1], sigma[1]).to(device, dtype=dtype) + kernel2d = torch.mm(kernel1d_y[:, None], kernel1d_x[None, :]) + return kernel2d + + +def gaussian_blur(img: Tensor, kernel_size: List[int], sigma: List[float]) -> Tensor: + if not (isinstance(img, torch.Tensor)): + raise TypeError('img should be Tensor. Got {}'.format(type(img))) + + _assert_image_tensor(img) + + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + kernel = _get_gaussian_kernel2d(kernel_size, sigma, dtype=dtype, device=img.device) + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + img, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ]) + + # padding = (left, right, top, bottom) + padding = [kernel_size[0] // 2, kernel_size[0] // 2, kernel_size[1] // 2, kernel_size[1] // 2] + img = torch_pad(img, padding, mode="reflect") + img = conv2d(img, kernel, groups=img.shape[-3]) + + img = _cast_squeeze_out(img, need_cast, need_squeeze, out_dtype) + return img + + +def invert(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + + _assert_channels(img, [1, 3]) + + bound = torch.tensor(1 if img.is_floating_point() else 255, dtype=img.dtype, device=img.device) + return bound - img + + +def posterize(img: Tensor, bits: int) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + if img.dtype != torch.uint8: + raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype)) + + _assert_channels(img, [1, 3]) + mask = -int(2**(8 - bits)) # JIT-friendly for: ~(2 ** (8 - bits) - 1) + return img & mask + + +def solarize(img: Tensor, threshold: float) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + + _assert_channels(img, [1, 3]) + + inverted_img = invert(img) + return torch.where(img >= threshold, inverted_img, img) + + +def _blurred_degenerate_image(img: Tensor) -> Tensor: + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + + kernel = torch.ones((3, 3), dtype=dtype, device=img.device) + kernel[1, 1] = 5.0 + kernel /= kernel.sum() + kernel = kernel.expand(img.shape[-3], 1, kernel.shape[0], kernel.shape[1]) + + result_tmp, need_cast, need_squeeze, out_dtype = _cast_squeeze_in(img, [kernel.dtype, ]) + result_tmp = conv2d(result_tmp, kernel, groups=result_tmp.shape[-3]) + result_tmp = _cast_squeeze_out(result_tmp, need_cast, need_squeeze, out_dtype) + + result = img.clone() + result[..., 1:-1, 1:-1] = result_tmp + + return result + + 
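The two helpers above are the core of tensor-mode sharpness adjustment: _blurred_degenerate_image convolves each channel with a normalized 3x3 smoothing kernel (center weight 5, i.e. 5/13 after normalization) and keeps a one-pixel border from the original image, and adjust_sharpness, defined next, blends that blurred copy back into the input with _blend, so a factor of 0 yields the blurred image, 1 returns the input unchanged, and values above 1 sharpen. The following is a minimal standalone sketch of the same idea, assuming a float CHW tensor with values in [0, 1]; the name sharpness_demo is illustrative and not part of this diff.

import torch
from torch.nn.functional import conv2d

def sharpness_demo(img: torch.Tensor, sharpness_factor: float) -> torch.Tensor:
    # img: float tensor of shape (C, H, W) with values in [0, 1].
    c = img.shape[0]

    # Normalized 3x3 smoothing kernel with a heavier center weight,
    # mirroring _blurred_degenerate_image above.
    kernel = torch.ones((3, 3), dtype=img.dtype)
    kernel[1, 1] = 5.0
    kernel = (kernel / kernel.sum()).expand(c, 1, 3, 3)

    # Depthwise convolution (groups=c) shrinks H and W by 2; keep a
    # one-pixel border from the original image, as the helper above does.
    blurred = img.clone()
    blurred[:, 1:-1, 1:-1] = conv2d(img.unsqueeze(0), kernel, groups=c).squeeze(0)

    # Blend: factor 0 -> blurred copy, 1 -> original, >1 -> sharpened.
    return (sharpness_factor * img + (1.0 - sharpness_factor) * blurred).clamp(0.0, 1.0)

sharper = sharpness_demo(torch.rand(3, 32, 32), sharpness_factor=2.0)

As in adjust_sharpness below, very small images (height or width of 2 or less) should simply be returned unchanged, since the interior region the blur writes to would be empty.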
+def adjust_sharpness(img: Tensor, sharpness_factor: float) -> Tensor: + if sharpness_factor < 0: + raise ValueError('sharpness_factor ({}) is not non-negative.'.format(sharpness_factor)) + + _assert_image_tensor(img) + + _assert_channels(img, [1, 3]) + + if img.size(-1) <= 2 or img.size(-2) <= 2: + return img + + return _blend(img, _blurred_degenerate_image(img), sharpness_factor) + + +def autocontrast(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if img.ndim < 3: + raise TypeError("Input image tensor should have at least 3 dimensions, but found {}".format(img.ndim)) + + _assert_channels(img, [1, 3]) + + bound = 1.0 if img.is_floating_point() else 255.0 + dtype = img.dtype if torch.is_floating_point(img) else torch.float32 + + minimum = img.amin(dim=(-2, -1), keepdim=True).to(dtype) + maximum = img.amax(dim=(-2, -1), keepdim=True).to(dtype) + eq_idxs = torch.where(minimum == maximum)[0] + minimum[eq_idxs] = 0 + maximum[eq_idxs] = bound + scale = bound / (maximum - minimum) + + return ((img - minimum) * scale).clamp(0, bound).to(img.dtype) + + +def _scale_channel(img_chan): + # TODO: we should expect bincount to always be faster than histc, but this + # isn't always the case. Once + # https://github.com/pytorch/pytorch/issues/53194 is fixed, remove the if + # block and only use bincount. + if img_chan.is_cuda: + hist = torch.histc(img_chan.to(torch.float32), bins=256, min=0, max=255) + else: + hist = torch.bincount(img_chan.view(-1), minlength=256) + + nonzero_hist = hist[hist != 0] + step = torch.div(nonzero_hist[:-1].sum(), 255, rounding_mode='floor') + if step == 0: + return img_chan + + lut = torch.div( + torch.cumsum(hist, 0) + torch.div(step, 2, rounding_mode='floor'), + step, rounding_mode='floor') + lut = torch.nn.functional.pad(lut, [1, 0])[:-1].clamp(0, 255) + + return lut[img_chan.to(torch.int64)].to(torch.uint8) + + +def _equalize_single_image(img: Tensor) -> Tensor: + return torch.stack([_scale_channel(img[c]) for c in range(img.size(0))]) + + +def equalize(img: Tensor) -> Tensor: + + _assert_image_tensor(img) + + if not (3 <= img.ndim <= 4): + raise TypeError("Input image tensor should have 3 or 4 dimensions, but found {}".format(img.ndim)) + if img.dtype != torch.uint8: + raise TypeError("Only torch.uint8 image tensors are supported, but found {}".format(img.dtype)) + + _assert_channels(img, [1, 3]) + + if img.ndim == 3: + return _equalize_single_image(img) + + return torch.stack([_equalize_single_image(x) for x in img]) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/transforms.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/transforms.py new file mode 100644 index 0000000000000000000000000000000000000000..1ec5bee00028b89d7d759289bd94a38c12b599ce --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/transforms/transforms.py @@ -0,0 +1,1953 @@ +import math +import numbers +import random +import warnings +from collections.abc import Sequence +from typing import Tuple, List, Optional + +import torch +from torch import Tensor + +try: + import accimage +except ImportError: + accimage = None + +from . 
import functional as F +from .functional import InterpolationMode, _interpolation_modes_from_int + + +__all__ = ["Compose", "ToTensor", "PILToTensor", "ConvertImageDtype", "ToPILImage", "Normalize", "Resize", "Scale", + "CenterCrop", "Pad", "Lambda", "RandomApply", "RandomChoice", "RandomOrder", "RandomCrop", + "RandomHorizontalFlip", "RandomVerticalFlip", "RandomResizedCrop", "RandomSizedCrop", "FiveCrop", "TenCrop", + "LinearTransformation", "ColorJitter", "RandomRotation", "RandomAffine", "Grayscale", "RandomGrayscale", + "RandomPerspective", "RandomErasing", "GaussianBlur", "InterpolationMode", "RandomInvert", "RandomPosterize", + "RandomSolarize", "RandomAdjustSharpness", "RandomAutocontrast", "RandomEqualize"] + + +class Compose: + """Composes several transforms together. This transform does not support torchscript. + Please, see the note below. + + Args: + transforms (list of ``Transform`` objects): list of transforms to compose. + + Example: + >>> transforms.Compose([ + >>> transforms.CenterCrop(10), + >>> transforms.ToTensor(), + >>> ]) + + .. note:: + In order to script the transformations, please use ``torch.nn.Sequential`` as below. + + >>> transforms = torch.nn.Sequential( + >>> transforms.CenterCrop(10), + >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + >>> ) + >>> scripted_transforms = torch.jit.script(transforms) + + Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require + `lambda` functions or ``PIL.Image``. + + """ + + def __init__(self, transforms): + self.transforms = transforms + + def __call__(self, img): + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + + +class ToTensor: + """Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor. This transform does not support torchscript. + + Converts a PIL Image or numpy.ndarray (H x W x C) in the range + [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0] + if the PIL Image belongs to one of the modes (L, LA, P, I, F, RGB, YCbCr, RGBA, CMYK, 1) + or if the numpy.ndarray has dtype = np.uint8 + + In the other cases, tensors are returned without scaling. + + .. note:: + Because the input image is scaled to [0.0, 1.0], this transformation should not be used when + transforming target image masks. See the `references`_ for implementing the transforms for image masks. + + .. _references: https://github.com/pytorch/vision/tree/master/references/segmentation + """ + + def __call__(self, pic): + """ + Args: + pic (PIL Image or numpy.ndarray): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + return F.to_tensor(pic) + + def __repr__(self): + return self.__class__.__name__ + '()' + + +class PILToTensor: + """Convert a ``PIL Image`` to a tensor of the same type. This transform does not support torchscript. + + Converts a PIL Image (H x W x C) to a Tensor of shape (C x H x W). + """ + + def __call__(self, pic): + """ + Args: + pic (PIL Image): Image to be converted to tensor. + + Returns: + Tensor: Converted image. + """ + return F.pil_to_tensor(pic) + + def __repr__(self): + return self.__class__.__name__ + '()' + + +class ConvertImageDtype(torch.nn.Module): + """Convert a tensor image to the given ``dtype`` and scale the values accordingly + This function does not support PIL Image. 
+ + Args: + dtype (torch.dtype): Desired data type of the output + + .. note:: + + When converting from a smaller to a larger integer ``dtype`` the maximum values are **not** mapped exactly. + If converted back and forth, this mismatch has no effect. + + Raises: + RuntimeError: When trying to cast :class:`torch.float32` to :class:`torch.int32` or :class:`torch.int64` as + well as for trying to cast :class:`torch.float64` to :class:`torch.int64`. These conversions might lead to + overflow errors since the floating point ``dtype`` cannot store consecutive integers over the whole range + of the integer ``dtype``. + """ + + def __init__(self, dtype: torch.dtype) -> None: + super().__init__() + self.dtype = dtype + + def forward(self, image): + return F.convert_image_dtype(image, self.dtype) + + +class ToPILImage: + """Convert a tensor or an ndarray to PIL Image. This transform does not support torchscript. + + Converts a torch.*Tensor of shape C x H x W or a numpy ndarray of shape + H x W x C to a PIL Image while preserving the value range. + + Args: + mode (`PIL.Image mode`_): color space and pixel depth of input data (optional). + If ``mode`` is ``None`` (default) there are some assumptions made about the input data: + - If the input has 4 channels, the ``mode`` is assumed to be ``RGBA``. + - If the input has 3 channels, the ``mode`` is assumed to be ``RGB``. + - If the input has 2 channels, the ``mode`` is assumed to be ``LA``. + - If the input has 1 channel, the ``mode`` is determined by the data type (i.e ``int``, ``float``, + ``short``). + + .. _PIL.Image mode: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#concept-modes + """ + def __init__(self, mode=None): + self.mode = mode + + def __call__(self, pic): + """ + Args: + pic (Tensor or numpy.ndarray): Image to be converted to PIL Image. + + Returns: + PIL Image: Image converted to PIL Image. + + """ + return F.to_pil_image(pic, self.mode) + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + if self.mode is not None: + format_string += 'mode={0}'.format(self.mode) + format_string += ')' + return format_string + + +class Normalize(torch.nn.Module): + """Normalize a tensor image with mean and standard deviation. + This transform does not support PIL Image. + Given mean: ``(mean[1],...,mean[n])`` and std: ``(std[1],..,std[n])`` for ``n`` + channels, this transform will normalize each channel of the input + ``torch.*Tensor`` i.e., + ``output[channel] = (input[channel] - mean[channel]) / std[channel]`` + + .. note:: + This transform acts out of place, i.e., it does not mutate the input tensor. + + Args: + mean (sequence): Sequence of means for each channel. + std (sequence): Sequence of standard deviations for each channel. + inplace(bool,optional): Bool to make this operation in-place. + + """ + + def __init__(self, mean, std, inplace=False): + super().__init__() + self.mean = mean + self.std = std + self.inplace = inplace + + def forward(self, tensor: Tensor) -> Tensor: + """ + Args: + tensor (Tensor): Tensor image to be normalized. + + Returns: + Tensor: Normalized Tensor image. + """ + return F.normalize(tensor, self.mean, self.std, self.inplace) + + def __repr__(self): + return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std) + + +class Resize(torch.nn.Module): + """Resize the input image to the given size. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + .. 
warning:: + The output image might be different depending on its type: when downsampling, the interpolation of PIL images + and tensors is slightly different, because PIL applies antialiasing. This may lead to significant differences + in the performance of a network. Therefore, it is preferable to train and serve a model with the same input + types. + + Args: + size (sequence or int): Desired output size. If size is a sequence like + (h, w), output size will be matched to this. If size is an int, + smaller edge of the image will be matched to this number. + i.e, if height > width, then image will be rescaled to + (size * height / width, size). + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and + ``InterpolationMode.BICUBIC`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + max_size (int, optional): The maximum allowed for the longer edge of + the resized image: if the longer edge of the image is greater + than ``max_size`` after being resized according to ``size``, then + the image is resized again so that the longer edge is equal to + ``max_size``. As a result, ``size`` might be overruled, i.e the + smaller edge may be shorter than ``size``. This is only supported + if ``size`` is an int (or a sequence of length 1 in torchscript + mode). + antialias (bool, optional): antialias flag. If ``img`` is PIL Image, the flag is ignored and anti-alias + is always used. If ``img`` is Tensor, the flag is False by default and can be set True for + ``InterpolationMode.BILINEAR`` only mode. + + .. warning:: + There is no autodiff support for ``antialias=True`` option with input ``img`` as Tensor. + + """ + + def __init__(self, size, interpolation=InterpolationMode.BILINEAR, max_size=None, antialias=None): + super().__init__() + if not isinstance(size, (int, Sequence)): + raise TypeError("Size should be int or sequence. Got {}".format(type(size))) + if isinstance(size, Sequence) and len(size) not in (1, 2): + raise ValueError("If size is a sequence, it should have 1 or 2 values") + self.size = size + self.max_size = max_size + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + self.interpolation = interpolation + self.antialias = antialias + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be scaled. + + Returns: + PIL Image or Tensor: Rescaled image. + """ + return F.resize(img, self.size, self.interpolation, self.max_size, self.antialias) + + def __repr__(self): + interpolate_str = self.interpolation.value + return self.__class__.__name__ + '(size={0}, interpolation={1}, max_size={2}, antialias={3})'.format( + self.size, interpolate_str, self.max_size, self.antialias) + + +class Scale(Resize): + """ + Note: This transform is deprecated in favor of Resize. 
+ """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.Scale transform is deprecated, " + + "please use transforms.Resize instead.") + super(Scale, self).__init__(*args, **kwargs) + + +class CenterCrop(torch.nn.Module): + """Crops the given image at the center. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + If image size is smaller than output size along any edge, image is padded with 0 and then center cropped. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + """ + + def __init__(self, size): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + PIL Image or Tensor: Cropped image. + """ + return F.center_crop(img, self.size) + + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size) + + +class Pad(torch.nn.Module): + """Pad the given image on all sides with the given "pad" value. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means at most 2 leading dimensions for mode reflect and symmetric, + at most 3 leading dimensions for mode edge, + and an arbitrary number of leading dimensions for mode constant + + Args: + padding (int or sequence): Padding on each border. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. note:: + In torchscript mode padding as single int is not supported, use a sequence of + length 1: ``[padding, ]``. + fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. + Only number is supported for torch Tensor. + Only int or str or tuple value is supported for PIL Image. + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image. + If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2 + + - reflect: pads with reflection of image without repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge. 
+ For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + """ + + def __init__(self, padding, fill=0, padding_mode="constant"): + super().__init__() + if not isinstance(padding, (numbers.Number, tuple, list)): + raise TypeError("Got inappropriate padding arg") + + if not isinstance(fill, (numbers.Number, str, tuple)): + raise TypeError("Got inappropriate fill arg") + + if padding_mode not in ["constant", "edge", "reflect", "symmetric"]: + raise ValueError("Padding mode should be either constant, edge, reflect or symmetric") + + if isinstance(padding, Sequence) and len(padding) not in [1, 2, 4]: + raise ValueError("Padding must be an int or a 1, 2, or 4 element tuple, not a " + + "{} element tuple".format(len(padding))) + + self.padding = padding + self.fill = fill + self.padding_mode = padding_mode + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be padded. + + Returns: + PIL Image or Tensor: Padded image. + """ + return F.pad(img, self.padding, self.fill, self.padding_mode) + + def __repr__(self): + return self.__class__.__name__ + '(padding={0}, fill={1}, padding_mode={2})'.\ + format(self.padding, self.fill, self.padding_mode) + + +class Lambda: + """Apply a user-defined lambda as a transform. This transform does not support torchscript. + + Args: + lambd (function): Lambda/function to be used for transform. + """ + + def __init__(self, lambd): + if not callable(lambd): + raise TypeError("Argument lambd should be callable, got {}".format(repr(type(lambd).__name__))) + self.lambd = lambd + + def __call__(self, img): + return self.lambd(img) + + def __repr__(self): + return self.__class__.__name__ + '()' + + +class RandomTransforms: + """Base class for a list of transformations with randomness + + Args: + transforms (sequence): list of transformations + """ + + def __init__(self, transforms): + if not isinstance(transforms, Sequence): + raise TypeError("Argument transforms should be a sequence") + self.transforms = transforms + + def __call__(self, *args, **kwargs): + raise NotImplementedError() + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + + +class RandomApply(torch.nn.Module): + """Apply randomly a list of transformations with a given probability. + + .. note:: + In order to script the transformation, please use ``torch.nn.ModuleList`` as input instead of list/tuple of + transforms as shown below: + + >>> transforms = transforms.RandomApply(torch.nn.ModuleList([ + >>> transforms.ColorJitter(), + >>> ]), p=0.3) + >>> scripted_transforms = torch.jit.script(transforms) + + Make sure to use only scriptable transformations, i.e. that work with ``torch.Tensor``, does not require + `lambda` functions or ``PIL.Image``. 
+ + Args: + transforms (sequence or torch.nn.Module): list of transformations + p (float): probability + """ + + def __init__(self, transforms, p=0.5): + super().__init__() + self.transforms = transforms + self.p = p + + def forward(self, img): + if self.p < torch.rand(1): + return img + for t in self.transforms: + img = t(img) + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += '\n p={}'.format(self.p) + for t in self.transforms: + format_string += '\n' + format_string += ' {0}'.format(t) + format_string += '\n)' + return format_string + + +class RandomOrder(RandomTransforms): + """Apply a list of transformations in a random order. This transform does not support torchscript. + """ + def __call__(self, img): + order = list(range(len(self.transforms))) + random.shuffle(order) + for i in order: + img = self.transforms[i](img) + return img + + +class RandomChoice(RandomTransforms): + """Apply single transformation randomly picked from a list. This transform does not support torchscript. + """ + def __call__(self, img): + t = random.choice(self.transforms) + return t(img) + + +class RandomCrop(torch.nn.Module): + """Crop the given image at a random location. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions, + but if non-constant padding is used, the input is expected to have at most 2 leading dimensions + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + padding (int or sequence, optional): Optional padding on each border + of the image. Default is None. If a single int is provided this + is used to pad all borders. If sequence of length 2 is provided this is the padding + on left/right and top/bottom respectively. If a sequence of length 4 is provided + this is the padding for the left, top, right and bottom borders respectively. + + .. note:: + In torchscript mode padding as single int is not supported, use a sequence of + length 1: ``[padding, ]``. + pad_if_needed (boolean): It will pad the image if smaller than the + desired size to avoid raising an exception. Since cropping is done + after padding, the padding seems to be done at a random offset. + fill (number or str or tuple): Pixel fill value for constant fill. Default is 0. If a tuple of + length 3, it is used to fill R, G, B channels respectively. + This value is only used when the padding_mode is constant. + Only number is supported for torch Tensor. + Only int or str or tuple value is supported for PIL Image. + padding_mode (str): Type of padding. Should be: constant, edge, reflect or symmetric. + Default is constant. + + - constant: pads with a constant value, this value is specified with fill + + - edge: pads with the last value at the edge of the image. + If input a 5D torch Tensor, the last 3 dimensions will be padded instead of the last 2 + + - reflect: pads with reflection of image without repeating the last value on the edge. + For example, padding [1, 2, 3, 4] with 2 elements on both sides in reflect mode + will result in [3, 2, 1, 2, 3, 4, 3, 2] + + - symmetric: pads with reflection of image repeating the last value on the edge. 
+ For example, padding [1, 2, 3, 4] with 2 elements on both sides in symmetric mode + will result in [2, 1, 1, 2, 3, 4, 4, 3] + """ + + @staticmethod + def get_params(img: Tensor, output_size: Tuple[int, int]) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random crop. + + Args: + img (PIL Image or Tensor): Image to be cropped. + output_size (tuple): Expected output size of the crop. + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for random crop. + """ + w, h = F._get_image_size(img) + th, tw = output_size + + if h + 1 < th or w + 1 < tw: + raise ValueError( + "Required crop size {} is larger then input image size {}".format((th, tw), (h, w)) + ) + + if w == tw and h == th: + return 0, 0, h, w + + i = torch.randint(0, h - th + 1, size=(1, )).item() + j = torch.randint(0, w - tw + 1, size=(1, )).item() + return i, j, th, tw + + def __init__(self, size, padding=None, pad_if_needed=False, fill=0, padding_mode="constant"): + super().__init__() + + self.size = tuple(_setup_size( + size, error_msg="Please provide only two dimensions (h, w) for size." + )) + + self.padding = padding + self.pad_if_needed = pad_if_needed + self.fill = fill + self.padding_mode = padding_mode + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + PIL Image or Tensor: Cropped image. + """ + if self.padding is not None: + img = F.pad(img, self.padding, self.fill, self.padding_mode) + + width, height = F._get_image_size(img) + # pad the width if needed + if self.pad_if_needed and width < self.size[1]: + padding = [self.size[1] - width, 0] + img = F.pad(img, padding, self.fill, self.padding_mode) + # pad the height if needed + if self.pad_if_needed and height < self.size[0]: + padding = [0, self.size[0] - height] + img = F.pad(img, padding, self.fill, self.padding_mode) + + i, j, h, w = self.get_params(img, self.size) + + return F.crop(img, i, j, h, w) + + def __repr__(self): + return self.__class__.__name__ + "(size={0}, padding={1})".format(self.size, self.padding) + + +class RandomHorizontalFlip(torch.nn.Module): + """Horizontally flip the given image randomly with a given probability. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be flipped. + + Returns: + PIL Image or Tensor: Randomly flipped image. + """ + if torch.rand(1) < self.p: + return F.hflip(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) + + +class RandomVerticalFlip(torch.nn.Module): + """Vertically flip the given image randomly with a given probability. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + Args: + p (float): probability of the image being flipped. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be flipped. + + Returns: + PIL Image or Tensor: Randomly flipped image. 
+ """ + if torch.rand(1) < self.p: + return F.vflip(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) + + +class RandomPerspective(torch.nn.Module): + """Performs a random perspective transformation of the given image with a given probability. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1. + Default is 0.5. + p (float): probability of the image being transformed. Default is 0.5. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + fill (sequence or number): Pixel fill value for the area outside the transformed + image. Default is ``0``. If given a number, the value is used for all bands respectively. + """ + + def __init__(self, distortion_scale=0.5, p=0.5, interpolation=InterpolationMode.BILINEAR, fill=0): + super().__init__() + self.p = p + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + self.interpolation = interpolation + self.distortion_scale = distortion_scale + + if fill is None: + fill = 0 + elif not isinstance(fill, (Sequence, numbers.Number)): + raise TypeError("Fill should be either a sequence or a number.") + + self.fill = fill + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be Perspectively transformed. + + Returns: + PIL Image or Tensor: Randomly transformed image. + """ + + fill = self.fill + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * F._get_image_num_channels(img) + else: + fill = [float(f) for f in fill] + + if torch.rand(1) < self.p: + width, height = F._get_image_size(img) + startpoints, endpoints = self.get_params(width, height, self.distortion_scale) + return F.perspective(img, startpoints, endpoints, self.interpolation, fill) + return img + + @staticmethod + def get_params(width: int, height: int, distortion_scale: float) -> Tuple[List[List[int]], List[List[int]]]: + """Get parameters for ``perspective`` for a random perspective transform. + + Args: + width (int): width of the image. + height (int): height of the image. + distortion_scale (float): argument to control the degree of distortion and ranges from 0 to 1. + + Returns: + List containing [top-left, top-right, bottom-right, bottom-left] of the original image, + List containing [top-left, top-right, bottom-right, bottom-left] of the transformed image. 
+ """ + half_height = height // 2 + half_width = width // 2 + topleft = [ + int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()), + int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item()) + ] + topright = [ + int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()), + int(torch.randint(0, int(distortion_scale * half_height) + 1, size=(1, )).item()) + ] + botright = [ + int(torch.randint(width - int(distortion_scale * half_width) - 1, width, size=(1, )).item()), + int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item()) + ] + botleft = [ + int(torch.randint(0, int(distortion_scale * half_width) + 1, size=(1, )).item()), + int(torch.randint(height - int(distortion_scale * half_height) - 1, height, size=(1, )).item()) + ] + startpoints = [[0, 0], [width - 1, 0], [width - 1, height - 1], [0, height - 1]] + endpoints = [topleft, topright, botright, botleft] + return startpoints, endpoints + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) + + +class RandomResizedCrop(torch.nn.Module): + """Crop a random portion of image and resize it to a given size. + + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions + + A crop of the original image is made: the crop has a random area (H * W) + and a random aspect ratio. This crop is finally resized to the given + size. This is popularly used to train the Inception networks. + + Args: + size (int or sequence): expected output size of the crop, for each edge. If size is an + int instead of sequence like (h, w), a square output size ``(size, size)`` is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + + .. note:: + In torchscript mode size as single int is not supported, use a sequence of length 1: ``[size, ]``. + scale (tuple of float): Specifies the lower and upper bounds for the random area of the crop, + before resizing. The scale is defined with respect to the area of the original image. + ratio (tuple of float): lower and upper bounds for the random aspect ratio of the crop, before + resizing. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.BILINEAR``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` and + ``InterpolationMode.BICUBIC`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + + """ + + def __init__(self, size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation=InterpolationMode.BILINEAR): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + if not isinstance(scale, Sequence): + raise TypeError("Scale should be a sequence") + if not isinstance(ratio, Sequence): + raise TypeError("Ratio should be a sequence") + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("Scale and ratio should be of kind (min, max)") + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." 
+ ) + interpolation = _interpolation_modes_from_int(interpolation) + + self.interpolation = interpolation + self.scale = scale + self.ratio = ratio + + @staticmethod + def get_params( + img: Tensor, scale: List[float], ratio: List[float] + ) -> Tuple[int, int, int, int]: + """Get parameters for ``crop`` for a random sized crop. + + Args: + img (PIL Image or Tensor): Input image. + scale (list): range of scale of the origin size cropped + ratio (list): range of aspect ratio of the origin aspect ratio cropped + + Returns: + tuple: params (i, j, h, w) to be passed to ``crop`` for a random + sized crop. + """ + width, height = F._get_image_size(img) + area = height * width + + log_ratio = torch.log(torch.tensor(ratio)) + for _ in range(10): + target_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + w = int(round(math.sqrt(target_area * aspect_ratio))) + h = int(round(math.sqrt(target_area / aspect_ratio))) + + if 0 < w <= width and 0 < h <= height: + i = torch.randint(0, height - h + 1, size=(1,)).item() + j = torch.randint(0, width - w + 1, size=(1,)).item() + return i, j, h, w + + # Fallback to central crop + in_ratio = float(width) / float(height) + if in_ratio < min(ratio): + w = width + h = int(round(w / min(ratio))) + elif in_ratio > max(ratio): + h = height + w = int(round(h * max(ratio))) + else: # whole image + w = width + h = height + i = (height - h) // 2 + j = (width - w) // 2 + return i, j, h, w + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped and resized. + + Returns: + PIL Image or Tensor: Randomly cropped and resized image. + """ + i, j, h, w = self.get_params(img, self.scale, self.ratio) + return F.resized_crop(img, i, j, h, w, self.size, self.interpolation) + + def __repr__(self): + interpolate_str = self.interpolation.value + format_string = self.__class__.__name__ + '(size={0}'.format(self.size) + format_string += ', scale={0}'.format(tuple(round(s, 4) for s in self.scale)) + format_string += ', ratio={0}'.format(tuple(round(r, 4) for r in self.ratio)) + format_string += ', interpolation={0})'.format(interpolate_str) + return format_string + + +class RandomSizedCrop(RandomResizedCrop): + """ + Note: This transform is deprecated in favor of RandomResizedCrop. + """ + def __init__(self, *args, **kwargs): + warnings.warn("The use of the transforms.RandomSizedCrop transform is deprecated, " + + "please use transforms.RandomResizedCrop instead.") + super(RandomSizedCrop, self).__init__(*args, **kwargs) + + +class FiveCrop(torch.nn.Module): + """Crop the given image into four corners and the central crop. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an ``int`` + instead of sequence like (h, w), a square crop of size (size, size) is made. + If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). 
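``RandomResizedCrop.get_params`` above retries the scale/ratio draw up to ten times and otherwise falls back to a center crop clamped to the ratio bounds. A minimal sketch of typical use (224 is an illustrative output size; ``T`` and ``F`` as before):

>>> rrc = T.RandomResizedCrop(224, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.))
>>> out = rrc(img)   # Inception-style random crop, resized to 224x224
>>> i, j, h, w = T.RandomResizedCrop.get_params(img, scale=[0.08, 1.0], ratio=[3. / 4., 4. / 3.])
>>> patch = F.resized_crop(img, i, j, h, w, [224, 224])   # same sampling, applied via the functional API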
+ + Example: + >>> transform = Compose([ + >>> FiveCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + tuple of 5 images. Image can be PIL Image or Tensor + """ + return F.five_crop(img, self.size) + + def __repr__(self): + return self.__class__.__name__ + '(size={0})'.format(self.size) + + +class TenCrop(torch.nn.Module): + """Crop the given image into four corners and the central crop plus the flipped version of + these (horizontal flipping is used by default). + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading + dimensions + + .. Note:: + This transform returns a tuple of images and there may be a mismatch in the number of + inputs and targets your Dataset returns. See below for an example of how to deal with + this. + + Args: + size (sequence or int): Desired output size of the crop. If size is an + int instead of sequence like (h, w), a square crop (size, size) is + made. If provided a sequence of length 1, it will be interpreted as (size[0], size[0]). + vertical_flip (bool): Use vertical flipping instead of horizontal + + Example: + >>> transform = Compose([ + >>> TenCrop(size), # this is a list of PIL Images + >>> Lambda(lambda crops: torch.stack([ToTensor()(crop) for crop in crops])) # returns a 4D tensor + >>> ]) + >>> #In your test loop you can do the following: + >>> input, target = batch # input is a 5d tensor, target is 2d + >>> bs, ncrops, c, h, w = input.size() + >>> result = model(input.view(-1, c, h, w)) # fuse batch size and ncrops + >>> result_avg = result.view(bs, ncrops, -1).mean(1) # avg over crops + """ + + def __init__(self, size, vertical_flip=False): + super().__init__() + self.size = _setup_size(size, error_msg="Please provide only two dimensions (h, w) for size.") + self.vertical_flip = vertical_flip + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be cropped. + + Returns: + tuple of 10 images. Image can be PIL Image or Tensor + """ + return F.ten_crop(img, self.size, self.vertical_flip) + + def __repr__(self): + return self.__class__.__name__ + '(size={0}, vertical_flip={1})'.format(self.size, self.vertical_flip) + + +class LinearTransformation(torch.nn.Module): + """Transform a tensor image with a square transformation matrix and a mean_vector computed + offline. + This transform does not support PIL Image. + Given transformation_matrix and mean_vector, will flatten the torch.*Tensor and + subtract mean_vector from it which is then followed by computing the dot + product with the transformation matrix and then reshaping the tensor to its + original shape. + + Applications: + whitening transformation: Suppose X is a column vector zero-centered data. + Then compute the data covariance matrix [D x D] with torch.mm(X.t(), X), + perform SVD on this matrix and pass it as transformation_matrix. 
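One way to set up the whitening application sketched above, as a rough example on toy data (assuming ``import torch``; the 3x8x8 patch size, the 1e-5 damping term, and the ZCA form of the matrix are illustrative choices, not prescribed here):

>>> X = torch.randn(1000, 3 * 8 * 8)                     # toy batch of flattened 3x8x8 patches
>>> mean = X.mean(dim=0)
>>> Xc = X - mean                                        # zero-center the data
>>> cov = Xc.t().mm(Xc) / Xc.size(0)                     # D x D covariance matrix
>>> U, S, _ = torch.svd(cov)
>>> W = U.mm(torch.diag((S + 1e-5).rsqrt())).mm(U.t())   # ZCA whitening matrix, D x D
>>> whiten = T.LinearTransformation(transformation_matrix=W, mean_vector=mean)
>>> out = whiten(torch.rand(3, 8, 8))                    # any tensor that flattens to D elements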
+ + Args: + transformation_matrix (Tensor): tensor [D x D], D = C x H x W + mean_vector (Tensor): tensor [D], D = C x H x W + """ + + def __init__(self, transformation_matrix, mean_vector): + super().__init__() + if transformation_matrix.size(0) != transformation_matrix.size(1): + raise ValueError("transformation_matrix should be square. Got " + + "[{} x {}] rectangular matrix.".format(*transformation_matrix.size())) + + if mean_vector.size(0) != transformation_matrix.size(0): + raise ValueError("mean_vector should have the same length {}".format(mean_vector.size(0)) + + " as any one of the dimensions of the transformation_matrix [{}]" + .format(tuple(transformation_matrix.size()))) + + if transformation_matrix.device != mean_vector.device: + raise ValueError("Input tensors should be on the same device. Got {} and {}" + .format(transformation_matrix.device, mean_vector.device)) + + self.transformation_matrix = transformation_matrix + self.mean_vector = mean_vector + + def forward(self, tensor: Tensor) -> Tensor: + """ + Args: + tensor (Tensor): Tensor image to be whitened. + + Returns: + Tensor: Transformed image. + """ + shape = tensor.shape + n = shape[-3] * shape[-2] * shape[-1] + if n != self.transformation_matrix.shape[0]: + raise ValueError("Input tensor and transformation matrix have incompatible shape." + + "[{} x {} x {}] != ".format(shape[-3], shape[-2], shape[-1]) + + "{}".format(self.transformation_matrix.shape[0])) + + if tensor.device.type != self.mean_vector.device.type: + raise ValueError("Input tensor should be on the same device as transformation matrix and mean vector. " + "Got {} vs {}".format(tensor.device, self.mean_vector.device)) + + flat_tensor = tensor.view(-1, n) - self.mean_vector + transformed_tensor = torch.mm(flat_tensor, self.transformation_matrix) + tensor = transformed_tensor.view(shape) + return tensor + + def __repr__(self): + format_string = self.__class__.__name__ + '(transformation_matrix=' + format_string += (str(self.transformation_matrix.tolist()) + ')') + format_string += (", (mean_vector=" + str(self.mean_vector.tolist()) + ')') + return format_string + + +class ColorJitter(torch.nn.Module): + """Randomly change the brightness, contrast, saturation and hue of an image. + If the image is torch Tensor, it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, mode "1", "L", "I", "F" and modes with transparency (alpha channel) are not supported. + + Args: + brightness (float or tuple of float (min, max)): How much to jitter brightness. + brightness_factor is chosen uniformly from [max(0, 1 - brightness), 1 + brightness] + or the given [min, max]. Should be non negative numbers. + contrast (float or tuple of float (min, max)): How much to jitter contrast. + contrast_factor is chosen uniformly from [max(0, 1 - contrast), 1 + contrast] + or the given [min, max]. Should be non negative numbers. + saturation (float or tuple of float (min, max)): How much to jitter saturation. + saturation_factor is chosen uniformly from [max(0, 1 - saturation), 1 + saturation] + or the given [min, max]. Should be non negative numbers. + hue (float or tuple of float (min, max)): How much to jitter hue. + hue_factor is chosen uniformly from [-hue, hue] or the given [min, max]. + Should have 0<= hue <= 0.5 or -0.5 <= min <= max <= 0.5. 
+ """ + + def __init__(self, brightness=0, contrast=0, saturation=0, hue=0): + super().__init__() + self.brightness = self._check_input(brightness, 'brightness') + self.contrast = self._check_input(contrast, 'contrast') + self.saturation = self._check_input(saturation, 'saturation') + self.hue = self._check_input(hue, 'hue', center=0, bound=(-0.5, 0.5), + clip_first_on_zero=False) + + @torch.jit.unused + def _check_input(self, value, name, center=1, bound=(0, float('inf')), clip_first_on_zero=True): + if isinstance(value, numbers.Number): + if value < 0: + raise ValueError("If {} is a single number, it must be non negative.".format(name)) + value = [center - float(value), center + float(value)] + if clip_first_on_zero: + value[0] = max(value[0], 0.0) + elif isinstance(value, (tuple, list)) and len(value) == 2: + if not bound[0] <= value[0] <= value[1] <= bound[1]: + raise ValueError("{} values should be between {}".format(name, bound)) + else: + raise TypeError("{} should be a single number or a list/tuple with length 2.".format(name)) + + # if value is 0 or (1., 1.) for brightness/contrast/saturation + # or (0., 0.) for hue, do nothing + if value[0] == value[1] == center: + value = None + return value + + @staticmethod + def get_params(brightness: Optional[List[float]], + contrast: Optional[List[float]], + saturation: Optional[List[float]], + hue: Optional[List[float]] + ) -> Tuple[Tensor, Optional[float], Optional[float], Optional[float], Optional[float]]: + """Get the parameters for the randomized transform to be applied on image. + + Args: + brightness (tuple of float (min, max), optional): The range from which the brightness_factor is chosen + uniformly. Pass None to turn off the transformation. + contrast (tuple of float (min, max), optional): The range from which the contrast_factor is chosen + uniformly. Pass None to turn off the transformation. + saturation (tuple of float (min, max), optional): The range from which the saturation_factor is chosen + uniformly. Pass None to turn off the transformation. + hue (tuple of float (min, max), optional): The range from which the hue_factor is chosen uniformly. + Pass None to turn off the transformation. + + Returns: + tuple: The parameters used to apply the randomized transform + along with their random order. + """ + fn_idx = torch.randperm(4) + + b = None if brightness is None else float(torch.empty(1).uniform_(brightness[0], brightness[1])) + c = None if contrast is None else float(torch.empty(1).uniform_(contrast[0], contrast[1])) + s = None if saturation is None else float(torch.empty(1).uniform_(saturation[0], saturation[1])) + h = None if hue is None else float(torch.empty(1).uniform_(hue[0], hue[1])) + + return fn_idx, b, c, s, h + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Input image. + + Returns: + PIL Image or Tensor: Color jittered image. 
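As the ``_check_input`` helper above shows, a scalar argument is expanded to a symmetric range around 1 (around 0 for hue), and ``get_params`` also returns a random order for the four adjustments. A short sketch (``img`` is an assumption):

>>> jitter = T.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
>>> out = jitter(img)   # factors drawn from [0.6, 1.4] (brightness/contrast/saturation) and [-0.1, 0.1] (hue)
>>> fn_idx, b, c, s, h = T.ColorJitter.get_params([0.6, 1.4], [0.6, 1.4], [0.6, 1.4], [-0.1, 0.1])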
+ """ + fn_idx, brightness_factor, contrast_factor, saturation_factor, hue_factor = \ + self.get_params(self.brightness, self.contrast, self.saturation, self.hue) + + for fn_id in fn_idx: + if fn_id == 0 and brightness_factor is not None: + img = F.adjust_brightness(img, brightness_factor) + elif fn_id == 1 and contrast_factor is not None: + img = F.adjust_contrast(img, contrast_factor) + elif fn_id == 2 and saturation_factor is not None: + img = F.adjust_saturation(img, saturation_factor) + elif fn_id == 3 and hue_factor is not None: + img = F.adjust_hue(img, hue_factor) + + return img + + def __repr__(self): + format_string = self.__class__.__name__ + '(' + format_string += 'brightness={0}'.format(self.brightness) + format_string += ', contrast={0}'.format(self.contrast) + format_string += ', saturation={0}'.format(self.saturation) + format_string += ', hue={0})'.format(self.hue) + return format_string + + +class RandomRotation(torch.nn.Module): + """Rotate the image by angle. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + degrees (sequence or number): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. ``PIL.Image.NEAREST``) are still acceptable. + expand (bool, optional): Optional expansion flag. + If true, expands the output to make it large enough to hold the entire rotated image. + If false or omitted, make the output image the same size as the input image. + Note that the expand flag assumes rotation around the center and no translation. + center (sequence, optional): Optional center of rotation, (x, y). Origin is the upper left corner. + Default is the center of the image. + fill (sequence or number): Pixel fill value for the area outside the rotated + image. Default is ``0``. If given a number, the value is used for all bands respectively. + resample (int, optional): deprecated argument and will be removed since v0.10.0. + Please use the ``interpolation`` parameter instead. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__( + self, degrees, interpolation=InterpolationMode.NEAREST, expand=False, center=None, fill=0, resample=None + ): + super().__init__() + if resample is not None: + warnings.warn( + "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" + ) + interpolation = _interpolation_modes_from_int(resample) + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." 
+ ) + interpolation = _interpolation_modes_from_int(interpolation) + + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, )) + + if center is not None: + _check_sequence_input(center, "center", req_sizes=(2, )) + + self.center = center + + self.resample = self.interpolation = interpolation + self.expand = expand + + if fill is None: + fill = 0 + elif not isinstance(fill, (Sequence, numbers.Number)): + raise TypeError("Fill should be either a sequence or a number.") + + self.fill = fill + + @staticmethod + def get_params(degrees: List[float]) -> float: + """Get parameters for ``rotate`` for a random rotation. + + Returns: + float: angle parameter to be passed to ``rotate`` for random rotation. + """ + angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item()) + return angle + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be rotated. + + Returns: + PIL Image or Tensor: Rotated image. + """ + fill = self.fill + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * F._get_image_num_channels(img) + else: + fill = [float(f) for f in fill] + angle = self.get_params(self.degrees) + + return F.rotate(img, angle, self.resample, self.expand, self.center, fill) + + def __repr__(self): + interpolate_str = self.interpolation.value + format_string = self.__class__.__name__ + '(degrees={0}'.format(self.degrees) + format_string += ', interpolation={0}'.format(interpolate_str) + format_string += ', expand={0}'.format(self.expand) + if self.center is not None: + format_string += ', center={0}'.format(self.center) + if self.fill is not None: + format_string += ', fill={0}'.format(self.fill) + format_string += ')' + return format_string + + +class RandomAffine(torch.nn.Module): + """Random affine transformation of the image keeping center invariant. + If the image is torch Tensor, it is expected + to have [..., H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + degrees (sequence or number): Range of degrees to select from. + If degrees is a number instead of sequence like (min, max), the range of degrees + will be (-degrees, +degrees). Set to 0 to deactivate rotations. + translate (tuple, optional): tuple of maximum absolute fraction for horizontal + and vertical translations. For example translate=(a, b), then horizontal shift + is randomly sampled in the range -img_width * a < dx < img_width * a and vertical shift is + randomly sampled in the range -img_height * b < dy < img_height * b. Will not translate by default. + scale (tuple, optional): scaling factor interval, e.g (a, b), then scale is + randomly sampled from the range a <= scale <= b. Will keep original scale by default. + shear (sequence or number, optional): Range of degrees to select from. + If shear is a number, a shear parallel to the x axis in the range (-shear, +shear) + will be applied. Else if shear is a sequence of 2 values a shear parallel to the x axis in the + range (shear[0], shear[1]) will be applied. Else if shear is a sequence of 4 values, + a x-axis shear in (shear[0], shear[1]) and y-axis shear in (shear[2], shear[3]) will be applied. + Will not apply shear by default. + interpolation (InterpolationMode): Desired interpolation enum defined by + :class:`torchvision.transforms.InterpolationMode`. Default is ``InterpolationMode.NEAREST``. + If input is Tensor, only ``InterpolationMode.NEAREST``, ``InterpolationMode.BILINEAR`` are supported. + For backward compatibility integer values (e.g. 
``PIL.Image.NEAREST``) are still acceptable. + fill (sequence or number): Pixel fill value for the area outside the transformed + image. Default is ``0``. If given a number, the value is used for all bands respectively. + fillcolor (sequence or number, optional): deprecated argument and will be removed since v0.10.0. + Please use the ``fill`` parameter instead. + resample (int, optional): deprecated argument and will be removed since v0.10.0. + Please use the ``interpolation`` parameter instead. + + .. _filters: https://pillow.readthedocs.io/en/latest/handbook/concepts.html#filters + + """ + + def __init__( + self, degrees, translate=None, scale=None, shear=None, interpolation=InterpolationMode.NEAREST, fill=0, + fillcolor=None, resample=None + ): + super().__init__() + if resample is not None: + warnings.warn( + "Argument resample is deprecated and will be removed since v0.10.0. Please, use interpolation instead" + ) + interpolation = _interpolation_modes_from_int(resample) + + # Backward compatibility with integer value + if isinstance(interpolation, int): + warnings.warn( + "Argument interpolation should be of type InterpolationMode instead of int. " + "Please, use InterpolationMode enum." + ) + interpolation = _interpolation_modes_from_int(interpolation) + + if fillcolor is not None: + warnings.warn( + "Argument fillcolor is deprecated and will be removed since v0.10.0. Please, use fill instead" + ) + fill = fillcolor + + self.degrees = _setup_angle(degrees, name="degrees", req_sizes=(2, )) + + if translate is not None: + _check_sequence_input(translate, "translate", req_sizes=(2, )) + for t in translate: + if not (0.0 <= t <= 1.0): + raise ValueError("translation values should be between 0 and 1") + self.translate = translate + + if scale is not None: + _check_sequence_input(scale, "scale", req_sizes=(2, )) + for s in scale: + if s <= 0: + raise ValueError("scale values should be positive") + self.scale = scale + + if shear is not None: + self.shear = _setup_angle(shear, name="shear", req_sizes=(2, 4)) + else: + self.shear = shear + + self.resample = self.interpolation = interpolation + + if fill is None: + fill = 0 + elif not isinstance(fill, (Sequence, numbers.Number)): + raise TypeError("Fill should be either a sequence or a number.") + + self.fillcolor = self.fill = fill + + @staticmethod + def get_params( + degrees: List[float], + translate: Optional[List[float]], + scale_ranges: Optional[List[float]], + shears: Optional[List[float]], + img_size: List[int] + ) -> Tuple[float, Tuple[int, int], float, Tuple[float, float]]: + """Get parameters for affine transformation + + Returns: + params to be passed to the affine transformation + """ + angle = float(torch.empty(1).uniform_(float(degrees[0]), float(degrees[1])).item()) + if translate is not None: + max_dx = float(translate[0] * img_size[0]) + max_dy = float(translate[1] * img_size[1]) + tx = int(round(torch.empty(1).uniform_(-max_dx, max_dx).item())) + ty = int(round(torch.empty(1).uniform_(-max_dy, max_dy).item())) + translations = (tx, ty) + else: + translations = (0, 0) + + if scale_ranges is not None: + scale = float(torch.empty(1).uniform_(scale_ranges[0], scale_ranges[1]).item()) + else: + scale = 1.0 + + shear_x = shear_y = 0.0 + if shears is not None: + shear_x = float(torch.empty(1).uniform_(shears[0], shears[1]).item()) + if len(shears) == 4: + shear_y = float(torch.empty(1).uniform_(shears[2], shears[3]).item()) + + shear = (shear_x, shear_y) + + return angle, translations, scale, shear + + def forward(self, img): + 
""" + img (PIL Image or Tensor): Image to be transformed. + + Returns: + PIL Image or Tensor: Affine transformed image. + """ + fill = self.fill + if isinstance(img, Tensor): + if isinstance(fill, (int, float)): + fill = [float(fill)] * F._get_image_num_channels(img) + else: + fill = [float(f) for f in fill] + + img_size = F._get_image_size(img) + + ret = self.get_params(self.degrees, self.translate, self.scale, self.shear, img_size) + + return F.affine(img, *ret, interpolation=self.interpolation, fill=fill) + + def __repr__(self): + s = '{name}(degrees={degrees}' + if self.translate is not None: + s += ', translate={translate}' + if self.scale is not None: + s += ', scale={scale}' + if self.shear is not None: + s += ', shear={shear}' + if self.interpolation != InterpolationMode.NEAREST: + s += ', interpolation={interpolation}' + if self.fill != 0: + s += ', fill={fill}' + s += ')' + d = dict(self.__dict__) + d['interpolation'] = self.interpolation.value + return s.format(name=self.__class__.__name__, **d) + + +class Grayscale(torch.nn.Module): + """Convert image to grayscale. + If the image is torch Tensor, it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + num_output_channels (int): (1 or 3) number of channels desired for output image + + Returns: + PIL Image: Grayscale version of the input. + + - If ``num_output_channels == 1`` : returned image is single channel + - If ``num_output_channels == 3`` : returned image is 3 channel with r == g == b + + """ + + def __init__(self, num_output_channels=1): + super().__init__() + self.num_output_channels = num_output_channels + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be converted to grayscale. + + Returns: + PIL Image or Tensor: Grayscaled image. + """ + return F.rgb_to_grayscale(img, num_output_channels=self.num_output_channels) + + def __repr__(self): + return self.__class__.__name__ + '(num_output_channels={0})'.format(self.num_output_channels) + + +class RandomGrayscale(torch.nn.Module): + """Randomly convert image to grayscale with a probability of p (default 0.1). + If the image is torch Tensor, it is expected + to have [..., 3, H, W] shape, where ... means an arbitrary number of leading dimensions + + Args: + p (float): probability that image should be converted to grayscale. + + Returns: + PIL Image or Tensor: Grayscale version of the input image with probability p and unchanged + with probability (1-p). + - If input image is 1 channel: grayscale version is 1 channel + - If input image is 3 channel: grayscale version is 3 channel with r == g == b + + """ + + def __init__(self, p=0.1): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be converted to grayscale. + + Returns: + PIL Image or Tensor: Randomly grayscaled image. + """ + num_output_channels = F._get_image_num_channels(img) + if torch.rand(1) < self.p: + return F.rgb_to_grayscale(img, num_output_channels=num_output_channels) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={0})'.format(self.p) + + +class RandomErasing(torch.nn.Module): + """ Randomly selects a rectangle region in an torch Tensor image and erases its pixels. + This transform does not support PIL Image. + 'Random Erasing Data Augmentation' by Zhong et al. See https://arxiv.org/abs/1708.04896 + + Args: + p: probability that the random erasing operation will be performed. 
+ scale: range of proportion of erased area against input image. + ratio: range of aspect ratio of erased area. + value: erasing value. Default is 0. If a single int, it is used to + erase all pixels. If a tuple of length 3, it is used to erase + R, G, B channels respectively. + If a str of 'random', erasing each pixel with random values. + inplace: boolean to make this transform inplace. Default set to False. + + Returns: + Erased Image. + + Example: + >>> transform = transforms.Compose([ + >>> transforms.RandomHorizontalFlip(), + >>> transforms.ToTensor(), + >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), + >>> transforms.RandomErasing(), + >>> ]) + """ + + def __init__(self, p=0.5, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=0, inplace=False): + super().__init__() + if not isinstance(value, (numbers.Number, str, tuple, list)): + raise TypeError("Argument value should be either a number or str or a sequence") + if isinstance(value, str) and value != "random": + raise ValueError("If value is str, it should be 'random'") + if not isinstance(scale, (tuple, list)): + raise TypeError("Scale should be a sequence") + if not isinstance(ratio, (tuple, list)): + raise TypeError("Ratio should be a sequence") + if (scale[0] > scale[1]) or (ratio[0] > ratio[1]): + warnings.warn("Scale and ratio should be of kind (min, max)") + if scale[0] < 0 or scale[1] > 1: + raise ValueError("Scale should be between 0 and 1") + if p < 0 or p > 1: + raise ValueError("Random erasing probability should be between 0 and 1") + + self.p = p + self.scale = scale + self.ratio = ratio + self.value = value + self.inplace = inplace + + @staticmethod + def get_params( + img: Tensor, scale: Tuple[float, float], ratio: Tuple[float, float], value: Optional[List[float]] = None + ) -> Tuple[int, int, int, int, Tensor]: + """Get parameters for ``erase`` for a random erasing. + + Args: + img (Tensor): Tensor image to be erased. + scale (sequence): range of proportion of erased area against input image. + ratio (sequence): range of aspect ratio of erased area. + value (list, optional): erasing value. If None, it is interpreted as "random" + (erasing each pixel with random values). If ``len(value)`` is 1, it is interpreted as a number, + i.e. ``value[0]``. + + Returns: + tuple: params (i, j, h, w, v) to be passed to ``erase`` for random erasing. + """ + img_c, img_h, img_w = img.shape[-3], img.shape[-2], img.shape[-1] + area = img_h * img_w + + log_ratio = torch.log(torch.tensor(ratio)) + for _ in range(10): + erase_area = area * torch.empty(1).uniform_(scale[0], scale[1]).item() + aspect_ratio = torch.exp( + torch.empty(1).uniform_(log_ratio[0], log_ratio[1]) + ).item() + + h = int(round(math.sqrt(erase_area * aspect_ratio))) + w = int(round(math.sqrt(erase_area / aspect_ratio))) + if not (h < img_h and w < img_w): + continue + + if value is None: + v = torch.empty([img_c, h, w], dtype=torch.float32).normal_() + else: + v = torch.tensor(value)[:, None, None] + + i = torch.randint(0, img_h - h + 1, size=(1, )).item() + j = torch.randint(0, img_w - w + 1, size=(1, )).item() + return i, j, h, w, v + + # Return original image + return 0, 0, img_h, img_w, img + + def forward(self, img): + """ + Args: + img (Tensor): Tensor image to be erased. + + Returns: + img (Tensor): Erased Tensor image. 
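A small sketch of ``RandomErasing`` on a tensor input (PIL Images are not supported); the shapes and fill values are illustrative:

>>> x = torch.rand(3, 224, 224)
>>> eraser = T.RandomErasing(p=1.0, value=(0.0, 0.0, 0.0))   # always erase, filling R, G, B with zeros
>>> out = eraser(x)
>>> i, j, h, w, v = T.RandomErasing.get_params(x, scale=(0.02, 0.33), ratio=(0.3, 3.3), value=[0.0, 0.0, 0.0])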
+ """ + if torch.rand(1) < self.p: + + # cast self.value to script acceptable type + if isinstance(self.value, (int, float)): + value = [self.value, ] + elif isinstance(self.value, str): + value = None + elif isinstance(self.value, tuple): + value = list(self.value) + else: + value = self.value + + if value is not None and not (len(value) in (1, img.shape[-3])): + raise ValueError( + "If value is a sequence, it should have either a single value or " + "{} (number of input channels)".format(img.shape[-3]) + ) + + x, y, h, w, v = self.get_params(img, scale=self.scale, ratio=self.ratio, value=value) + return F.erase(img, x, y, h, w, v, self.inplace) + return img + + def __repr__(self): + s = '(p={}, '.format(self.p) + s += 'scale={}, '.format(self.scale) + s += 'ratio={}, '.format(self.ratio) + s += 'value={}, '.format(self.value) + s += 'inplace={})'.format(self.inplace) + return self.__class__.__name__ + s + + +class GaussianBlur(torch.nn.Module): + """Blurs image with randomly chosen Gaussian blur. + If the image is torch Tensor, it is expected + to have [..., C, H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + kernel_size (int or sequence): Size of the Gaussian kernel. + sigma (float or tuple of float (min, max)): Standard deviation to be used for + creating kernel to perform blurring. If float, sigma is fixed. If it is tuple + of float (min, max), sigma is chosen uniformly at random to lie in the + given range. + + Returns: + PIL Image or Tensor: Gaussian blurred version of the input image. + + """ + + def __init__(self, kernel_size, sigma=(0.1, 2.0)): + super().__init__() + self.kernel_size = _setup_size(kernel_size, "Kernel size should be a tuple/list of two integers") + for ks in self.kernel_size: + if ks <= 0 or ks % 2 == 0: + raise ValueError("Kernel size value should be an odd and positive number.") + + if isinstance(sigma, numbers.Number): + if sigma <= 0: + raise ValueError("If sigma is a single number, it must be positive.") + sigma = (sigma, sigma) + elif isinstance(sigma, Sequence) and len(sigma) == 2: + if not 0. < sigma[0] <= sigma[1]: + raise ValueError("sigma values should be positive and of the form (min, max).") + else: + raise ValueError("sigma should be a single number or a list/tuple with length 2.") + + self.sigma = sigma + + @staticmethod + def get_params(sigma_min: float, sigma_max: float) -> float: + """Choose sigma for random gaussian blurring. + + Args: + sigma_min (float): Minimum standard deviation that can be chosen for blurring kernel. + sigma_max (float): Maximum standard deviation that can be chosen for blurring kernel. + + Returns: + float: Standard deviation to be passed to calculate kernel for gaussian blurring. + """ + return torch.empty(1).uniform_(sigma_min, sigma_max).item() + + def forward(self, img: Tensor) -> Tensor: + """ + Args: + img (PIL Image or Tensor): image to be blurred. 
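A short sketch of ``GaussianBlur`` (the kernel size and sigma range are illustrative; kernel size values must be odd and positive):

>>> blur = T.GaussianBlur(kernel_size=5, sigma=(0.1, 2.0))   # a single int becomes a 5x5 kernel
>>> out = blur(img)                                          # sigma is re-drawn from [0.1, 2.0] on every call
>>> sigma = T.GaussianBlur.get_params(0.1, 2.0)              # the same sampling, exposed directly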
+ + Returns: + PIL Image or Tensor: Gaussian blurred image + """ + sigma = self.get_params(self.sigma[0], self.sigma[1]) + return F.gaussian_blur(img, self.kernel_size, [sigma, sigma]) + + def __repr__(self): + s = '(kernel_size={}, '.format(self.kernel_size) + s += 'sigma={})'.format(self.sigma) + return self.__class__.__name__ + s + + +def _setup_size(size, error_msg): + if isinstance(size, numbers.Number): + return int(size), int(size) + + if isinstance(size, Sequence) and len(size) == 1: + return size[0], size[0] + + if len(size) != 2: + raise ValueError(error_msg) + + return size + + +def _check_sequence_input(x, name, req_sizes): + msg = req_sizes[0] if len(req_sizes) < 2 else " or ".join([str(s) for s in req_sizes]) + if not isinstance(x, Sequence): + raise TypeError("{} should be a sequence of length {}.".format(name, msg)) + if len(x) not in req_sizes: + raise ValueError("{} should be sequence of length {}.".format(name, msg)) + + +def _setup_angle(x, name, req_sizes=(2, )): + if isinstance(x, numbers.Number): + if x < 0: + raise ValueError("If {} is a single number, it must be positive.".format(name)) + x = [-x, x] + else: + _check_sequence_input(x, name, req_sizes) + + return [float(d) for d in x] + + +class RandomInvert(torch.nn.Module): + """Inverts the colors of the given image randomly with a given probability. + If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + p (float): probability of the image being color inverted. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be inverted. + + Returns: + PIL Image or Tensor: Randomly color inverted image. + """ + if torch.rand(1).item() < self.p: + return F.invert(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) + + +class RandomPosterize(torch.nn.Module): + """Posterize the image randomly with a given probability by reducing the + number of bits for each color channel. If the image is torch Tensor, it should be of type torch.uint8, + and it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + bits (int): number of bits to keep for each channel (0-8) + p (float): probability of the image being color inverted. Default value is 0.5 + """ + + def __init__(self, bits, p=0.5): + super().__init__() + self.bits = bits + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be posterized. + + Returns: + PIL Image or Tensor: Randomly posterized image. + """ + if torch.rand(1).item() < self.p: + return F.posterize(img, self.bits) + return img + + def __repr__(self): + return self.__class__.__name__ + '(bits={},p={})'.format(self.bits, self.p) + + +class RandomSolarize(torch.nn.Module): + """Solarize the image randomly with a given probability by inverting all pixel + values above a threshold. If img is a Tensor, it is expected to be in [..., 1 or 3, H, W] format, + where ... means it can have an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + threshold (float): all pixels equal or above this value are inverted. + p (float): probability of the image being color inverted. 
Default value is 0.5 + """ + + def __init__(self, threshold, p=0.5): + super().__init__() + self.threshold = threshold + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be solarized. + + Returns: + PIL Image or Tensor: Randomly solarized image. + """ + if torch.rand(1).item() < self.p: + return F.solarize(img, self.threshold) + return img + + def __repr__(self): + return self.__class__.__name__ + '(threshold={},p={})'.format(self.threshold, self.p) + + +class RandomAdjustSharpness(torch.nn.Module): + """Adjust the sharpness of the image randomly with a given probability. If the image is torch Tensor, + it is expected to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + + Args: + sharpness_factor (float): How much to adjust the sharpness. Can be + any non negative number. 0 gives a blurred image, 1 gives the + original image while 2 increases the sharpness by a factor of 2. + p (float): probability of the image being color inverted. Default value is 0.5 + """ + + def __init__(self, sharpness_factor, p=0.5): + super().__init__() + self.sharpness_factor = sharpness_factor + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be sharpened. + + Returns: + PIL Image or Tensor: Randomly sharpened image. + """ + if torch.rand(1).item() < self.p: + return F.adjust_sharpness(img, self.sharpness_factor) + return img + + def __repr__(self): + return self.__class__.__name__ + '(sharpness_factor={},p={})'.format(self.sharpness_factor, self.p) + + +class RandomAutocontrast(torch.nn.Module): + """Autocontrast the pixels of the given image randomly with a given probability. + If the image is torch Tensor, it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "L" or "RGB". + + Args: + p (float): probability of the image being autocontrasted. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be autocontrasted. + + Returns: + PIL Image or Tensor: Randomly autocontrasted image. + """ + if torch.rand(1).item() < self.p: + return F.autocontrast(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) + + +class RandomEqualize(torch.nn.Module): + """Equalize the histogram of the given image randomly with a given probability. + If the image is torch Tensor, it is expected + to have [..., 1 or 3, H, W] shape, where ... means an arbitrary number of leading dimensions. + If img is PIL Image, it is expected to be in mode "P", "L" or "RGB". + + Args: + p (float): probability of the image being equalized. Default value is 0.5 + """ + + def __init__(self, p=0.5): + super().__init__() + self.p = p + + def forward(self, img): + """ + Args: + img (PIL Image or Tensor): Image to be equalized. + + Returns: + PIL Image or Tensor: Randomly equalized image. 
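The probabilistic photometric ops above (invert, posterize, solarize, sharpness, autocontrast, equalize) chain like any other transforms; a sketch assuming ``img`` is a PIL "RGB" image (posterize and equalize also accept uint8 tensors; for float tensors the solarize threshold would lie in [0, 1]):

>>> photometric = T.Compose([
>>>     T.RandomInvert(p=0.2),
>>>     T.RandomPosterize(bits=4, p=0.3),
>>>     T.RandomSolarize(threshold=192, p=0.3),
>>>     T.RandomAdjustSharpness(sharpness_factor=2, p=0.3),
>>>     T.RandomAutocontrast(p=0.3),
>>>     T.RandomEqualize(p=0.3),
>>> ])
>>> out = photometric(img)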
+ """ + if torch.rand(1).item() < self.p: + return F.equalize(img) + return img + + def __repr__(self): + return self.__class__.__name__ + '(p={})'.format(self.p) diff --git a/pretrained_model/pytorch_vision_v0.10.0/torchvision/utils.py b/pretrained_model/pytorch_vision_v0.10.0/torchvision/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..127874337819e80f8e0d526e868e019b5cb84a4f --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/torchvision/utils.py @@ -0,0 +1,297 @@ +from typing import Union, Optional, List, Tuple, Text, BinaryIO +import pathlib +import torch +import math +import warnings +import numpy as np +from PIL import Image, ImageDraw, ImageFont, ImageColor + +__all__ = ["make_grid", "save_image", "draw_bounding_boxes", "draw_segmentation_masks"] + + +@torch.no_grad() +def make_grid( + tensor: Union[torch.Tensor, List[torch.Tensor]], + nrow: int = 8, + padding: int = 2, + normalize: bool = False, + value_range: Optional[Tuple[int, int]] = None, + scale_each: bool = False, + pad_value: int = 0, + **kwargs +) -> torch.Tensor: + """ + Make a grid of images. + + Args: + tensor (Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W) + or a list of images all of the same size. + nrow (int, optional): Number of images displayed in each row of the grid. + The final grid size is ``(B / nrow, nrow)``. Default: ``8``. + padding (int, optional): amount of padding. Default: ``2``. + normalize (bool, optional): If True, shift the image to the range (0, 1), + by the min and max values specified by :attr:`range`. Default: ``False``. + value_range (tuple, optional): tuple (min, max) where min and max are numbers, + then these numbers are used to normalize the image. By default, min and max + are computed from the tensor. + scale_each (bool, optional): If ``True``, scale each image in the batch of + images separately rather than the (min, max) over all images. Default: ``False``. + pad_value (float, optional): Value for the padded pixels. Default: ``0``. + + Returns: + grid (Tensor): the tensor containing grid of images. + """ + if not (torch.is_tensor(tensor) or + (isinstance(tensor, list) and all(torch.is_tensor(t) for t in tensor))): + raise TypeError(f'tensor or list of tensors expected, got {type(tensor)}') + + if "range" in kwargs.keys(): + warning = "range will be deprecated, please use value_range instead." + warnings.warn(warning) + value_range = kwargs["range"] + + # if list of tensors, convert to a 4D mini-batch Tensor + if isinstance(tensor, list): + tensor = torch.stack(tensor, dim=0) + + if tensor.dim() == 2: # single image H x W + tensor = tensor.unsqueeze(0) + if tensor.dim() == 3: # single image + if tensor.size(0) == 1: # if single-channel, convert to 3-channel + tensor = torch.cat((tensor, tensor, tensor), 0) + tensor = tensor.unsqueeze(0) + + if tensor.dim() == 4 and tensor.size(1) == 1: # single-channel images + tensor = torch.cat((tensor, tensor, tensor), 1) + + if normalize is True: + tensor = tensor.clone() # avoid modifying tensor in-place + if value_range is not None: + assert isinstance(value_range, tuple), \ + "value_range has to be a tuple (min, max) if specified. 
min and max are numbers" + + def norm_ip(img, low, high): + img.clamp_(min=low, max=high) + img.sub_(low).div_(max(high - low, 1e-5)) + + def norm_range(t, value_range): + if value_range is not None: + norm_ip(t, value_range[0], value_range[1]) + else: + norm_ip(t, float(t.min()), float(t.max())) + + if scale_each is True: + for t in tensor: # loop over mini-batch dimension + norm_range(t, value_range) + else: + norm_range(tensor, value_range) + + if tensor.size(0) == 1: + return tensor.squeeze(0) + + # make the mini-batch of images into a grid + nmaps = tensor.size(0) + xmaps = min(nrow, nmaps) + ymaps = int(math.ceil(float(nmaps) / xmaps)) + height, width = int(tensor.size(2) + padding), int(tensor.size(3) + padding) + num_channels = tensor.size(1) + grid = tensor.new_full((num_channels, height * ymaps + padding, width * xmaps + padding), pad_value) + k = 0 + for y in range(ymaps): + for x in range(xmaps): + if k >= nmaps: + break + # Tensor.copy_() is a valid method but seems to be missing from the stubs + # https://pytorch.org/docs/stable/tensors.html#torch.Tensor.copy_ + grid.narrow(1, y * height + padding, height - padding).narrow( # type: ignore[attr-defined] + 2, x * width + padding, width - padding + ).copy_(tensor[k]) + k = k + 1 + return grid + + +@torch.no_grad() +def save_image( + tensor: Union[torch.Tensor, List[torch.Tensor]], + fp: Union[Text, pathlib.Path, BinaryIO], + format: Optional[str] = None, + **kwargs +) -> None: + """ + Save a given Tensor into an image file. + + Args: + tensor (Tensor or list): Image to be saved. If given a mini-batch tensor, + saves the tensor as a grid of images by calling ``make_grid``. + fp (string or file object): A filename or a file object + format(Optional): If omitted, the format to use is determined from the filename extension. + If a file object was used instead of a filename, this parameter should always be used. + **kwargs: Other arguments are documented in ``make_grid``. + """ + + grid = make_grid(tensor, **kwargs) + # Add 0.5 after unnormalizing to [0, 255] to round to nearest integer + ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy() + im = Image.fromarray(ndarr) + im.save(fp, format=format) + + +@torch.no_grad() +def draw_bounding_boxes( + image: torch.Tensor, + boxes: torch.Tensor, + labels: Optional[List[str]] = None, + colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None, + fill: Optional[bool] = False, + width: int = 1, + font: Optional[str] = None, + font_size: int = 10 +) -> torch.Tensor: + + """ + Draws bounding boxes on given image. + The values of the input image should be uint8 between 0 and 255. + If fill is True, Resulting Tensor should be saved as PNG image. + + Args: + image (Tensor): Tensor of shape (C x H x W) and dtype uint8. + boxes (Tensor): Tensor of size (N, 4) containing bounding boxes in (xmin, ymin, xmax, ymax) format. Note that + the boxes are absolute coordinates with respect to the image. In other words: `0 <= xmin < xmax < W` and + `0 <= ymin < ymax < H`. + labels (List[str]): List containing the labels of bounding boxes. + colors (List[Union[str, Tuple[int, int, int]]]): List containing the colors of bounding boxes. The colors can + be represented as `str` or `Tuple[int, int, int]`. + fill (bool): If `True` fills the bounding box with specified color. + width (int): Width of bounding box. + font (str): A filename containing a TrueType font. 
If the file is not found in this filename, the loader may + also search in other directories, such as the `fonts/` directory on Windows or `/Library/Fonts/`, + `/System/Library/Fonts/` and `~/Library/Fonts/` on macOS. + font_size (int): The requested font size in points. + + Returns: + img (Tensor[C, H, W]): Image Tensor of dtype uint8 with bounding boxes plotted. + """ + + if not isinstance(image, torch.Tensor): + raise TypeError(f"Tensor expected, got {type(image)}") + elif image.dtype != torch.uint8: + raise ValueError(f"Tensor uint8 expected, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + + ndarr = image.permute(1, 2, 0).numpy() + img_to_draw = Image.fromarray(ndarr) + + img_boxes = boxes.to(torch.int64).tolist() + + if fill: + draw = ImageDraw.Draw(img_to_draw, "RGBA") + + else: + draw = ImageDraw.Draw(img_to_draw) + + txt_font = ImageFont.load_default() if font is None else ImageFont.truetype(font=font, size=font_size) + + for i, bbox in enumerate(img_boxes): + if colors is None: + color = None + else: + color = colors[i] + + if fill: + if color is None: + fill_color = (255, 255, 255, 100) + elif isinstance(color, str): + # This will automatically raise Error if rgb cannot be parsed. + fill_color = ImageColor.getrgb(color) + (100,) + elif isinstance(color, tuple): + fill_color = color + (100,) + draw.rectangle(bbox, width=width, outline=color, fill=fill_color) + else: + draw.rectangle(bbox, width=width, outline=color) + + if labels is not None: + margin = width + 1 + draw.text((bbox[0] + margin, bbox[1] + margin), labels[i], fill=color, font=txt_font) + + return torch.from_numpy(np.array(img_to_draw)).permute(2, 0, 1).to(dtype=torch.uint8) + + +@torch.no_grad() +def draw_segmentation_masks( + image: torch.Tensor, + masks: torch.Tensor, + alpha: float = 0.8, + colors: Optional[List[Union[str, Tuple[int, int, int]]]] = None, +) -> torch.Tensor: + + """ + Draws segmentation masks on given RGB image. + The values of the input image should be uint8 between 0 and 255. + + Args: + image (Tensor): Tensor of shape (3, H, W) and dtype uint8. + masks (Tensor): Tensor of shape (num_masks, H, W) or (H, W) and dtype bool. + alpha (float): Float number between 0 and 1 denoting the transparency of the masks. + 0 means full transparency, 1 means no transparency. + colors (list or None): List containing the colors of the masks. The colors can + be represented as PIL strings e.g. "red" or "#FF00FF", or as RGB tuples e.g. ``(240, 10, 157)``. + When ``masks`` has a single entry of shape (H, W), you can pass a single color instead of a list + with one element. By default, random colors are generated for each mask. + + Returns: + img (Tensor[C, H, W]): Image Tensor, with segmentation masks drawn on top. + """ + + if not isinstance(image, torch.Tensor): + raise TypeError(f"The image must be a tensor, got {type(image)}") + elif image.dtype != torch.uint8: + raise ValueError(f"The image dtype must be uint8, got {image.dtype}") + elif image.dim() != 3: + raise ValueError("Pass individual images, not batches") + elif image.size()[0] != 3: + raise ValueError("Pass an RGB image. Other Image formats are not supported") + if masks.ndim == 2: + masks = masks[None, :, :] + if masks.ndim != 3: + raise ValueError("masks must be of shape (H, W) or (batch_size, H, W)") + if masks.dtype != torch.bool: + raise ValueError(f"The masks must be of dtype bool. 
Got {masks.dtype}") + if masks.shape[-2:] != image.shape[-2:]: + raise ValueError("The image and the masks must have the same height and width") + + num_masks = masks.size()[0] + if colors is not None and num_masks > len(colors): + raise ValueError(f"There are more masks ({num_masks}) than colors ({len(colors)})") + + if colors is None: + colors = _generate_color_palette(num_masks) + + if not isinstance(colors, list): + colors = [colors] + if not isinstance(colors[0], (tuple, str)): + raise ValueError("colors must be a tuple or a string, or a list thereof") + if isinstance(colors[0], tuple) and len(colors[0]) != 3: + raise ValueError("It seems that you passed a tuple of colors instead of a list of colors") + + out_dtype = torch.uint8 + + colors_ = [] + for color in colors: + if isinstance(color, str): + color = ImageColor.getrgb(color) + color = torch.tensor(color, dtype=out_dtype) + colors_.append(color) + + img_to_draw = image.detach().clone() + # TODO: There might be a way to vectorize this + for mask, color in zip(masks, colors_): + img_to_draw[:, mask] = color[:, None] + + out = image * (1 - alpha) + img_to_draw * alpha + return out.to(out_dtype) + + +def _generate_color_palette(num_masks): + palette = torch.tensor([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) + return [tuple((i * palette) % 255) for i in range(num_masks)] diff --git a/pretrained_model/pytorch_vision_v0.10.0/tox.ini b/pretrained_model/pytorch_vision_v0.10.0/tox.ini new file mode 100644 index 0000000000000000000000000000000000000000..9dc57a506d72b93bb509ac982c3c4f69250d82dd --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/tox.ini @@ -0,0 +1,4 @@ +[flake8] +max-line-length = 120 +ignore = F401,E402,F403,W503,W504 +exclude = docs/src diff --git a/pretrained_model/pytorch_vision_v0.10.0/version.txt b/pretrained_model/pytorch_vision_v0.10.0/version.txt new file mode 100644 index 0000000000000000000000000000000000000000..37f1777fc352b10969986e7e56c75b94cf3850e5 --- /dev/null +++ b/pretrained_model/pytorch_vision_v0.10.0/version.txt @@ -0,0 +1 @@ +0.10.0a0 diff --git a/pretrained_model/trusted_list b/pretrained_model/trusted_list new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
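A combined sketch of the utilities added in ``torchvision/utils.py`` above (the shapes, file name, colors and alpha are illustrative; the drawing helpers expect uint8 images):

>>> import torch
>>> from torchvision.utils import make_grid, save_image, draw_bounding_boxes, draw_segmentation_masks
>>> batch = torch.rand(16, 3, 64, 64)                        # toy batch of images in [0, 1]
>>> grid = make_grid(batch, nrow=4, padding=2)
>>> save_image(grid, "grid.png")
>>> img = (torch.rand(3, 64, 64) * 255).to(torch.uint8)
>>> boxes = torch.tensor([[5.0, 5.0, 40.0, 40.0]])           # (xmin, ymin, xmax, ymax)
>>> drawn = draw_bounding_boxes(img, boxes, labels=["box"], colors=["red"], width=2)
>>> masks = torch.zeros(1, 64, 64, dtype=torch.bool)
>>> masks[0, 10:30, 10:30] = True
>>> overlay = draw_segmentation_masks(img, masks, alpha=0.6, colors=["blue"])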