diff --git a/.editorconfig b/.editorconfig
new file mode 100644
index 0000000..1e1dd28
--- /dev/null
+++ b/.editorconfig
@@ -0,0 +1,34 @@
+root = true
+
+[*]
+end_of_line = lf
+trim_trailing_whitespace = true
+insert_final_newline = true
+charset = utf-8
+indent_style = tab
+indent_size = 2
+
+# YAML doesn't support hard tabs, and Markdown templates render oddly with hard
+# tabs in the website editor
+[*.{yml,yaml,md}]
+indent_style = space
+indent_size = 2
+
+# Force Python to follow its own convention: 4-space, space-based indentation
+[*.{py,ipynb}]
+indent_size = 4
+indent_style = space
+
+[{**.*sh}]
+indent_size = 2
+indent_style = tab
+
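+# The keys below are shfmt options (shfmt reads them from EditorConfig); the
+# "like -bn" comments note the equivalent shfmt CLI flags.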
+shell_variant = bash
+binary_next_line = false # like -bn
+switch_case_indent = true # like -ci
+space_redirects = true # like -sr
+keep_padding = false # like -kp
+end_of_line = lf
+charset = utf-8
+trim_trailing_whitespace = true
+insert_final_newline = true
diff --git a/.flake8 b/.flake8
new file mode 100644
index 0000000..dac69f6
--- /dev/null
+++ b/.flake8
@@ -0,0 +1,130 @@
+[flake8]
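+# Violation code prefixes map to the flake8 plugins in use, e.g. B = flake8-bugbear,
+# C81x = flake8-commas, D = flake8-docstrings, E/W = pycodestyle, E800 = flake8-eradicate,
+# S = flake8-bandit, RST = flake8-rst-docstrings, WPS = wemake-python-styleguide.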
+extend-ignore =
+ # Allow function call as argument default
+ B008,
+ # Do not enforce trailing comma (lets Black decide what is best)
+ C812,C813,C814,C815,C816,C818,C819,
+ # Don't ask for docstring at top of module --- put it in the functions/classes
+ D100,
+ # Do not check for docstring within __init__ method
+ D107,
+ # Ignore whitespace before ';'
+ E203,
+ # Don't ask about line length, Black recommends using bugbear B950 instead
+ E501,
+ # Stop finding commented out code because it's mistaking shape annotations for code
+ E800,
+ # Stop complaining about subprocess, we need it for this project
+ S404,S602,S603,S607,
+ # Stop complaining about using functions from random
+ S311,
+ # Ignore errors for internal mypy traceback, stderr output, or an unmatched line.
+ T499,
+ # Do not complain about line-break before binary operator (caused by Black)
+ W503,
+ # Do not warn on too many imports.
+ WPS201,
+ # Do not warn on too many module members
+ WPS202,
+ # Do not warn when too many arguments in functions
+ WPS211,
+ # Do not warn on too many methods
+ WPS214,
+ # Allow lots of importing from the same module --- it can happen and that's okay!
+ WPS235,
+ # Do not warn on complex f-string
+ WPS237,
+ # Allow relative module references
+ WPS300,
+ # Allow f-strings
+ WPS305,
+ # Do not force base classes to inherit object
+ WPS306,
+ # Allow return statement that simply returns a prior statement
+ WPS331,
+ # Allow new lines to start with a dot (caused by Black)
+ WPS348,
+ # Allow logic in __init__ modules
+ WPS412,
+ # We use magic attributes because we need to.
+ WPS609,
+ # Google Python style is not RST until after processed by Napoleon
+ # See https://github.com/peterjc/flake8-rst-docstrings/issues/17
+ RST201,RST203,RST301,
+extend-select =
+ # Should raise AssertionError instead of assert False
+ B011,
+ # Use of break, continue or return in finally blocks will silence exceptions.
+ B012,
+ # Redundant exception types in except
+ B014,
+ # Pointless comparisons
+ B015,
+ # Cannot raise a literal
+ B016,
+ # Do not use `self.assertRaises(Exception)`
+ B017,
+ # Find useless expressions
+ B018,
+ # Use namedtuple instead of dataclass when only `__init__` attributes are set
+ B903,
+ # Within an except clause, raise exceptions with `raise ... from err` or `raise ...
+ # from None` to distinguish them from errors in exception handling
+ B904,
+ # Counterpart to W503, enforce having the operator at the start of a new line.
+ W504,
+
+max-line-length = 99
+max-complexity = 18
+max-methods = 10
+max-line-complexity = 18
+max-local-variables = 20
+max-expressions = 20
+max-function-expressions = 10
+max-module-expressions = 20
+max-string-usages = 10
+max-annotation-complexity = 4
+min-name-length = 1
+max-try-body-length = 2
+exps-for-one-empty-line = 1
+max-access-level = 5
+show-violation-links = true
+format = wemake
+
+# Black enforces double quotes.
+inline-quotes = double
+
+docstring-convention = google
+
+# Darglint
+docstring_style = google
+strictness = long
+
+nested-classes-whitelist =
+ Meta
+ Params
+ Config
+
+allowed-domain-names =
+ data
+ utils
+ util
+ params
+
+per-file-ignores =
+ src/*/_version.py:WPS410
+ src/**/__init__.py:D,F401,WPS436
+ src/*/__main__.py:WPS404
+ src/arena_missions/challenges/*:WPS226,WPS430
+ src/arena_missions/challenges/__init__.py:D
+ src/arena_missions/builders/required_objects_builder.py:WPS226
+ tests/*:D,F401,WPS118,WPS202,WPS204,WPS214,WPS218,WPS226,WPS231,WPS232,WPS235,WPS301,WPS404,WPS432,WPS437,WPS442,S101
+
+extend-exclude=
+ .venv/,
+ *_cache/,
+ .cache/,
+ logs/,
+ storage/,
+ docs/,
+ src/arena_wrapper/
diff --git a/.github/labels.yml b/.github/labels.yml
new file mode 100644
index 0000000..43c77b3
--- /dev/null
+++ b/.github/labels.yml
@@ -0,0 +1,90 @@
+---
+# Label names are important: Release Drafter uses them to decide where to
+# record pull requests in the changelog, or whether to skip them.
+#
+# The repository labels will be automatically configured using this file and
+# the GitHub Action https://github.com/marketplace/actions/github-labeler.
+# More info: https://github.com/crazy-max/ghaction-github-labeler
+
+# ------------------------- Conventional Commit types ------------------------ #
+# From https://github.com/commitizen/conventional-commit-types/blob/master/index.json
+
+- name: feature
+ description: A new enhancement or feature
+ color: 0A8844
+ from_name: "enhancement"
+
+- name: fix
+ description: A bug fix
+ color: d23832
+ from_name: "bug"
+
+- name: documentation
+ description: Documentation changes only
+ color: 8AD9F5
+
+- name: style
+ description: Changes that do not affect the meaning of the code (formatting, etc.)
+ color: F9CD8E
+
+- name: refactor
+ description: Code change that neither fixes a bug nor adds a feature
+ color: FBCA0C
+ from_name: refactoring
+
+- name: performance
+ description: Code change that improves performance
+ color: F2A33C
+
+- name: test
+ description: Adding missing tests or correcting existing tests
+ color: 34FFB3
+
+- name: build
+ description: Changes that affect the build system or external dependencies
+ color: 8F4FBB
+
+- name: continuous integration
+ description: Changes to CI configuration and scripts
+ color: FCBFE3
+
+- name: chore
+ description: Other changes that don't modify src or test files
+ color: d3d3d3
+
+- name: revert
+ description: Revert a previous commit
+ color: 1e1e1e
+
+- name: backwards incompatible
+ description: Incompatible changes to how the application works
+ color: AB2232
+
+- name: question
+ description: Further information is requested
+ color: EE328E
+
+# ------------------------------- Dependencies ------------------------------- #
+- name: dependencies
+ description: Pull requests that update dependencies
+ color: 0366d6
+
+# ------------------------------ Utility labels ------------------------------ #
+- name: automerge
+ color: "ffffff"
+ description: "Automerge this PR"
+
+- name: "stale"
+ color: "ffffff"
+ description: ""
+# - name: duplicate
+# description: This issue or pull request already exists
+# color: ffffff
+
+# - name: invalid
+# description: This doesn't seem right
+# color: ffffff
+
+# - name: wontfix
+# description: This will not be worked on
+# color: ffffff
diff --git a/.github/workflows/continuous_integration.yml b/.github/workflows/continuous_integration.yml
new file mode 100644
index 0000000..38ecb8b
--- /dev/null
+++ b/.github/workflows/continuous_integration.yml
@@ -0,0 +1,170 @@
+name: Continuous Integration
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ pull_request:
+ types: [opened, reopened, synchronize, ready_for_review]
+ branches:
+ - main
+
+env:
+ PYTHON_VERSION: 3.9
+
+jobs:
+ changes:
+ name: Check for Python file changes
+ runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ outputs:
+ python: ${{ steps.filter.outputs.python }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
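+ # Detect whether any Python-related files changed; the downstream jobs are skipped otherwise.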
+ - uses: dorny/paths-filter@v2
+ id: filter
+ with:
+ filters: |
+ python:
+ - '**/*.py'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - '.github/workflows/continuous_integration.yml'
+ - '.mypy.ini'
+ - '.flake8'
+
+ typecheck:
+ name: Type check Python
+ needs: [changes]
+ if: ${{ needs.changes.outputs.python == 'true' && !github.event.pull_request.draft }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout the repository
+ uses: actions/checkout@v4
+
+ - name: Setup reviewdog
+ uses: reviewdog/action-setup@v1
+
+ - name: Install Poetry
+ run: pipx install poetry
+
+ - name: Set up Python ${{ env.PYTHON_VERSION }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ cache: "poetry"
+
+ - name: Install dependencies
+ run: poetry install
+
+ - name: Load mypy cache
+ uses: actions/cache@v3
+ id: mypy-cache
+ with:
+ path: .mypy_cache
+ key: ${{ runner.os }}-mypy-cache-${{ hashFiles('poetry.lock') }}-${{ hashFiles('pyproject.toml') }}
+ restore-keys: |
+ ${{ runner.os }}-mypy-cache-${{ hashFiles('poetry.lock') }}-${{ hashFiles('pyproject.toml') }}
+ ${{ runner.os }}-mypy-cache-
+
+ - name: Run mypy with reviewdog
+ env:
+ REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_EVENT_NAME: ${{ github.event_name }}
+ run: |
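+ # Post inline review comments on pull requests, otherwise report via a GitHub check.
+ # reviewdog parses the mypy output using the errorformat (-efm) below and fails the
+ # job when any diagnostics are reported.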
+ exit_val="0"
+ [[ $GITHUB_EVENT_NAME == "pull_request" ]] && reporter="github-pr-review" || reporter="github-check"
+ poetry run mypy \
+ --show-column-numbers \
+ --show-absolute-path \
+ --no-error-summary . 2>&1 | reviewdog \
+ -efm="%f:%l:%c: %t%*[^:]: %m" \
+ -name="mypy" \
+ -filter-mode=nofilter \
+ -fail-on-error \
+ -reporter="${reporter}" || exit_val="$?"
+ if [[ "${exit_val}" -ne '0' ]]; then
+ exit 1
+ fi
+
+ lint:
+ name: Lint Python
+ needs: [changes]
+ if: ${{ needs.changes.outputs.python == 'true' && !github.event.pull_request.draft }}
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Setup reviewdog
+ uses: reviewdog/action-setup@v1
+
+ - name: Install Poetry
+ run: pipx install poetry
+
+ - name: Set up Python ${{ env.PYTHON_VERSION }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ cache: "poetry"
+
+ - name: Install dependencies
+ run: poetry install
+
+ - name: Run flake8
+ env:
+ REVIEWDOG_GITHUB_API_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GITHUB_EVENT_NAME: ${{ github.event_name }}
+ run: |
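+ # Same pattern as the mypy step: pipe flake8's default-format output through reviewdog,
+ # which posts PR review comments or a GitHub check depending on the triggering event.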
+ exit_val="0"
+ [[ $GITHUB_EVENT_NAME == "pull_request" ]] && reporter="github-pr-review" || reporter="github-check"
+ poetry run flake8 \
+ --format=default . 2>&1 | reviewdog \
+ -f=pep8 \
+ -name="flake8" \
+ -fail-on-error \
+ -filter-mode=file \
+ -reporter="${reporter}" || exit_val="$?"
+ if [[ "${exit_val}" -ne '0' ]]; then
+ exit 1
+ fi
+
+ format:
+ name: Format
+ runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v4
+
+ - name: Set up Python ${{ env.PYTHON_VERSION }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ cache: "pip"
+
+ - name: Install pre-commit
+ run: |
+ pip install pre-commit
+
+ - name: Load cached pre-commit environment
+ uses: actions/cache@v3
+ id: pre-commit-cache
+ with:
+ path: ~/.cache/pre-commit
+ key: ${{ runner.os }}-pre-commit-${{ hashFiles('**/.pre-commit-config.yaml') }}
+ restore-keys: |
+ ${{ runner.os }}-pre-commit-
+
+ - name: Run pre-commit hook
+ id: run-pre-commit-hooks
+ run: |
+ git add .pre-commit-config.yaml
+ pre-commit run --color=always --all-files
+
+ - name: Annotate any changes using reviewdog
+ if: ${{ failure() }}
+ id: reviewdog-suggester
+ uses: reviewdog/action-suggester@v1
+ with:
+ tool_name: pre-commit
diff --git a/.github/workflows/pr-lint.yml b/.github/workflows/pr-lint.yml
new file mode 100644
index 0000000..2c917a9
--- /dev/null
+++ b/.github/workflows/pr-lint.yml
@@ -0,0 +1,42 @@
+name: "Lint PR"
+
+on:
+ pull_request_target:
+ types:
+ - opened
+ - edited
+ - synchronize
+
+jobs:
+ main:
+ name: Validate PR title
+ runs-on: ubuntu-latest
+ steps:
+ - name: Validate PR title
+ uses: amannn/action-semantic-pull-request@v5
+ id: lint_pr_title
+ env:
+ GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ # When the previous step fails, the workflow would stop. By adding this
+ # condition you can continue the execution with the populated error message.
+ - name: Create error message if validation fails
+ uses: marocchino/sticky-pull-request-comment@v2
+ if: always() && (steps.lint_pr_title.outputs.error_message != null)
+ with:
+ header: pr-title-lint-error
+ message: |
+ Hey there and thank you for opening this pull request! 👋🏼
+ We require pull request titles to follow the [Conventional Commits specification](https://www.conventionalcommits.org/en/v1.0.0/) and it looks like your proposed title needs to be adjusted.
+
+ Details:
+ ```
+ ${{ steps.lint_pr_title.outputs.error_message }}
+ ```
+
+ - name: Delete previous comment when issue is resolved
+ if: ${{ steps.lint_pr_title.outputs.error_message == null }}
+ uses: marocchino/sticky-pull-request-comment@v2
+ with:
+ header: pr-title-lint-error
+ delete: true
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 0000000..461c709
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,29 @@
+name: Releases
+
+on:
+ push:
+ branches: [main]
+
+env:
+ PYTHON_VERSION: 3.9
+
+jobs:
+ labeler:
+ name: Update labels
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out the repository
+ uses: actions/checkout@v4
+
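+ # ghaction-github-labeler syncs repository labels from .github/labels.yml (the action's default path).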
+ - name: Run Labeler
+ uses: crazy-max/ghaction-github-labeler@v4.1.0
+
+ continuous-integration:
+ name: Continuous Integration
+ uses: "./.github/workflows/continuous_integration.yml"
+ secrets: inherit
+
+ tests:
+ name: Tests
+ uses: "./.github/workflows/tests.yml"
+ secrets: inherit
diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml
new file mode 100644
index 0000000..76d5b5b
--- /dev/null
+++ b/.github/workflows/tests.yml
@@ -0,0 +1,73 @@
+name: Tests
+
+on:
+ workflow_dispatch:
+ workflow_call:
+ pull_request:
+ branches: [main]
+ paths-ignore: ["**/*.md", "**/*.rst"]
+
+env:
+ PYTHON_VERSION: 3.9
+ # Disable tokenizers parallelism because it doesn't help and can cause issues in distributed tests.
+ TOKENIZERS_PARALLELISM: "false"
+ # Disable OpenMP multithreading because it can lead to deadlocks in distributed tests.
+ OMP_NUM_THREADS: "1"
+ # See https://github.com/pytorch/pytorch/issues/37377#issuecomment-677851112.
+ MKL_THREADING_LAYER: "GNU"
+
+jobs:
+ changes:
+ name: Check for Python file changes
+ runs-on: ubuntu-latest
+ if: ${{ !github.event.pull_request.draft }}
+ outputs:
+ python: ${{ steps.filter.outputs.python }}
+ steps:
+ - name: Checkout repository
+ uses: actions/checkout@v3
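+ # Detect whether Python code, fixtures, or dependency files changed; the test job below is skipped otherwise.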
+ - uses: dorny/paths-filter@v2
+ id: filter
+ with:
+ filters: |
+ python:
+ - '**/*.py'
+ - 'storage/fixtures/**/*'
+ - 'pyproject.toml'
+ - 'poetry.lock'
+ - '.github/workflows/tests.yml'
+
+ python:
+ name: Run Python test suite
+ defaults:
+ run:
+ shell: bash
+
+ runs-on: ubuntu-latest
+ needs: [changes]
+ if: ${{ needs.changes.outputs.python == 'true' && !github.event.pull_request.draft }}
+ steps:
+ - name: Checkout the repository
+ uses: actions/checkout@v4
+
+ - name: Install Poetry
+ run: pipx install poetry
+
+ - name: Set up Python ${{ env.PYTHON_VERSION }}
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ env.PYTHON_VERSION }}
+ cache: "poetry"
+
+ - name: Install dependencies
+ run: poetry install
+
+ - name: Run test suite
+ run: poetry run poe test-everything | tee pytest-coverage.txt
+
+ - name: Comment the coverage
+ if: ${{ always() && github.event_name == 'pull_request' && github.actor != 'dependabot[bot]' }}
+ uses: MishaKav/pytest-coverage-comment@main
+ with:
+ pytest-coverage-path: ./pytest-coverage.txt
+ junitxml-path: ./pytest.xml
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..b22618d
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,595 @@
+# Created by https://www.toptal.com/developers/gitignore/api/visualstudiocode,jetbrains+all,python,jupyternotebooks,node,data,images,video
+
+.rtx.toml
+
+# Edit at https://www.toptal.com/developers/gitignore?templates=visualstudiocode,jetbrains+all,python,jupyternotebooks,node,data,images,video
+
+### Data ###
+*.csv
+*.dat
+*.efx
+*.gbr
+*.key
+*.pps
+*.ppt
+*.pptx
+*.sdf
+*.tax2010
+*.vcf
+*.xml
+
+### Images ###
+# JPEG
+*.jpg
+*.jpeg
+*.jpe
+*.jif
+*.jfif
+*.jfi
+
+# JPEG 2000
+*.jp2
+*.j2k
+*.jpf
+*.jpx
+*.jpm
+*.mj2
+
+# JPEG XR
+*.jxr
+*.hdp
+*.wdp
+
+# Graphics Interchange Format
+*.gif
+
+# RAW
+*.raw
+
+# Web P
+*.webp
+
+# Portable Network Graphics
+*.png
+
+# Animated Portable Network Graphics
+*.apng
+
+# Multiple-image Network Graphics
+*.mng
+
+# Tagged Image File Format
+*.tiff
+*.tif
+
+# Scalable Vector Graphics
+*.svg
+*.svgz
+
+# Portable Document Format
+*.pdf
+
+# X BitMap
+*.xbm
+
+# BMP
+*.bmp
+*.dib
+
+# ICO
+*.ico
+
+# 3D Images
+*.3dm
+*.max
+
+### JetBrains+all ###
+# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
+# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
+
+# User-specific stuff
+.idea/**/workspace.xml
+.idea/**/tasks.xml
+.idea/**/usage.statistics.xml
+.idea/**/dictionaries
+.idea/**/shelf
+
+# AWS User-specific
+.idea/**/aws.xml
+
+# Generated files
+.idea/**/contentModel.xml
+
+# Sensitive or high-churn files
+.idea/**/dataSources/
+.idea/**/dataSources.ids
+.idea/**/dataSources.local.xml
+.idea/**/sqlDataSources.xml
+.idea/**/dynamic.xml
+.idea/**/uiDesigner.xml
+.idea/**/dbnavigator.xml
+
+# Gradle
+.idea/**/gradle.xml
+.idea/**/libraries
+
+# Gradle and Maven with auto-import
+# When using Gradle or Maven with auto-import, you should exclude module files,
+# since they will be recreated, and may cause churn. Uncomment if using
+# auto-import.
+# .idea/artifacts
+# .idea/compiler.xml
+# .idea/jarRepositories.xml
+# .idea/modules.xml
+# .idea/*.iml
+# .idea/modules
+# *.iml
+# *.ipr
+
+# CMake
+cmake-build-*/
+
+# Mongo Explorer plugin
+.idea/**/mongoSettings.xml
+
+# File-based project format
+*.iws
+
+# IntelliJ
+out/
+
+# mpeltonen/sbt-idea plugin
+.idea_modules/
+
+# JIRA plugin
+atlassian-ide-plugin.xml
+
+# Cursive Clojure plugin
+.idea/replstate.xml
+
+# SonarLint plugin
+.idea/sonarlint/
+
+# Crashlytics plugin (for Android Studio and IntelliJ)
+com_crashlytics_export_strings.xml
+crashlytics.properties
+crashlytics-build.properties
+fabric.properties
+
+# Editor-based Rest Client
+.idea/httpRequests
+
+# Android studio 3.1+ serialized cache file
+.idea/caches/build_file_checksums.ser
+
+### JetBrains+all Patch ###
+# Ignore everything but code style settings and run configurations
+# that are supposed to be shared within teams.
+
+.idea/*
+
+!.idea/codeStyles
+!.idea/runConfigurations
+
+### JupyterNotebooks ###
+# gitignore template for Jupyter Notebooks
+# website: http://jupyter.org/
+
+.ipynb_checkpoints
+*/.ipynb_checkpoints/*
+
+# IPython
+profile_default/
+ipython_config.py
+
+# Remove previous ipynb_checkpoints
+# git rm -r .ipynb_checkpoints/
+
+### Node ###
+# Logs
+logs
+*.log
+npm-debug.log*
+yarn-debug.log*
+yarn-error.log*
+lerna-debug.log*
+.pnpm-debug.log*
+
+# Diagnostic reports (https://nodejs.org/api/report.html)
+report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json
+
+# Runtime data
+pids
+*.pid
+*.seed
+*.pid.lock
+
+# Directory for instrumented libs generated by jscoverage/JSCover
+lib-cov
+
+# Coverage directory used by tools like istanbul
+coverage
+*.lcov
+
+# nyc test coverage
+.nyc_output
+
+# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
+.grunt
+
+# Bower dependency directory (https://bower.io/)
+bower_components
+
+# node-waf configuration
+.lock-wscript
+
+# Compiled binary addons (https://nodejs.org/api/addons.html)
+build/Release
+
+# Dependency directories
+node_modules/
+jspm_packages/
+
+# Snowpack dependency directory (https://snowpack.dev/)
+web_modules/
+
+# TypeScript cache
+*.tsbuildinfo
+
+# Optional npm cache directory
+.npm
+
+# Optional eslint cache
+.eslintcache
+
+# Optional stylelint cache
+.stylelintcache
+
+# Microbundle cache
+.rpt2_cache/
+.rts2_cache_cjs/
+.rts2_cache_es/
+.rts2_cache_umd/
+
+# Optional REPL history
+.node_repl_history
+
+# Output of 'npm pack'
+*.tgz
+
+# Yarn Integrity file
+.yarn-integrity
+
+# dotenv environment variable files
+.envrc
+.env
+.env.development.local
+.env.test.local
+.env.production.local
+.env.local
+
+# parcel-bundler cache (https://parceljs.org/)
+.cache
+.parcel-cache
+
+# Next.js build output
+.next
+out
+
+# Nuxt.js build / generate output
+.nuxt
+dist
+
+# Gatsby files
+.cache/
+# Comment in the public line if your project uses Gatsby and not Next.js
+# https://nextjs.org/blog/next-9-1#public-directory-support
+# public
+
+# vuepress build output
+.vuepress/dist
+
+# vuepress v2.x temp and cache directory
+.temp
+
+# Docusaurus cache and generated files
+.docusaurus
+
+# Serverless directories
+.serverless/
+
+# FuseBox cache
+.fusebox/
+
+# DynamoDB Local files
+.dynamodb/
+
+# TernJS port file
+.tern-port
+
+# Stores VSCode versions used for testing VSCode extensions
+.vscode-test
+
+# yarn v2
+.yarn/cache
+.yarn/unplugged
+.yarn/build-state.yml
+.yarn/install-state.gz
+.pnp.*
+
+### Node Patch ###
+# Serverless Webpack directories
+.webpack/
+
+# Optional stylelint cache
+
+# SvelteKit build / generate output
+.svelte-kit
+
+### Python ###
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+
+# IPython
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+.python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+poetry.toml
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+### Video ###
+*.3g2
+*.3gp
+*.asf
+*.asx
+*.avi
+*.flv
+*.mkv
+*.mov
+*.mp4
+*.mpg
+*.ogv
+*.rm
+*.swf
+*.vob
+*.wmv
+*.webm
+
+### VisualStudioCode ###
+.vscode/*
+# !.vscode/settings.json
+!.vscode/tasks.json
+!.vscode/launch.json
+!.vscode/extensions.json
+!.vscode/*.code-snippets
+
+# Local History for Visual Studio Code
+.history/
+
+# Built Visual Studio Code Extensions
+*.vsix
+
+### VisualStudioCode Patch ###
+# Ignore all local history of files
+.history
+.ionide
+
+# Support for Project snippet scope
+.vscode/*.code-snippets
+
+# Ignore code-workspaces
+*.code-workspace
+
+# End of https://www.toptal.com/developers/gitignore/api/visualstudiocode,jetbrains+all,python,jupyternotebooks,node,data,images,video
+
+# Created by https://www.toptal.com/developers/gitignore/api/terraform,terragrunt
+# Edit at https://www.toptal.com/developers/gitignore?templates=terraform,terragrunt
+
+### Terraform ###
+# Local .terraform directories
+**/.terraform/*
+
+# .tfstate files
+*.tfstate
+*.tfstate.*
+
+# Crash log files
+crash.log
+crash.*.log
+
+# Exclude all .tfvars files, which are likely to contain sensitive data, such as
+# passwords, private keys, and other secrets. These should not be part of version
+# control as they are data points which are potentially sensitive and subject
+# to change depending on the environment.
+*.tfvars
+*.tfvars.json
+
+# Ignore override files as they are usually used to override resources locally and so
+# are not checked in
+override.tf
+override.tf.json
+*_override.tf
+*_override.tf.json
+
+# Include override files you do wish to add to version control using negated pattern
+# !example_override.tf
+
+# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
+# example: *tfplan*
+
+# Ignore CLI configuration files
+.terraformrc
+terraform.rc
+
+### Terragrunt ###
+# terragrunt cache directories
+**/.terragrunt-cache/*
+
+# Terragrunt debug output file (when using `--terragrunt-debug` option)
+# See: https://terragrunt.gruntwork.io/docs/reference/cli-options/#terragrunt-debug
+terragrunt-debug.tfvars.json
+
+# End of https://www.toptal.com/developers/gitignore/api/terraform,terragrunt
+
+storage/*
+!storage/**/.gitkeep
+!storage/cdfs
+storage/cdfs/missions/
+
+# ignore wandb
+wandb/
+
+# Ignore the docker dir symlink
+docker/
+docker
+
+# Handle ignoring files within the bin
+bin/**/build
+!bin/**/*.png
+!bin/**/*.ico
diff --git a/.kodiak.toml b/.kodiak.toml
new file mode 100644
index 0000000..de128d4
--- /dev/null
+++ b/.kodiak.toml
@@ -0,0 +1,15 @@
+version = 1
+
+[merge.automerge_dependencies]
+# Only auto-merge "minor" and "patch" version upgrades;
+# do not auto-merge "major" version upgrades.
+versions = ["minor", "patch"]
+usernames = ["dependabot"]
+
+# Allow dependabot to update and close stale dependency upgrades.
+[update]
+ignored_usernames = ["dependabot"]
+
+# Automatically approve pull requests opened by these users so they can be auto-merged.
+[approve]
+auto_approve_usernames = ["dependabot"]
diff --git a/.mypy.ini b/.mypy.ini
new file mode 100644
index 0000000..af1888d
--- /dev/null
+++ b/.mypy.ini
@@ -0,0 +1,40 @@
+[mypy]
+
+python_version = 3.9
+exclude = (?x)(configs | storage | logs | docs | src/arena_wrapper)
+
+# Import discovery
+ignore_missing_imports = true
+namespace_packages = true
+
+# Untyped definitions and calls
+disallow_untyped_defs = true
+disallow_incomplete_defs = true
+check_untyped_defs = true
+disallow_any_generics = true
+disallow_subclassing_any = true
+disallow_untyped_decorators = false
+
+# None and Optional handling
+no_implicit_optional = true
+
+# Warnings
+warn_redundant_casts = true
+warn_unused_ignores = true
+warn_unused_configs = true
+warn_unreachable = true
+
+# Config error messages
+show_error_context = false
+show_column_numbers = true
+show_error_codes = true
+pretty = false
+
+plugins = pydantic.mypy
+
+[mypy-tests.*]
+disallow_untyped_defs = false
+
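+# Settings for the pydantic plugin enabled above: type the synthesized __init__
+# arguments and warn when model fields are missing annotations.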
+[pydantic-mypy]
+init_typed = true
+warn_untyped_fields = true
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000..f328f3f
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,156 @@
+# Pin the Node version used by node-based hooks (e.g. prettier).
+default_language_version:
+ node: 16.14.2
+
+repos:
+ # -------------------------- Version control checks -------------------------- #
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: check-merge-conflict
+ name: Check for merge conflicts
+ - id: check-vcs-permalinks
+ name: Ensure links to VCS websites are permalinks
+ - id: detect-private-key
+ name: Detect private key
+ - id: check-case-conflict
+ name: Check issues with file name casing
+ - id: check-symlinks
+ name: Check for symlinks which point to nothing
+ - id: destroyed-symlinks
+ name: Check for destroyed symlinks
+
+ - repo: https://github.com/sirosen/check-jsonschema
+ rev: 0.27.2
+ hooks:
+ - id: check-github-workflows
+ name: Validate GitHub workflows
+ types: [yaml]
+
+ # --------------------------- Commit message check --------------------------- #
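+ # This hook runs at the commit-msg stage, so it only takes effect locally after
+ # running `pre-commit install --hook-type commit-msg`.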
+ - repo: https://github.com/compilerla/conventional-pre-commit
+ rev: v3.0.0
+ hooks:
+ - id: conventional-pre-commit
+ name: Check commit message follows Conventional Commits
+ stages: [commit-msg]
+
+ # ----------------------------- Check file issues ---------------------------- #
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: check-toml
+ name: Check TOML
+ types: [toml]
+ - id: check-yaml
+ name: Check YAML
+ args: [--allow-multiple-documents]
+ types: [yaml]
+ - id: end-of-file-fixer
+ name: Fix end of files
+ types: [text]
+ - id: trailing-whitespace
+ name: Trim trailing whitespace
+ args: [--markdown-linebreak-ext=md]
+ types: [text]
+ - id: mixed-line-ending
+ name: Check line endings
+ - id: fix-encoding-pragma
+ name: Remove any encoding pragma
+ args: [--remove]
+
+ # ------------------------------ Python checking ----------------------------- #
+ - repo: https://github.com/pre-commit/pre-commit-hooks
+ rev: v4.5.0
+ hooks:
+ - id: debug-statements
+ name: Check for debugger statements
+ types: [python]
+
+ - repo: https://github.com/pre-commit/pygrep-hooks
+ rev: v1.10.0
+ hooks:
+ - id: python-use-type-annotations
+ name: Enforce type annotations over type comments
+ types: [python]
+ - id: python-check-blanket-noqa
+ name: Check for blanket `# noqa`
+ types: [python]
+ - id: python-check-blanket-type-ignore
+ name: "Check for blanket `# type: ignore`"
+ types: [python]
+ - id: python-no-log-warn
+ name: Check for deprecated `.warn()` method of python loggers
+ types: [python]
+
+ # ----------------------------- Automatic linters ---------------------------- #
+ - repo: https://github.com/asottile/pyupgrade
+ rev: v3.15.0
+ hooks:
+ - id: pyupgrade
+ name: Update syntax for newer Python
+ types: [python]
+ args: ["--py39-plus"]
+ - repo: https://github.com/sirosen/texthooks
+ rev: 0.6.3
+ hooks:
+ - id: fix-smartquotes
+ name: Fix Smart Quotes
+ - repo: https://github.com/asottile/yesqa
+ rev: v1.5.0
+ hooks:
+ - id: yesqa
+ name: Remove unnecessary `# noqa` comments
+ types: [python]
+ additional_dependencies: [wemake-python-styleguide]
+
+ # ------------------------------ Python imports ------------------------------ #
+ - repo: https://github.com/hakancelik96/unimport
+ rev: 1.1.0
+ hooks:
+ - id: unimport
+ name: Remove any unused imports
+ types: [python]
+ args:
+ [
+ --remove,
+ --exclude,
+ '^.*/?__init__\.py$',
+ --include-star-import,
+ --gitignore,
+ ]
+ - repo: https://github.com/MarcoGorelli/absolufy-imports
+ rev: v0.3.1
+ hooks:
+ - id: absolufy-imports
+ types: [python]
+ name: Convert relative imports to absolute
+ - repo: https://github.com/timothycrosley/isort
+ rev: 5.12.0
+ hooks:
+ - id: isort
+ name: Format imports
+ additional_dependencies: [toml]
+ types: [python]
+ exclude: ^.*/?setup\.py$
+
+ # -------------------------------- Formatting -------------------------------- #
+ - repo: https://github.com/pre-commit/mirrors-prettier
+ rev: v3.1.0
+ hooks:
+ - id: prettier
+ name: Prettier
+ exclude: ^.*/?CHANGELOG\.md$
+ - repo: https://github.com/myint/docformatter
+ rev: v1.7.5
+ hooks:
+ - id: docformatter
+ name: Format docstrings
+ types: [python]
+ args: [--in-place, --wrap-summaries=99, --wrap-descriptions=99]
+ - repo: https://github.com/psf/black
+ rev: 23.11.0
+ hooks:
+ - id: black-jupyter
+ types: [python]
+ name: Format code
diff --git a/.releaserc.js b/.releaserc.js
new file mode 100644
index 0000000..43335e5
--- /dev/null
+++ b/.releaserc.js
@@ -0,0 +1,71 @@
+const RELEASE_BRANCH = process.env.RELEASE_BRANCH || "main";
+const CHANGELOG_FILE = process.env.CHANGELOG_FILE || "CHANGELOG.md";
+const VERSION_FILE =
+ process.env.VERSION_FILE || "src/simbot_offline_inference/_version.py";
+
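+// semantic-release runs the plugins below, in the order listed, for each release
+// step (analyze commits, generate notes, prepare, publish).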
+const config = {
+ branches: [RELEASE_BRANCH],
+ plugins: [
+ [
+ "@semantic-release/commit-analyzer",
+ {
+ preset: "conventionalcommits",
+ },
+ ],
+ [
+ "@semantic-release/release-notes-generator",
+ {
+ preset: "conventionalcommits",
+ },
+ ],
+ [
+ "@semantic-release/changelog",
+ {
+ changelogFile: CHANGELOG_FILE,
+ changelogTitle:
+ "# Changelog\n\nAll notable changes to this project will be documented in this file. See\n[Conventional Commits](https://conventionalcommits.org) for commit guidelines.",
+ },
+ ],
+ [
+ "@semantic-release/exec",
+ {
+ prepareCmd: "poetry version ${nextRelease.version}",
+ },
+ ],
+ [
+ "@google/semantic-release-replace-plugin",
+ {
+ replacements: [
+ {
+ files: [VERSION_FILE],
+ ignore: ["test/*", "tests/*"],
+ from: "__version__ = [\"'].*[\"']",
+ to: '__version__ = "${nextRelease.version}"',
+ },
+ ],
+ },
+ ],
+ [
+ "@semantic-release/github",
+ {
+ assets: [
+ { path: "dist/*.tar.gz", label: "sdist" },
+ { path: "dist/*.whl", label: "wheel" },
+ ],
+ successComment: false,
+ failComment: false,
+ releasedLabels: false,
+ failTitle: false,
+ labels: false,
+ },
+ ],
+ [
+ "@semantic-release/git",
+ {
+ assets: ["pyproject.toml", VERSION_FILE, CHANGELOG_FILE],
+ },
+ ],
+ ],
+};
+
+module.exports = config;
diff --git a/.vscode/launch.json b/.vscode/launch.json
new file mode 100644
index 0000000..3dcc6a9
--- /dev/null
+++ b/.vscode/launch.json
@@ -0,0 +1,49 @@
+{
+ // Use IntelliSense to learn about possible attributes.
+ // Hover to view descriptions of existing attributes.
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
+ "version": "0.2.0",
+ "configurations": [
+ {
+ "name": "Debug Tests",
+ "type": "python",
+ "request": "launch",
+ "console": "integratedTerminal",
+ "purpose": ["debug-test"],
+ "justMyCode": false,
+ "env": {
+ "_PYTEST_RAISE": "1"
+ }
+ },
+ {
+ "name": "Run command",
+ "type": "python",
+ "request": "launch",
+ "program": "${workspaceFolder}/src/simbot_offline_inference/__main__.py",
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "subProcess": true,
+ "sudo": true
+ },
+ {
+ "name": "Generate trajectories",
+ "type": "python",
+ "request": "launch",
+ "program": "${workspaceFolder}/src/simbot_offline_inference/__main__.py",
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "subProcess": true,
+ "args": ["generate-trajectories", "./storage/cdfs/missions"]
+ },
+ {
+ "name": "Validate generated missions",
+ "type": "python",
+ "request": "launch",
+ "program": "${workspaceFolder}/src/simbot_offline_inference/__main__.py",
+ "console": "integratedTerminal",
+ "justMyCode": false,
+ "subProcess": true,
+ "args": ["validate-generated-missions"]
+ }
+ ]
+}
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..8de13bd
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,945 @@
+# Changelog
+
+All notable changes to this project will be documented in this file. See
+[Conventional Commits](https://conventionalcommits.org) for commit guidelines.
+
+## [4.33.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.32.0...v4.33.0) (2023-05-17)
+
+
+### Features
+
+* **commands:** add `--force-from-scratch` cli arg when running their evaluation to force delete any previous mission information ([ae2793c](https://github.com/emma-simbot/simbot-offline-inference/commit/ae2793c394e3cf4085f7d76ccd128c5b6e660db2))
+* **metrics:** delete evaluation metrics checkpoint at the end of a run to prevent accidentally resuming ([6e77b7a](https://github.com/emma-simbot/simbot-offline-inference/commit/6e77b7a6c35320a687f2cff246d90b7306da171e))
+* **metrics:** if the action outputs dir is empty and we are not resuming, delete the checkpoint file ([b88c03d](https://github.com/emma-simbot/simbot-offline-inference/commit/b88c03dcaabf92a1618e0a872934e8cebb5bb191))
+* **metrics:** support creating and restoring checkpoints of evaluation metrics if resuming runs ([bef8c1f](https://github.com/emma-simbot/simbot-offline-inference/commit/bef8c1f222440a3ca748bca9ada721cc31ed0081))
+* **wandb:** automatically determine whether we should start a new run instead of using an environment variable ([82d6ace](https://github.com/emma-simbot/simbot-offline-inference/commit/82d6aceadb2985522c0628da364f6d7e53bacb9b))
+* **wandb:** include the offline inference version used in the run config ([6505c7f](https://github.com/emma-simbot/simbot-offline-inference/commit/6505c7f7dd6e387acfea3c862c2c167bedfe108b))
+* **wandb:** track session ids and success per mission across runs ([1260998](https://github.com/emma-simbot/simbot-offline-inference/commit/1260998a4d1b488f4d6a89741b30dc95716991d0))
+
+
+### Bug Fixes
+
+* just manually point to the service registry path ([612315b](https://github.com/emma-simbot/simbot-offline-inference/commit/612315b8b240c0f436a3d0ae743170990617f971))
+* **metrics:** log the step number instead of trying to modify the config ([871ca3f](https://github.com/emma-simbot/simbot-offline-inference/commit/871ca3f4013aecfe660440e034895f813f7b14e8))
+* prevent type coercion for the CDFs (within `MissionTrajectory`) ([90b9613](https://github.com/emma-simbot/simbot-offline-inference/commit/90b9613b1a826df9dd2c1fefc2505b04c6a2b7e2))
+* **scripts:** symlink commands in the prepare-user-data script ([e5af1e5](https://github.com/emma-simbot/simbot-offline-inference/commit/e5af1e5fbca210072e37c8cb11d951b4ae67dcd8))
+* **settings:** boolean whether we resume the run or not ([99d3dbc](https://github.com/emma-simbot/simbot-offline-inference/commit/99d3dbc7e17dc0eee35ba73bfbd44f49e490d55c))
+* **wandb:** allow config value changes during a run ([bb9499c](https://github.com/emma-simbot/simbot-offline-inference/commit/bb9499c7f65237b615bb7dd371fedcf95f828ebe))
+* **wandb:** column name for the per mission group success rate ([e732ee8](https://github.com/emma-simbot/simbot-offline-inference/commit/e732ee839e7707128b599b29562185052b12e39f))
+* **wandb:** saving output files during the evaluation run ([ade01ff](https://github.com/emma-simbot/simbot-offline-inference/commit/ade01ff11a6c2c9d304355c91bfa3722a7350e05))
+* **wandb:** set the default success rate to 0 if there are nan's ([eec3a00](https://github.com/emma-simbot/simbot-offline-inference/commit/eec3a0028b021459dbabd3af73d89f11174d3d2b))
+* **wandb:** step needs to be an int ([b65310a](https://github.com/emma-simbot/simbot-offline-inference/commit/b65310a79294640b32b59309769514b31015bd57))
+
+## [4.32.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.31.1...v4.32.0) (2023-05-15)
+
+
+### Features
+
+* create flag to enforce/ignore successful preparation steps ([4af06aa](https://github.com/emma-simbot/simbot-offline-inference/commit/4af06aad214e57367e8be390878811f35a3c5098))
+* **script:** create script that prepares user area for running the offline inference ([951080a](https://github.com/emma-simbot/simbot-offline-inference/commit/951080a0d4b2deeda7630a1ccbe0105dedd04f75))
+
+
+### Bug Fixes
+
+* guard against `InterruptedByNewCommandBatch` ([6b8c239](https://github.com/emma-simbot/simbot-offline-inference/commit/6b8c2390304a38cfe597e42e98192a0a9259426a))
+* **scripts:** automatically setup necessary symlinks ([b45cff7](https://github.com/emma-simbot/simbot-offline-inference/commit/b45cff7584c0ac4e285e2260dc6136b1f7074cd3))
+
+## [4.31.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.31.0...v4.31.1) (2023-05-12)
+
+
+### Bug Fixes
+
+* raycast missed exception handling ([#40](https://github.com/emma-simbot/simbot-offline-inference/issues/40)) ([7a3d3c4](https://github.com/emma-simbot/simbot-offline-inference/commit/7a3d3c4abeecffb553d258873bcfaafbf5d2f025))
+
+## [4.31.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.30.0...v4.31.0) (2023-04-28)
+
+
+### Features
+
+* add setting for fastmode ([b891a94](https://github.com/emma-simbot/simbot-offline-inference/commit/b891a94f431a86459a31ae92472899b7a7b3dc9f))
+* added flag randomise start position ([#41](https://github.com/emma-simbot/simbot-offline-inference/issues/41)) ([741abc8](https://github.com/emma-simbot/simbot-offline-inference/commit/741abc8bae77aacfce0e891dd439166e75b8ae7f))
+
+
+### Bug Fixes
+
+* arena source cdfs ([be81cb8](https://github.com/emma-simbot/simbot-offline-inference/commit/be81cb89affc94d355ffaacc7765b04c3873c204))
+* restore support for running the T1 trajectories ([75d545c](https://github.com/emma-simbot/simbot-offline-inference/commit/75d545c996c8acd4959b82ca6e3071a9719411b7))
+
+## [4.30.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.9...v4.30.0) (2023-04-26)
+
+
+### Features
+
+* **T3:** add mission 1 ([455d023](https://github.com/emma-simbot/simbot-offline-inference/commit/455d023b07055ce23895e4768d647968f1e3b349))
+
+## [4.29.9](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.8...v4.29.9) (2023-04-26)
+
+
+### Bug Fixes
+
+* operate microwave challenges ([59b8b44](https://github.com/emma-simbot/simbot-offline-inference/commit/59b8b441c73d447a2e9e43c7db2488053e785951))
+
+## [4.29.8](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.7...v4.29.8) (2023-04-25)
+
+
+### Bug Fixes
+
+* **coffee unmaker:** improve plans, add pot to beans, and iterate layouts ([30a6a0d](https://github.com/emma-simbot/simbot-offline-inference/commit/30a6a0daff08057972ba940f7b776b6978831297))
+
+## [4.29.7](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.6...v4.29.7) (2023-04-25)
+
+
+### Bug Fixes
+
+* ensure the microwave is empty ([a4c454a](https://github.com/emma-simbot/simbot-offline-inference/commit/a4c454aa86792d1213172b5aa6f6aa43c5e91791))
+* remove the boss coffee mug from the colour changer missions ([eee16b4](https://github.com/emma-simbot/simbot-offline-inference/commit/eee16b45369978fc75668d64ce24d277efa0340b))
+
+## [4.29.6](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.5...v4.29.6) (2023-04-25)
+
+
+### Bug Fixes
+
+* disabling color variants with microwave challenges ([0d8ad3a](https://github.com/emma-simbot/simbot-offline-inference/commit/0d8ad3aba632293be9f1cb637d42db5ac02d9b86))
+
+## [4.29.5](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.4...v4.29.5) (2023-04-25)
+
+
+### Bug Fixes
+
+* prep condition for pickup objects from containers ([ab9a60c](https://github.com/emma-simbot/simbot-offline-inference/commit/ab9a60cc14f30c79d2354965c11470127d4dae4e))
+* prep goal condition for place object in container challenge ([40769c4](https://github.com/emma-simbot/simbot-offline-inference/commit/40769c4c72f24e23880c70706fa192cba112aa49))
+
+## [4.29.4](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.3...v4.29.4) (2023-04-25)
+
+
+### Bug Fixes
+
+* disable start colour variants for the colour changer challenges ([a1e39cb](https://github.com/emma-simbot/simbot-offline-inference/commit/a1e39cb79454a2f8c916d77c29319898fd416936))
+* remove objects that cannot be color-changed ([66451f1](https://github.com/emma-simbot/simbot-offline-inference/commit/66451f13515900b59830cc674a20ea254dcf971f))
+* update plans for the challenges ([b4269d1](https://github.com/emma-simbot/simbot-offline-inference/commit/b4269d1b9e9ab1e857a4434e9ecc1161f0bb9b00))
+
+## [4.29.3](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.2...v4.29.3) (2023-04-25)
+
+
+### Bug Fixes
+
+* sink-related challenges ([e44408c](https://github.com/emma-simbot/simbot-offline-inference/commit/e44408c22707ffd5b522e673102a8bbc0f43469d))
+* sink-related goal conditions and plans ([ed27162](https://github.com/emma-simbot/simbot-offline-inference/commit/ed271629859ca609e38270b18c66fb722fa47b70))
+
+## [4.29.2](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.1...v4.29.2) (2023-04-23)
+
+
+### Bug Fixes
+
+* target object was not flagged as ambiguous in key ([5579cda](https://github.com/emma-simbot/simbot-offline-inference/commit/5579cda66407a435ba9200b5e4aeb0d382951507))
+
+## [4.29.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.29.0...v4.29.1) (2023-04-23)
+
+
+### Bug Fixes
+
+* high level keys and plans ([b1dfde9](https://github.com/emma-simbot/simbot-offline-inference/commit/b1dfde9b82f6a64eff7a720f59a350e816c4ead8))
+* include from_receptacle in breaking challenges ([0f236ba](https://github.com/emma-simbot/simbot-offline-inference/commit/0f236ba228ae8265cc13564b07077281e0d27545))
+* remove the from receptacle from high level key in break challenges ([8db7588](https://github.com/emma-simbot/simbot-offline-inference/commit/8db75880e298f3f39e59bc51ceadb006b238ce05))
+
+## [4.29.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.28.0...v4.29.0) (2023-04-23)
+
+
+### Features
+
+* break objects on random desks ([#39](https://github.com/emma-simbot/simbot-offline-inference/issues/39)) ([4e2afe9](https://github.com/emma-simbot/simbot-offline-inference/commit/4e2afe9858d346f811b4eec709cdcf119904f3cf))
+
+
+### Bug Fixes
+
+* desks should not be unique ([9f3f1f2](https://github.com/emma-simbot/simbot-offline-inference/commit/9f3f1f24ccafe77ae26012dcc2cd7f9884d30abb))
+* do not use the colour changer to make the object the same color ([80d9a90](https://github.com/emma-simbot/simbot-offline-inference/commit/80d9a9076e39af74005d4341ed1d4ee1937a6e8e))
+
+## [4.28.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.27.0...v4.28.0) (2023-04-23)
+
+
+### Features
+
+* place bowl on plate on gravity pad ([#38](https://github.com/emma-simbot/simbot-offline-inference/issues/38)) ([e058d15](https://github.com/emma-simbot/simbot-offline-inference/commit/e058d152c8d26945e0680f3328807218398725c5))
+
+
+### Bug Fixes
+
+* check that the gravity pad contains the plate during prep subgoal ([6ec8b31](https://github.com/emma-simbot/simbot-offline-inference/commit/6ec8b31532f5c9ae3e0c3662fd9d1edaa08a20e6))
+* disable color variants for fridge/freezer challenges ([1778d72](https://github.com/emma-simbot/simbot-offline-inference/commit/1778d720632ecf629fd7d8c947679df09d530ca8))
+* update room/colour was failing because there was no 0th index ([a57bb0c](https://github.com/emma-simbot/simbot-offline-inference/commit/a57bb0ccfe9946ab025c5fd27097d3afdf0a906e))
+
+## [4.27.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.26.0...v4.27.0) (2023-04-23)
+
+
+### Features
+
+* added color variants flag ([#37](https://github.com/emma-simbot/simbot-offline-inference/issues/37)) ([6823c47](https://github.com/emma-simbot/simbot-offline-inference/commit/6823c478271c80634731d8df0e6a266a56d28c82))
+
+## [4.26.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.25.0...v4.26.0) (2023-04-23)
+
+
+### Features
+
+* breaking things with the hammer ([#36](https://github.com/emma-simbot/simbot-offline-inference/issues/36)) ([0bed877](https://github.com/emma-simbot/simbot-offline-inference/commit/0bed87733900834c3cbbc8612cdebebb434319a3))
+
+## [4.25.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.24.1...v4.25.0) (2023-04-23)
+
+
+### Features
+
+* pickup object from printer ([#35](https://github.com/emma-simbot/simbot-offline-inference/issues/35)) ([b5f1cb3](https://github.com/emma-simbot/simbot-offline-inference/commit/b5f1cb35c660149e46910fda9342dc8e0740bb1e))
+
+
+### Bug Fixes
+
+* add breakroom table to required objects list ([4e6d8b6](https://github.com/emma-simbot/simbot-offline-inference/commit/4e6d8b6daae52c837d1a0eda757239aa6acdd073))
+* dont make the spawned object into a required one ([987a287](https://github.com/emma-simbot/simbot-offline-inference/commit/987a287e8819b0b8374b72815fedc11e04a29322))
+* replace the spawned object with the printer cartridge ([172fb7a](https://github.com/emma-simbot/simbot-offline-inference/commit/172fb7a757dea6d1a4062623f64d4ad68434c246))
+
+## [4.24.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.24.0...v4.24.1) (2023-04-22)
+
+
+### Bug Fixes
+
+* explicitly ensure at least the first subgoal has been completed ([f61f810](https://github.com/emma-simbot/simbot-offline-inference/commit/f61f810b94c3b569b533475fd79a24ab4edc0981))
+* return after logging the failed trajectory ([940d400](https://github.com/emma-simbot/simbot-offline-inference/commit/940d40085493805275afa6072fd3eaab2dfaaa40))
+
+## [4.24.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.23.0...v4.24.0) (2023-04-22)
+
+
+### Features
+
+* pickup target among distractors ([#34](https://github.com/emma-simbot/simbot-offline-inference/issues/34)) ([8264204](https://github.com/emma-simbot/simbot-offline-inference/commit/826420490dfb667b4cf8030d1d1a7e209b133662))
+
+## [4.23.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.22.0...v4.23.0) (2023-04-22)
+
+
+### Features
+
+* add goal for prep steps ([f3db7ec](https://github.com/emma-simbot/simbot-offline-inference/commit/f3db7ecdb5b1fc7b9e264b9d8ae72fecc50ccd2d))
+
+## [4.22.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.21.0...v4.22.0) (2023-04-22)
+
+
+### Features
+
+* add goal for prep steps ([7742128](https://github.com/emma-simbot/simbot-offline-inference/commit/7742128de7952519e6042e3183c561e97d932e1d))
+* add goals to ensure the "objects in containers" missions are setup correctly ([606a4c3](https://github.com/emma-simbot/simbot-offline-inference/commit/606a4c3dcccac07e960310999a32aa276b932546))
+* mark the run as failed if the preparation steps did not succeed ([4830023](https://github.com/emma-simbot/simbot-offline-inference/commit/48300234304ef524fe4fb85f9c3142b3a5d03eeb))
+* mark the run as failed if the subgoal success is 0 ([f3659b3](https://github.com/emma-simbot/simbot-offline-inference/commit/f3659b35abaa77f0cb63ac4de619339e9f681c1a))
+
+
+### Bug Fixes
+
+* disable color variants usage for pickup and place missions separately ([b941892](https://github.com/emma-simbot/simbot-offline-inference/commit/b941892e05640fac81775394776ed349bf8e3c87))
+
+## [4.21.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.20.0...v4.21.0) (2023-04-22)
+
+
+### Features
+
+* add the `target-object-is-ambiguous` high-level key ([f534502](https://github.com/emma-simbot/simbot-offline-inference/commit/f534502965f217fb8e87f667f3e0d95012de9417))
+
+
+### Bug Fixes
+
+* update high-level key config within wandb tracker ([3bbc069](https://github.com/emma-simbot/simbot-offline-inference/commit/3bbc0697d9730224062a4615458862353a144399))
+
+## [4.20.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.19.1...v4.20.0) (2023-04-21)
+
+
+### Features
+
+* place object onto plate in container ([#33](https://github.com/emma-simbot/simbot-offline-inference/issues/33)) ([ae16713](https://github.com/emma-simbot/simbot-offline-inference/commit/ae16713e85483736fce93529a7b301f21d6048bc))
+
+## [4.19.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.19.0...v4.19.1) (2023-04-21)
+
+
+### Bug Fixes
+
+* wrong receptacle for target object ([c45a898](https://github.com/emma-simbot/simbot-offline-inference/commit/c45a8982b2fc7a84f0158d1689b853c90e3c28e2))
+
+## [4.19.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.18.0...v4.19.0) (2023-04-21)
+
+
+### Features
+
+* make it easier to generate trajectories in a different folder ([4215037](https://github.com/emma-simbot/simbot-offline-inference/commit/4215037258cf58945db50bfdf6e86b5ed8701c87))
+
+
+### Bug Fixes
+
+* make the output dir for generated trajectories ([fcedc65](https://github.com/emma-simbot/simbot-offline-inference/commit/fcedc65825904e3dbcda5cba3c9c67eb470b17c0))
+
+## [4.18.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.17.0...v4.18.0) (2023-04-21)
+
+
+### Features
+
+* enable carrot machine challenges ([321d7a6](https://github.com/emma-simbot/simbot-offline-inference/commit/321d7a60edb6e5c1450dac0ab195e769a1a146ed))
+
+
+### Bug Fixes
+
+* kill the unity instance before trying to restart it ([1274ae2](https://github.com/emma-simbot/simbot-offline-inference/commit/1274ae218082d77c280bb96a203416fb4b4f6b39))
+
+## [4.17.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.16.1...v4.17.0) (2023-04-21)
+
+
+### Features
+
+* stack food plate ([#32](https://github.com/emma-simbot/simbot-offline-inference/issues/32)) ([2ef14db](https://github.com/emma-simbot/simbot-offline-inference/commit/2ef14db383bb8f253b52b36291b20c978004d488))
+
+## [4.16.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.16.0...v4.16.1) (2023-04-21)
+
+
+### Bug Fixes
+
+* always remove objects from microwave ([ea0dbcd](https://github.com/emma-simbot/simbot-offline-inference/commit/ea0dbcdedb72fd18c84e5446cdbc5a56c0582890))
+
+## [4.16.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.15.1...v4.16.0) (2023-04-21)
+
+
+### Features
+
+* include stacked objects in the high level key ([3f937e7](https://github.com/emma-simbot/simbot-offline-inference/commit/3f937e76af5b7cbc1ba8c1afbf14d4c5cb654702))
+
+
+### Bug Fixes
+
+* remove the `change_color` instruction action ([d53cbaa](https://github.com/emma-simbot/simbot-offline-inference/commit/d53cbaa0e62698cde5343254be8bbdc3b77a203c))
+
+## [4.15.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.15.0...v4.15.1) (2023-04-21)
+
+
+### Bug Fixes
+
+* color changer preparation step ([cc548ec](https://github.com/emma-simbot/simbot-offline-inference/commit/cc548ec415a371431d68e45e328d1697db65a080))
+
+## [4.15.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.14.2...v4.15.0) (2023-04-21)
+
+
+### Features
+
+* Add container challenges for the warehouse sink ([#30](https://github.com/emma-simbot/simbot-offline-inference/issues/30)) ([460dbe1](https://github.com/emma-simbot/simbot-offline-inference/commit/460dbe1900af203d0a5d6dad24a1eb6a4fb7d614))
+
+
+### Bug Fixes
+
+* use color variants for both boss mug and normal mug ([a7b5371](https://github.com/emma-simbot/simbot-offline-inference/commit/a7b537191fd4729f4edef905470d1e15b8055307))
+
+## [4.14.2](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.14.1...v4.14.2) (2023-04-21)
+
+
+### Bug Fixes
+
+* incorrect function call ([af355fd](https://github.com/emma-simbot/simbot-offline-inference/commit/af355fd9b5bd7a2f89baf1d18078b05d0861b0ec))
+
+## [4.14.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.14.0...v4.14.1) (2023-04-20)
+
+
+### Bug Fixes
+
+* attempt to handle the 408 connection error ([467f5c7](https://github.com/emma-simbot/simbot-offline-inference/commit/467f5c7fc2c58a876b249c934e9e602758af1535))
+
+## [4.14.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.13.3...v4.14.0) (2023-04-20)
+
+
+### Features
+
+* include unity logs with each run ([bdea65e](https://github.com/emma-simbot/simbot-offline-inference/commit/bdea65e4e677028028b1cc991fb70d1087029810))
+
+
+### Bug Fixes
+
+* preparation steps for operate microwave missions ([e23eb24](https://github.com/emma-simbot/simbot-offline-inference/commit/e23eb249b4624f2edf772726f3da0d3963bdffc2))
+* Remove final pick ups ([#29](https://github.com/emma-simbot/simbot-offline-inference/issues/29)) ([fa71470](https://github.com/emma-simbot/simbot-offline-inference/commit/fa71470b50bcd5a6316f1965a95a02da40c7035a))
+
+## [4.13.3](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.13.2...v4.13.3) (2023-04-20)
+
+
+### Bug Fixes
+
+* Color changer interaction object in High Level Key ([#28](https://github.com/emma-simbot/simbot-offline-inference/issues/28)) ([c0468d9](https://github.com/emma-simbot/simbot-offline-inference/commit/c0468d9a4e4c039abba7d4e23685f6879a01ec4a))
+
+## [4.13.2](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.13.1...v4.13.2) (2023-04-20)
+
+
+### Bug Fixes
+
+* goal conditions for the operate printer challenges ([151647c](https://github.com/emma-simbot/simbot-offline-inference/commit/151647cfd91f35e28884d8d6e70b1e310432a4ea))
+
+## [4.13.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.13.0...v4.13.1) (2023-04-19)
+
+
+### Bug Fixes
+
+* disable the carrot machine challenges ([4badcef](https://github.com/emma-simbot/simbot-offline-inference/commit/4badcef705a815b36d0d87dcb4e510ac28e1dc50))
+
+## [4.13.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.12.1...v4.13.0) (2023-04-19)
+
+
+### Features
+
+* add more helper methods to structures ([893f0b6](https://github.com/emma-simbot/simbot-offline-inference/commit/893f0b6ac690f4fcb77973ce0ca9948423833408))
+
+
+### Bug Fixes
+
+* change starting room to robotics lab ([518425a](https://github.com/emma-simbot/simbot-offline-inference/commit/518425afe0df928959e9ee91819cd0114bdf568b))
+* let object instance IDs end in a * ([f3d7713](https://github.com/emma-simbot/simbot-offline-inference/commit/f3d7713bd6b98b84fe031c4f243e8cf61c17b879))
+* make the argument name more descriptive of what is needed ([93e1760](https://github.com/emma-simbot/simbot-offline-inference/commit/93e1760f230d7782c0383418834523e6d675a457))
+* operate printer challenges ([82ce622](https://github.com/emma-simbot/simbot-offline-inference/commit/82ce6222f12862ae7d987d8dc66cba56b70c3587))
+* **operate printer:** make sure the robotic arm is out of the way ([1dca91b](https://github.com/emma-simbot/simbot-offline-inference/commit/1dca91b45747039a54e3e62dd3b73fa863c122e2))
+* operate time machine with carrots ([1f85703](https://github.com/emma-simbot/simbot-offline-inference/commit/1f8570371a577218810a2c59ffa60ba6fd3616e0))
+* printer cartridge name in the preparation plan ([b8250fa](https://github.com/emma-simbot/simbot-offline-inference/commit/b8250fa6d61260a6ebe73d023e637c150993166f))
+* typo ([0355636](https://github.com/emma-simbot/simbot-offline-inference/commit/0355636fe90418f94c73ad994d4fc2de5bd10fd2))
+* validator condition for object instance id suffix ([d8fb10f](https://github.com/emma-simbot/simbot-offline-inference/commit/d8fb10f3cccf97334a7f542da141039f50abee7c))
+
+## [4.12.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.12.0...v4.12.1) (2023-04-19)
+
+
+### Bug Fixes
+
+* Initial room for carrot maker challenge ([#27](https://github.com/emma-simbot/simbot-offline-inference/issues/27)) ([7221c32](https://github.com/emma-simbot/simbot-offline-inference/commit/7221c3278ebeb42e1427f3034b4406fe5f8393ea))
+
+## [4.12.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.11.0...v4.12.0) (2023-04-19)
+
+
+### Features
+
+* add printer challenges ([#26](https://github.com/emma-simbot/simbot-offline-inference/issues/26)) ([f895cd4](https://github.com/emma-simbot/simbot-offline-inference/commit/f895cd43da9d001ba003a9a902498bfa7896be55))
+
+## [4.11.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.10.0...v4.11.0) (2023-04-19)
+
+
+### Features
+
+* Add carrot machine challenges ([#23](https://github.com/emma-simbot/simbot-offline-inference/issues/23)) ([dabbc47](https://github.com/emma-simbot/simbot-offline-inference/commit/dabbc474163ec653860a0757aa647b41fb54e3ff))
+
+## [4.10.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.9.0...v4.10.0) (2023-04-19)
+
+
+### Features
+
+* Add time machine on carrots ([#25](https://github.com/emma-simbot/simbot-offline-inference/issues/25)) ([7d91e43](https://github.com/emma-simbot/simbot-offline-inference/commit/7d91e430ce1a22a816dc440f0d0f7b38a8f015c1))
+
+## [4.9.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.8.0...v4.9.0) (2023-04-19)
+
+
+### Features
+
+* coffee unmaker with additional objects ([#22](https://github.com/emma-simbot/simbot-offline-inference/issues/22)) ([7f284f9](https://github.com/emma-simbot/simbot-offline-inference/commit/7f284f937ef572deb4cd9acc318d29a6006f80fd))
+
+## [4.8.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.7.0...v4.8.0) (2023-04-19)
+
+
+### Features
+
+* Add microwave challenge ([#21](https://github.com/emma-simbot/simbot-offline-inference/issues/21)) ([f62b996](https://github.com/emma-simbot/simbot-offline-inference/commit/f62b99650c73603e23b592e981718e3450f4fc91))
+* object transformations ([#19](https://github.com/emma-simbot/simbot-offline-inference/issues/19)) ([f76c4c2](https://github.com/emma-simbot/simbot-offline-inference/commit/f76c4c29bbf1e9f6926f37dbf9ca7515beb5c8bf))
+
+## [4.7.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.6.0...v4.7.0) (2023-04-19)
+
+
+### Features
+
+* support setting wandb group name from run command ([9b72cc8](https://github.com/emma-simbot/simbot-offline-inference/commit/9b72cc898e36474b00c691f9330b659bb767bf8a))
+
+
+### Bug Fixes
+
+* kill command for the experience hub ([9852ce8](https://github.com/emma-simbot/simbot-offline-inference/commit/9852ce852600549e071a9469d65b560231d4b7a5))
+* use the preparation plan to toggle the sink ([4d3c266](https://github.com/emma-simbot/simbot-offline-inference/commit/4d3c26666702eb682c8ffc9e3848db157a0b7b09))
+
+## [4.6.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.5.0...v4.6.0) (2023-04-18)
+
+
+### Features
+
+* Add more pickable objects for fridge/freezer ([#18](https://github.com/emma-simbot/simbot-offline-inference/issues/18)) ([baf6850](https://github.com/emma-simbot/simbot-offline-inference/commit/baf68504e39212ad16e9bab3fce08f4bfbca8a11))
+* save the mission trajectory file and the output file to wandb ([211bd8c](https://github.com/emma-simbot/simbot-offline-inference/commit/211bd8c47034806128fbfe6cbd3ab25c4513e019))
+
+## [4.5.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.4.1...v4.5.0) (2023-04-18)
+
+
+### Features
+
+* log the experience hub version with the wandb run ([45c17fb](https://github.com/emma-simbot/simbot-offline-inference/commit/45c17fb4bbb12605f9f2b4b3c5b7b9ea2464f58d))
+
+## [4.4.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.4.0...v4.4.1) (2023-04-17)
+
+
+### Bug Fixes
+
+* command that starts the experience hub ([f187fef](https://github.com/emma-simbot/simbot-offline-inference/commit/f187fef18711f47cbe7c7584abbc6316eac11bde))
+* command used to kill the experience hub ([5e8bd3d](https://github.com/emma-simbot/simbot-offline-inference/commit/5e8bd3d5f588b29a5bba9e4c003acdc23cd0d961))
+* if failed to go to a random viewpoint, just go to the first one in the room ([bb3d652](https://github.com/emma-simbot/simbot-offline-inference/commit/bb3d6523e6acf9e8725dc1c8318b1efcdf65730e))
+* use 2 workers to hopefully stop the experience hub from crashing ([daa17a0](https://github.com/emma-simbot/simbot-offline-inference/commit/daa17a0ab4f76dd120b1d264ac1f330954a44f44))
+
+## [4.4.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.3.0...v4.4.0) (2023-04-17)
+
+
+### Features
+
+* also send subgoal completion success rate to wandb per session ([3a789f0](https://github.com/emma-simbot/simbot-offline-inference/commit/3a789f0ece7443058b1fad536bcb92248983e44d))
+* set the session id as the run name ([1960040](https://github.com/emma-simbot/simbot-offline-inference/commit/196004061d6f08939967b3926f298b195df6a5a4))
+
+## [4.3.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.2.0...v4.3.0) (2023-04-17)
+
+
+### Features
+
+* use subprocess to run the experience hub because it's easier to consistently kill ([bfaeea2](https://github.com/emma-simbot/simbot-offline-inference/commit/bfaeea2499fb69178f8d40a53af380176f6b1f62))
+
+
+### Bug Fixes
+
+* disable fast mode ([d894ef2](https://github.com/emma-simbot/simbot-offline-inference/commit/d894ef2bde809591e97530b8ae9ce4075df70593))
+* disable look actions in random walk ([6060c33](https://github.com/emma-simbot/simbot-offline-inference/commit/6060c33601238e679aaf18d22a6f1803f7eeb486))
+* increase number of healthcheck attempts for experience hub ([a6737a2](https://github.com/emma-simbot/simbot-offline-inference/commit/a6737a27173dabafe373266022657f2fc3148067))
+* plan for placing objects in container ([f522c26](https://github.com/emma-simbot/simbot-offline-inference/commit/f522c2665c8060add37a63b691c48800fedce4b3))
+* remove initial contained items from various containers ([d53b7a0](https://github.com/emma-simbot/simbot-offline-inference/commit/d53b7a06b92f9f642979a50f2601eb317d4941b2))
+
+## [4.2.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.1.0...v4.2.0) (2023-04-16)
+
+
+### Features
+
+* track progress of generated trajectories on wandb ([cb5c1ab](https://github.com/emma-simbot/simbot-offline-inference/commit/cb5c1ab0622bb64c9401652d9aaac3aa5166cf5c))
+
+
+### Bug Fixes
+
+* set experience hub timeout to be stupidly high ([9302cca](https://github.com/emma-simbot/simbot-offline-inference/commit/9302ccacfb40b07673bf488692f0403fc8ffa72f))
+
+## [4.1.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.0.1...v4.1.0) (2023-04-16)
+
+
+### Features
+
+* store any remaining utterances that have not been sent to the action outputs ([2bf68b7](https://github.com/emma-simbot/simbot-offline-inference/commit/2bf68b7302bf05866aee510168300228c4feb785))
+
+
+### Bug Fixes
+
+* break out the loop if all the goals are complete ([5b8a18f](https://github.com/emma-simbot/simbot-offline-inference/commit/5b8a18fbc5108b7156c661fb24193f75c7d01122))
+* break out the loop if the goals have been completed ([5abe3b1](https://github.com/emma-simbot/simbot-offline-inference/commit/5abe3b15bdb2a44983d3cb79a5a2b7736c9227d1))
+
+## [4.0.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v4.0.0...v4.0.1) (2023-04-16)
+
+
+### Bug Fixes
+
+* make sure the breakroom table exists, and has space for the preparation items ([34a10cd](https://github.com/emma-simbot/simbot-offline-inference/commit/34a10cd68c5be24c35c7b89b7467dec2aa87b480))
+* preparation plan for fill object in sink challenge ([86a96b0](https://github.com/emma-simbot/simbot-offline-inference/commit/86a96b0a0833377b277f6571e2d9cb1c9d77379f))
+
+## [4.0.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v3.2.0...v4.0.0) (2023-04-16)
+
+
+### ⚠ BREAKING CHANGES
+
+* migrate challenges to using state conditions
+
+### Features
+
+* migrate challenges to using state conditions ([615b37c](https://github.com/emma-simbot/simbot-offline-inference/commit/615b37cbf7b4b80ee2ab0285ed6ad7d1cfcae24d))
+* turn on fastmode in the arena ([3325143](https://github.com/emma-simbot/simbot-offline-inference/commit/3325143c7bfc98a4d87dd46eb9ac581880fd071d))
+
+
+### Bug Fixes
+
+* "and" is no longer allowed ([1ca4655](https://github.com/emma-simbot/simbot-offline-inference/commit/1ca4655251035f6be205b59d4a0341d1c2798514))
+* ensure the reimported CDF is same as the original CDF ([df7a6b8](https://github.com/emma-simbot/simbot-offline-inference/commit/df7a6b8971332553376e8d933c8c90065a5299a4))
+* go to a random viewpoint that actually exists in the current scene ([f4a21cb](https://github.com/emma-simbot/simbot-offline-inference/commit/f4a21cb48608d07c209f7799709630a53b166743))
+* high level key action for "fill object in sink" ([af4829b](https://github.com/emma-simbot/simbot-offline-inference/commit/af4829b9b087cb3f9b26b06749bf31022c8af098))
+* remove the duplicated running of preparation steps ([4db9b7e](https://github.com/emma-simbot/simbot-offline-inference/commit/4db9b7e7b2b873c2a91f2a806d3875558780cc90))
+* use join and close when killing the experience hub process ([3c18b58](https://github.com/emma-simbot/simbot-offline-inference/commit/3c18b58c1293213320411f23b78cb0e5ba8f2f0c))
+
+## [3.2.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v3.1.2...v3.2.0) (2023-04-14)
+
+
+### Features
+
+* do not send utterances to the arena if all the goals are complete ([b8cc13c](https://github.com/emma-simbot/simbot-offline-inference/commit/b8cc13c125338e4965b03edf7f244e88aa46c46f))
+* make it easier to kill the experience hub when something goes wrong ([7e375fe](https://github.com/emma-simbot/simbot-offline-inference/commit/7e375fec74eb0c7d20038fc8e02011201e1dd9ec))
+
+
+### Bug Fixes
+
+* fill the object challenge goals ([8543bb2](https://github.com/emma-simbot/simbot-offline-inference/commit/8543bb2a7d8f238a6330761fed959bae9d945d67))
+* include a preparation plan to be able to pickup objects ([#16](https://github.com/emma-simbot/simbot-offline-inference/issues/16)) ([d86301a](https://github.com/emma-simbot/simbot-offline-inference/commit/d86301ad96dd407c6c768847e5b494cac1dbc825))
+
+## [3.1.2](https://github.com/emma-simbot/simbot-offline-inference/compare/v3.1.1...v3.1.2) (2023-04-13)
+
+
+### Bug Fixes
+
+* remove the trajectory batching ([a467340](https://github.com/emma-simbot/simbot-offline-inference/commit/a467340cca8825bbcaa475155e584dccb41d5c6d))
+
+## [3.1.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v3.1.0...v3.1.1) (2023-04-12)
+
+
+### Bug Fixes
+
+* do not randomise trajectory run order by default ([3e16279](https://github.com/emma-simbot/simbot-offline-inference/commit/3e16279bda25192d488792a0c5a297fc95252ba0))
+* get rid of the progress bar ([133b049](https://github.com/emma-simbot/simbot-offline-inference/commit/133b04938b89eff8e864fd17fa35b6702b424e21))
+* remove old "kill arena" command ([23e4cda](https://github.com/emma-simbot/simbot-offline-inference/commit/23e4cda97cac949b775835b2e17cc63206c0d6d9))
+
+## [3.1.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v3.0.0...v3.1.0) (2023-04-12)
+
+
+### Features
+
+* restart the arena after every 10 sessions ([0f9fcdd](https://github.com/emma-simbot/simbot-offline-inference/commit/0f9fcdd800e30cc0e94a047dd37ec9a43dffdc42))
+
+
+### Bug Fixes
+
+* improve the styling of the progress bar ([3191a34](https://github.com/emma-simbot/simbot-offline-inference/commit/3191a3492b4a2765f5bcaeefc43d91a6b536aa8c))
+* update the progress bar after sending the utterance ([5d698c3](https://github.com/emma-simbot/simbot-offline-inference/commit/5d698c36e0223bc46ed0d6cd12bbed32714ccbda))
+* update the progress bar more ([4ecce70](https://github.com/emma-simbot/simbot-offline-inference/commit/4ecce701c75075be3859101114b320f80f777a1d))
+
+## [3.0.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.13.0...v3.0.0) (2023-04-12)
+
+
+### ⚠ BREAKING CHANGES
+
+* use a new structure for the high-level keys
+
+### Features
+
+* do not re-run missions that have already been run ([1720664](https://github.com/emma-simbot/simbot-offline-inference/commit/1720664590646b82efd4de7f24d376b8f735c59c))
+* use a new structure for the high-level keys ([2116539](https://github.com/emma-simbot/simbot-offline-inference/commit/211653951de884c7b953111bbdd61c4fb696abd6))
+
+
+### Bug Fixes
+
+* add more state names to the arena constants ([79365b2](https://github.com/emma-simbot/simbot-offline-inference/commit/79365b289adbcd1a912f9762e3034506f63a2af4))
+* use kebab-case when converting high-level key to string ([2e2d3e8](https://github.com/emma-simbot/simbot-offline-inference/commit/2e2d3e8d271a7fb1447ddcdc1ee906fa2af4a1d9))
+
+## [2.13.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.12.0...v2.13.0) (2023-04-12)
+
+
+### Features
+
+* print a table of num challenges per high level key ([4364dbd](https://github.com/emma-simbot/simbot-offline-inference/commit/4364dbd766c18e37c8952724203209cd287ed6b5))
+
+## [2.12.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.11.0...v2.12.0) (2023-04-12)
+
+
+### Features
+
+* separate the trajectory generation from the trajectory running ([4b9cd0d](https://github.com/emma-simbot/simbot-offline-inference/commit/4b9cd0d4b78c59373351a3c79d30d426c023e3f0))
+
+## [2.11.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.10.0...v2.11.0) (2023-04-12)
+
+
+### Features
+
+* **challenge:** convert coffee into beans using the coffee unmaker ([#15](https://github.com/emma-simbot/simbot-offline-inference/issues/15)) ([8e36065](https://github.com/emma-simbot/simbot-offline-inference/commit/8e36065c350a84400d92b61c1e5cb54c9c89e88e))
+
+## [2.10.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.9.1...v2.10.0) (2023-04-12)
+
+
+### Features
+
+* ensure each object-related key is a 'readable name' in the `HighLevelKey` ([0cb022e](https://github.com/emma-simbot/simbot-offline-inference/commit/0cb022ef2fefad9825095d9bdb083fc003bb586c))
+
+## [2.9.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.9.0...v2.9.1) (2023-04-12)
+
+
+### Bug Fixes
+
+* explicitly forbid unsupported keys from the `HighLevelKey` ([aca0079](https://github.com/emma-simbot/simbot-offline-inference/commit/aca0079b905460b24e90a12dbfb99bd7136f5831))
+
+## [2.9.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.8.0...v2.9.0) (2023-04-11)
+
+
+### Features
+
+* fill trajectories ([#13](https://github.com/emma-simbot/simbot-offline-inference/issues/13)) ([420313c](https://github.com/emma-simbot/simbot-offline-inference/commit/420313ce28cb490a45cf3f0d14ef171a5d5ce035))
+
+## [2.8.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.7.0...v2.8.0) (2023-04-11)
+
+
+### Features
+
+* add challenge to clean a plate in sink ([392518a](https://github.com/emma-simbot/simbot-offline-inference/commit/392518afc26cc3741d8b47f8b296a35b435cf249))
+
+
+### Bug Fixes
+
+* object instance ids for the sink and plate ([9354196](https://github.com/emma-simbot/simbot-offline-inference/commit/9354196947139ee57ad5eddcaf4e7006fdda99c6))
+* use deepcopy on the sink and create a trajectory for every layout ([0b004a5](https://github.com/emma-simbot/simbot-offline-inference/commit/0b004a588c04083afdf4f74491b508585c4bf9a4))
+
+## [2.7.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.6.1...v2.7.0) (2023-04-11)
+
+
+### Features
+
+* lowercase the session ids (except for the prefix) ([eac8202](https://github.com/emma-simbot/simbot-offline-inference/commit/eac82027909d3d6ac14d666d4c94a7ec2f6e3dad))
+* separate the readable name from the object key when building challenges ([35fbde2](https://github.com/emma-simbot/simbot-offline-inference/commit/35fbde272867788cb44e864ce4eedd1567c125fb))
+* use readable names for the keys ([9c30b00](https://github.com/emma-simbot/simbot-offline-inference/commit/9c30b00fc30ce62d46b274c35d0db18fd20826c3))
+
+## [2.6.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.6.0...v2.6.1) (2023-04-11)
+
+
+### Bug Fixes
+
+* add more error types to ignore when randomising start position ([f866cdb](https://github.com/emma-simbot/simbot-offline-inference/commit/f866cdb1168f891da9df29a734c4e2bb65025962))
+* go back to using `str` for `str`-based structures ([8a62425](https://github.com/emma-simbot/simbot-offline-inference/commit/8a62425bd55156606165b42650ebb153633b3aed))
+
+## [2.6.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.5.0...v2.6.0) (2023-04-11)
+
+
+### Features
+
+* add challenges for broken bowls and coloured bowls ([43cb76c](https://github.com/emma-simbot/simbot-offline-inference/commit/43cb76c9893ac908703e4c4a4b51577c111d994f))
+* improve the progress bar for the challenge validator ([9e9e2d2](https://github.com/emma-simbot/simbot-offline-inference/commit/9e9e2d2607895e310ef349c3d3bc96e736581e36))
+* only use the colour changer colors ([4775e46](https://github.com/emma-simbot/simbot-offline-inference/commit/4775e46b0bcac1e0a4cf444af8754b2581a3d289))
+* shuffle the order trajectories are generated in ([33ea2fa](https://github.com/emma-simbot/simbot-offline-inference/commit/33ea2faa102a8511cfdaae77ea55263c905c3d0e))
+* support using RNG for the CDF scenes (with `floor_plan`) ([b7e1b4d](https://github.com/emma-simbot/simbot-offline-inference/commit/b7e1b4d589c6350eed2a29411dd4e155e7686324))
+* validate cdfs from generated missions ([062178c](https://github.com/emma-simbot/simbot-offline-inference/commit/062178c9c866533198fc3f1f3b2a7a9a68dab91f))
+
+
+### Bug Fixes
+
+* `required_objects` key within the `CDFScene` ([fe3bcd1](https://github.com/emma-simbot/simbot-offline-inference/commit/fe3bcd1148108a80c1879b6726574bd1636ab7eb))
+* add `__str__` methods for the `ObjectId` and `ObjectInstanceId` ([9b764be](https://github.com/emma-simbot/simbot-offline-inference/commit/9b764bea87fcb33727086553b90db8bf619e8e22))
+* change CDF `floor_plan` validation to allow for `"-1"` ([d12bfe2](https://github.com/emma-simbot/simbot-offline-inference/commit/d12bfe274da5de0f3e7c9ca03b71a0409e1d2b3d))
+* object ids for broken cords and computer monitors ([33aaffa](https://github.com/emma-simbot/simbot-offline-inference/commit/33aaffabe498dd49313b436185cf3413cae96aae))
+* remove the duplicated object state ([3d92f22](https://github.com/emma-simbot/simbot-offline-inference/commit/3d92f22cad37acabba14a437aacc806b644144a3))
+* send actions to randomise start position one-by-one ([8b31f76](https://github.com/emma-simbot/simbot-offline-inference/commit/8b31f76017ad9b80054e96df8576ebdb79ac3737))
+
+## [2.5.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.4.0...v2.5.0) (2023-04-10)
+
+
+### Features
+
+* generate all the pickup from fridge/freezer missions ([e263c93](https://github.com/emma-simbot/simbot-offline-inference/commit/e263c934e764ec0fd6c414c8bb46964fa0f606a8))
+
+## [2.4.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.3.0...v2.4.0) (2023-04-10)
+
+
+### Features
+
+* add `insert` and `vendingmachine` actions to the high-level key structure ([a6486a2](https://github.com/emma-simbot/simbot-offline-inference/commit/a6486a2fc031c9c19a2c5ab36b15b1b640cec645))
+* add command to print the high-levels keys that we have challenges for ([051043e](https://github.com/emma-simbot/simbot-offline-inference/commit/051043eac59053c09039dd16099fa5eaeb807c91))
+
+## [2.3.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.2.0...v2.3.0) (2023-04-09)
+
+
+### Features
+
+* randomise start position before challenge begins ([f78a9fb](https://github.com/emma-simbot/simbot-offline-inference/commit/f78a9fb1e6e67bc2c9d1c07403e2db9e9d6f74f6))
+
+## [2.2.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.1.0...v2.2.0) (2023-04-09)
+
+
+### Features
+
+* add 'pickup apple from fridge' mission, with coloured variants ([f431757](https://github.com/emma-simbot/simbot-offline-inference/commit/f43175720f914af22487e1c934b9b24ecf1b9b0e))
+* register challenge to pick up (coloured) apples from an open fridge ([43fd6ed](https://github.com/emma-simbot/simbot-offline-inference/commit/43fd6ed2004167896404c595504c0063e5fe9399))
+* support generating challenges from other challenges with minor modifications ([1ffbc1e](https://github.com/emma-simbot/simbot-offline-inference/commit/1ffbc1ecd79d63ee2c50f9348c52d56eca6474c9))
+
+
+### Bug Fixes
+
+* make sure the `isColorChanged` key doesn't exist already to prevent duplicates ([9555241](https://github.com/emma-simbot/simbot-offline-inference/commit/955524136a19148abd1bd53d3c46d4b2b2bfec2e))
+* replace property setters with explicit functions ([2d4038f](https://github.com/emma-simbot/simbot-offline-inference/commit/2d4038fff20e611199e79b9dcf3ade9fff80a9af))
+* using `*-is-container` when parsing high-level keys from string ([6c78d32](https://github.com/emma-simbot/simbot-offline-inference/commit/6c78d323cd3bf701b41ee83490d757078ee0a480))
+
+## [2.1.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v2.0.0...v2.1.0) (2023-04-09)
+
+
+### Features
+
+* improve generalisability of challenge builders for less boilerplate ([fe24100](https://github.com/emma-simbot/simbot-offline-inference/commit/fe24100fc68860fd17f1730c3b634bdf7b9f205b))
+
+
+### Bug Fixes
+
+* add tests and make sure the generation process works ([1a6f6ac](https://github.com/emma-simbot/simbot-offline-inference/commit/1a6f6acf8368f260824b595fe25c68dd77d8f3a2))
+
+## [2.0.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.5.0...v2.0.0) (2023-04-08)
+
+
+### ⚠ BREAKING CHANGES
+
+* be able to generate missions from high-level keys
+
+### Features
+
+* add more missions for other layouts ([1c5615d](https://github.com/emma-simbot/simbot-offline-inference/commit/1c5615d1ce506db26868491bc9ef475bd97e765d))
+* add option to send dummy actions when validating cdfs ([0de8f4b](https://github.com/emma-simbot/simbot-offline-inference/commit/0de8f4bc6fe1da3edfb79b727504bb2ef99359ef))
+* be able to generate missions from high-level keys ([1772f92](https://github.com/emma-simbot/simbot-offline-inference/commit/1772f92f03db32379a03d0db388acac78cca2878))
+* include progress to more clearly know the overall progress ([e135740](https://github.com/emma-simbot/simbot-offline-inference/commit/e1357400681ecfc31e674ac3ab23149f73805b63))
+* optionally add randomness to the session id name ([c0c43e2](https://github.com/emma-simbot/simbot-offline-inference/commit/c0c43e281bbf4a2a0fbf20290f96a692e4bec4e7))
+* upload trajectory results to s3 ([77d1532](https://github.com/emma-simbot/simbot-offline-inference/commit/77d153263128e7ea056c6655c641167eacad16bc))
+* use cloudpathlib to upload all the metrics to S3 ([986b1ce](https://github.com/emma-simbot/simbot-offline-inference/commit/986b1ce9116a85b1a5a76d0c0fa53413c2461f8d))
+
+
+### Bug Fixes
+
+* create all parents for the metric output file ([41d9372](https://github.com/emma-simbot/simbot-offline-inference/commit/41d9372d27119b9f33abb4ccc4113c2db673957f))
+* created session id needs to not have slashes ([644268b](https://github.com/emma-simbot/simbot-offline-inference/commit/644268bd3becaa28be1c0c7ee49a6965441be0eb))
+* created session id to be in the form `T.DATE/KEY-UUID` ([e94d2ff](https://github.com/emma-simbot/simbot-offline-inference/commit/e94d2ff170f244da1e49364ad8670a807a10d392))
+* env var key to enable the offline evaluation mode ([5380a4b](https://github.com/emma-simbot/simbot-offline-inference/commit/5380a4bf252e511d9ff7c9d1a11e1c5ecd3d299a))
+* generated session id that is valid as a path and uri ([c2a033d](https://github.com/emma-simbot/simbot-offline-inference/commit/c2a033d174e3f10e51588b342af54b7547a07cf8))
+* lint issues ([ad3527d](https://github.com/emma-simbot/simbot-offline-inference/commit/ad3527d1cfdd281354bf1500d8b85a93abf7f09a))
+* set a long timeout for experience hub, which gets overridden by the settings client timeout ([30b45c1](https://github.com/emma-simbot/simbot-offline-inference/commit/30b45c1badac746085ea240278fec9ac4a5b7e04))
+* simplify the provided session id prefix ([70c5971](https://github.com/emma-simbot/simbot-offline-inference/commit/70c5971b0b06a761efe66852ad69234a207e7ce2))
+
+## [1.5.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.4.0...v1.5.0) (2023-04-06)
+
+
+### Features
+
+* add command to validate cdfs within a dir ([8b65c55](https://github.com/emma-simbot/simbot-offline-inference/commit/8b65c55fbcd4330b33e2ce2d95d9c306815cd5fe))
+* add flag to enable offline evaluation mode in experience hub ([4514467](https://github.com/emma-simbot/simbot-offline-inference/commit/4514467b75407d0fe5f999f99143b42f1338190d))
+* add missions for picking up from freezer ([8108cf1](https://github.com/emma-simbot/simbot-offline-inference/commit/8108cf1ed1b93b9077be3bbe160b4dc0f454a527))
+* clone the experience hub into the storage dir ([22b91a7](https://github.com/emma-simbot/simbot-offline-inference/commit/22b91a71272f8cd22a127bf21b3042fae31af31d))
+* create new structures for the challenges and trajectories ([1e12c0b](https://github.com/emma-simbot/simbot-offline-inference/commit/1e12c0b445107020f3c0c4f869498af2650616c2))
+* improve how trajectory generation is run ([d3b8979](https://github.com/emma-simbot/simbot-offline-inference/commit/d3b897970a148e5b7b43abff40bc5351d3cfd777))
+
+
+### Bug Fixes
+
+* improve the first attempt to make the CDFs ([c19be24](https://github.com/emma-simbot/simbot-offline-inference/commit/c19be24925f8da21c3116bda2ccfdcf0ed739ed9))
+* just use a single high level key for each mission ([788ea34](https://github.com/emma-simbot/simbot-offline-inference/commit/788ea3411c0737d2ae0d0b2314b3bcb0d16437ed))
+* kill command for the arena ([605863b](https://github.com/emma-simbot/simbot-offline-inference/commit/605863bcc7922daca752c1b4f9f661761a40e780))
+* missions for pickup from fridge ([b74601c](https://github.com/emma-simbot/simbot-offline-inference/commit/b74601c7fb30e451bfee24c2ef0ce4ccc378ffdc))
+* settings need to exist before being able to run things ([951aa38](https://github.com/emma-simbot/simbot-offline-inference/commit/951aa382e2673a7cbf0c6214f352374cfe41813e))
+* the service registry path ([eb8c5c5](https://github.com/emma-simbot/simbot-offline-inference/commit/eb8c5c59ccc14fb383b5034cdbbb1f1012302bcd))
+
+## [1.4.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.3.1...v1.4.0) (2023-04-05)
+
+
+### Features
+
+* be able to validate cdfs ([faa6722](https://github.com/emma-simbot/simbot-offline-inference/commit/faa6722aa5a73671e6a1dcedc6c0234ccf0767f9))
+
+
+### Bug Fixes
+
+* if the arena/experience hub are running, do not try and start it again ([cde1077](https://github.com/emma-simbot/simbot-offline-inference/commit/cde1077b0b030c7fa3c706c72e89e0b854fc47ae))
+* make sure the experience hub dies ([5094187](https://github.com/emma-simbot/simbot-offline-inference/commit/50941874941b76df641ae79220d4b56788c5e941))
+* run command with the new experience hub version ([6ad167b](https://github.com/emma-simbot/simbot-offline-inference/commit/6ad167bbf94c643210fd23ccad50d710affc6340))
+
+## [1.3.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.3.0...v1.3.1) (2023-04-04)
+
+
+### Bug Fixes
+
+* context managers for the controllers/orchestrators ([fc27d12](https://github.com/emma-simbot/simbot-offline-inference/commit/fc27d126948705ce3683f7aabbd8253ca8c52f55))
+
+## [1.3.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.2.0...v1.3.0) (2023-04-02)
+
+
+### Features
+
+* simplify run commands and add in the backend for the web tool ([4b4ee95](https://github.com/emma-simbot/simbot-offline-inference/commit/4b4ee95a2825058ba35c879b1b2b59af50873eec))
+
+## [1.2.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.1.0...v1.2.0) (2023-04-01)
+
+
+### Features
+
+* add options to run for subset ([2391594](https://github.com/emma-simbot/simbot-offline-inference/commit/23915945e1f80cb5157554fbd37232cfc7e8c8b9))
+* create separate module/script to run the background services ([ec19cd8](https://github.com/emma-simbot/simbot-offline-inference/commit/ec19cd8efc04afd6869b4f6ac50d94e0a27aa83e))
+* dump all metrics so they can be stitched together again ([d8366ce](https://github.com/emma-simbot/simbot-offline-inference/commit/d8366ce36649a79e05fa1c48a357c97b64598b6d))
+* improve metrics calculating ([1bfa793](https://github.com/emma-simbot/simbot-offline-inference/commit/1bfa79315252a66724fc7ce81f9ebe67a41d0b90))
+* include CDFs ([d22fe14](https://github.com/emma-simbot/simbot-offline-inference/commit/d22fe1403b546c866b949992402cd2c2654b5b9d))
+* just everything from running the eval before report submission ([07b000e](https://github.com/emma-simbot/simbot-offline-inference/commit/07b000e1afb17b54ac46f5451490ad500c522626))
+* only evaluate missions that have not been evaluated yet ([453ceb9](https://github.com/emma-simbot/simbot-offline-inference/commit/453ceb9298dbe3cb3c199bfa84cb3f72621d7b49))
+* run eval on single gpu ([bf16694](https://github.com/emma-simbot/simbot-offline-inference/commit/bf166941fe5530972d4f78c909443b99ef9bbe2c))
+* set instance range in settings and send to s3 when done ([b126ffc](https://github.com/emma-simbot/simbot-offline-inference/commit/b126ffcb90d4c39f959e96c60b8006fdedcac026))
+
+
+### Bug Fixes
+
+* improve logs and healthchecks and responses ([849ba61](https://github.com/emma-simbot/simbot-offline-inference/commit/849ba61062f9cb5ef745bc1bd70a183623d9fc4d))
+* just make loads of changes to make it actually work properly ([041d40c](https://github.com/emma-simbot/simbot-offline-inference/commit/041d40c75c34a7e672ddc34bdd2df5ebcddc7b13))
+* just make loads of changes to make it actually work properly ([2a88df5](https://github.com/emma-simbot/simbot-offline-inference/commit/2a88df5716e1a7d14bcf96249feb5d31a2cab5cc))
+* make sure we do all instances and don't miss any ([f4901b5](https://github.com/emma-simbot/simbot-offline-inference/commit/f4901b58127eac81e4cb55c6d8d849c9d60feb07))
+* use the new arena executable ([c8a287d](https://github.com/emma-simbot/simbot-offline-inference/commit/c8a287dc3557339c2e63d6f0025ac9dc250d0a60))
+
+## [1.1.0](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.0.1...v1.1.0) (2023-03-19)
+
+
+### Features
+
+* handle lightweight dialogs within the context of the actions ([8647fe7](https://github.com/emma-simbot/simbot-offline-inference/commit/8647fe7da42983dac5d2aa0a66b4efd44ce79182))
+
+## [1.0.1](https://github.com/emma-simbot/simbot-offline-inference/compare/v1.0.0...v1.0.1) (2023-01-24)
+
+
+### Bug Fixes
+
+* only download and prepare T2 validation data ([54f716e](https://github.com/emma-simbot/simbot-offline-inference/commit/54f716e97ad07053313fb89997fbcc9b7225d947))
+
+## 1.0.0 (2023-01-23)
+
+
+### Features
+
+* add arena wrapper from ml toolbox ([9837f96](https://github.com/emma-simbot/simbot-offline-inference/commit/9837f9688371930388b6465ec2a86ff20cfe6691))
+* add code to orchestrate the test ([bc17922](https://github.com/emma-simbot/simbot-offline-inference/commit/bc17922989175672545b212c18a235fa6a7f90a8))
+* add experience hub dir to settings ([d292d1b](https://github.com/emma-simbot/simbot-offline-inference/commit/d292d1bea89ccf56e71363a18f8af33ca262979d))
+* add log points ([bac27af](https://github.com/emma-simbot/simbot-offline-inference/commit/bac27af447e875ba12a9bafd80229ab52b232019))
+* add metric logging for the evaluation ([c0f8ee7](https://github.com/emma-simbot/simbot-offline-inference/commit/c0f8ee7da7a06609d953702502a034d7c698a02c))
+* add run command ([0830cc5](https://github.com/emma-simbot/simbot-offline-inference/commit/0830cc5ea6f113b26dbb04137379023444165cf1))
+* add script to launch xserver ([21389ce](https://github.com/emma-simbot/simbot-offline-inference/commit/21389ce89931a7b8576508dd06b7dfc67baa2f64))
+* add scripts to download the mission data ([ef568c6](https://github.com/emma-simbot/simbot-offline-inference/commit/ef568c667236bb7064a3b1e76a88ec87609ee6ab))
+* add terraform config for creating the instance ([4ab0e6c](https://github.com/emma-simbot/simbot-offline-inference/commit/4ab0e6c0fdf553e61c873d127c0b7a50125d9422))
+* automatically prepare the file system for the evaluation ([397018b](https://github.com/emma-simbot/simbot-offline-inference/commit/397018b5d1eaaa3dc03ae0bca2920e747b63f2d5))
+* automatically update permissions and start xserver ([37a39fe](https://github.com/emma-simbot/simbot-offline-inference/commit/37a39fe302167f2b459311f1b3b0791b8aed2b27))
+* create the settings file ([1b45fcd](https://github.com/emma-simbot/simbot-offline-inference/commit/1b45fcdaed97654bb85f3df53412970139eb5802))
+* disable client timeouts on experience hub ([ed5551d](https://github.com/emma-simbot/simbot-offline-inference/commit/ed5551d37527d5595ae26e9f127270b83fe6b367))
+* install multiprocessing logging ([b767f43](https://github.com/emma-simbot/simbot-offline-inference/commit/b767f436115ac90522ee746cd55fe1983029085c))
+* set ssh key to the ec2 key ([e4a98bd](https://github.com/emma-simbot/simbot-offline-inference/commit/e4a98bd352cd43ad62d1a48acc86b126f5c4cec8))
+* setup repo ([39949b5](https://github.com/emma-simbot/simbot-offline-inference/commit/39949b597f71f64715b9aa3cf9cd5b54d10476e0))
+* setup venv and prepare trajectory data in user-data script ([4ccf28d](https://github.com/emma-simbot/simbot-offline-inference/commit/4ccf28dd82bbe18a685f805285659a8cf7b25949))
+* stop docker containers on exit too ([a369b42](https://github.com/emma-simbot/simbot-offline-inference/commit/a369b428f0213a5f3634a166f615d3c7e710d1e7))
+* use experience hub for access to the storage/docker configs ([9249bbe](https://github.com/emma-simbot/simbot-offline-inference/commit/9249bbef93291aeeaad8b896a0d3338807575192))
+* use loguru for logging in arena_orchestrator ([c9fc3ba](https://github.com/emma-simbot/simbot-offline-inference/commit/c9fc3ba6e2cfaeda4028b4ddf8617b7e78a2f9c9))
+* use rich logging ([557b006](https://github.com/emma-simbot/simbot-offline-inference/commit/557b006fb8ac523124f1fbcfa546d56c141b77ba))
+
+
+### Bug Fixes
+
+* add catch for timeout on healthcheck ([0bbab99](https://github.com/emma-simbot/simbot-offline-inference/commit/0bbab99e3bb6604535fb96453a120436b805bb29))
+* allow pickle when loading data ([7ac9ec0](https://github.com/emma-simbot/simbot-offline-inference/commit/7ac9ec0383df033c749dea215c8be72ca2f89ecf))
+* change port for the experience hub to run on ([f211885](https://github.com/emma-simbot/simbot-offline-inference/commit/f211885e8a1c5cbc3c3196ea0e69a40e5d9b681a))
+* copy the arena deps as ubuntu ([91f8c90](https://github.com/emma-simbot/simbot-offline-inference/commit/91f8c9083adb5c4970b2f224b260f7517a86a118))
+* create session id directory for auxiliary metadata ([214ccb6](https://github.com/emma-simbot/simbot-offline-inference/commit/214ccb6204bd285de6715b6fd685e139c70ca7e9))
+* creating the storage dir for cloning the experience hub ([8cc6dbf](https://github.com/emma-simbot/simbot-offline-inference/commit/8cc6dbf0637ab98ed9dcb187c88d0d203b1fa8e0))
+* disable multiprocess logging ([f8cdc4a](https://github.com/emma-simbot/simbot-offline-inference/commit/f8cdc4a6fec3135c0462e1c39bafb699db96b154))
+* do not create a symlink ([70cb36f](https://github.com/emma-simbot/simbot-offline-inference/commit/70cb36f7fa5aef5776cc98c85951352c6da8d44f))
+* do not force download the models if they exist ([5b8949e](https://github.com/emma-simbot/simbot-offline-inference/commit/5b8949ecd4ba39906ab8d45a5a8af903b0105dce))
+* do not run process as a daemon ([7ec2f2c](https://github.com/emma-simbot/simbot-offline-inference/commit/7ec2f2cad2172024f9abd99aff7f8a2f245ea59e))
+* do not start xserver within the user-data ([700f440](https://github.com/emma-simbot/simbot-offline-inference/commit/700f4406783d2d6cc543477090ce75c518f68680))
+* do not try to set up the python env on launch - it won't play nice ([9380128](https://github.com/emma-simbot/simbot-offline-inference/commit/9380128950cf6db6c706e0d97aa164769043a283))
+* explicitly define the args to run the controller api ([a97ddb4](https://github.com/emma-simbot/simbot-offline-inference/commit/a97ddb493041cb0905357358ade7d4bdcc75a6de))
+* explicitly disable observability and production ([79b43cd](https://github.com/emma-simbot/simbot-offline-inference/commit/79b43cdb55bf5b7c49141e0c58d856706cf5a910))
+* formatting ([b21c550](https://github.com/emma-simbot/simbot-offline-inference/commit/b21c5500a69eecb92660f67cca63ca8c3ff4e705))
+* improve orchestrators start order ([defb686](https://github.com/emma-simbot/simbot-offline-inference/commit/defb6864d22ade3f77be28ebb39cbf8646333330))
+* lint errors ([ada1332](https://github.com/emma-simbot/simbot-offline-inference/commit/ada13320224a04ef302046baa08cb4ca5261adae))
+* method order in class ([4a49a24](https://github.com/emma-simbot/simbot-offline-inference/commit/4a49a24c15c1908590963f26504a2b361ef962e7))
+* model storage dir ([ced8940](https://github.com/emma-simbot/simbot-offline-inference/commit/ced89404a770e9646736e2219655d34e6d1de456))
+* only need about 10 retries before it should be running ([61f6e20](https://github.com/emma-simbot/simbot-offline-inference/commit/61f6e2066d1468bb56e255704b98215a98f5f8ed))
+* order of setting orchestrators up ([f4949ef](https://github.com/emma-simbot/simbot-offline-inference/commit/f4949ef170528e3ad80475a48fe29f5cbf692dec))
+* re-enable running as a daemon ([eceb2f4](https://github.com/emma-simbot/simbot-offline-inference/commit/eceb2f4a962748eed3804d8ec3899b86f0e2da70))
+* remove dialog actions from the experience hub response actions ([e7f1d50](https://github.com/emma-simbot/simbot-offline-inference/commit/e7f1d50870ae729c346186e784d00270482ebd13))
+* remove the xserver module - it's not needed ([f1836b4](https://github.com/emma-simbot/simbot-offline-inference/commit/f1836b42d8d0e01f74ea9f20133fa28dc3ecffe8))
+* send dummy actions when loading the game ([c152998](https://github.com/emma-simbot/simbot-offline-inference/commit/c152998e7fdbbf5bf87f36d1fd8292a02dded327))
+* set arena env vars within the run ([5edb809](https://github.com/emma-simbot/simbot-offline-inference/commit/5edb809c6a4ea408ab0de988e5fb0e588ef33b73))
+* set the appconfig to a dataclass so that it hopefully loads the env vars ([31624c9](https://github.com/emma-simbot/simbot-offline-inference/commit/31624c99feed5706d06f1dcbe10eae7faf50e152))
+* set the envvars outside the function ([4192e36](https://github.com/emma-simbot/simbot-offline-inference/commit/4192e3683b468b54b5446bbab194ae9e5172e713))
+* type error because I'm pickling so we don't care ([5f0c3a3](https://github.com/emma-simbot/simbot-offline-inference/commit/5f0c3a3d03d6e3f494ebe6904d52a99d6af997e8))
+* types for paths in case they don't exist already because we create them ([05d565d](https://github.com/emma-simbot/simbot-offline-inference/commit/05d565de891567d953f261f9e6313b9a1f31ea7e))
+* unzip path for cdf data ([a035e7c](https://github.com/emma-simbot/simbot-offline-inference/commit/a035e7c50a15faa8df136bd16d51524c911c61e6))
+* use the httpx client when making the request ([5897d15](https://github.com/emma-simbot/simbot-offline-inference/commit/5897d15e5048576f37beebdbf5de67a9777aa000))
+* use the settings to hopefully run the thing ([db07a39](https://github.com/emma-simbot/simbot-offline-inference/commit/db07a39d4dd9be16a0670bff44a4a539a6b2a6f5))
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..e8b5d28
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2021 emma-heriot-watt
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..ef65e88
--- /dev/null
+++ b/README.md
@@ -0,0 +1,99 @@
+# EMMA: Offline Inference on the Alexa Arena
+
+[![Continuous Integration](https://github.com/emma-heriot-watt/offline-inference/actions/workflows/continuous_integration.yml/badge.svg)](https://github.com/emma-heriot-watt/offline-inference/actions/workflows/continuous_integration.yml)
+[![Tests](https://github.com/emma-heriot-watt/offline-inference/actions/workflows/tests.yml/badge.svg)](https://github.com/emma-heriot-watt/offline-inference/actions/workflows/tests.yml)
+
+> [!IMPORTANT]
+> If you have questions or find bugs or anything, you can contact us in our [organisation's discussion](https://github.com/orgs/emma-heriot-watt/discussions).
+
+## About
+
+We use code in this repository to generate new missions to facilitate self-play, to run the agent in the environment, and to evaluate the agent for the eval.ai leaderboard.
+
+> [!IMPORTANT]
+> We only ever ran this on an Ubuntu 20 Linux machine. This has not been tested or verified on macOS or Windows. Your mileage may vary.
+
+## Installing dependencies
+
+You can run the convenience script at `scripts/prepare-user-area.sh`.
+
+> [!CAUTION]
+> I **HIGHLY RECOMMEND** reading the script to know what it does, because you might not need all of it!
+> The convenience script makes some sudo-based changes to your file system, so if you don't want to permanently alter your machine, read it before running it!
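+
+If you have read it and do want everything it does, a minimal invocation (assuming you run it from the repository root with bash available) looks like this:
+
+```bash
+# Review the script first, then run it from the repository root
+bash scripts/prepare-user-area.sh
+```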
+
+## Running things
+
+> [!TIP]
+> If you need sudo to run Docker, prefix the `poetry run` command with: `sudo -E env PATH=$PATH`.
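+
+For example, applying that prefix to the background-services command used in the steps below looks roughly like this (illustrative only; use whichever command you actually need to run):
+
+```bash
+# Preserve the environment and PATH so poetry still resolves correctly under sudo
+sudo -E env PATH=$PATH poetry run python -m simbot_offline_inference run-background-services
+```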
+
+### T1 evaluation (the one for [eval.ai](https://eval.ai/web/challenges/challenge-page/1903/overview))
+
+When running T1, progress is sent to [wandb:emma-simbot/alexa-arena-evaluation](https://wandb.ai/emma-simbot/alexa-arena-evaluation). All session IDs are prefixed with `T1-`.
+
+#### Steps
+
+1. Create a new tmux session.
+2. In one tmux pane, run `sudo /usr/bin/X :1 &`
+3. In a second pane, run:
+
+ ```bash
+ poetry run python -m simbot_offline_inference run-background-services
+ ```
+
+4. Finally, in a third pane, run:
+
+ ```bash
+ poetry run python -m simbot_offline_inference run-their-evaluation
+ ```
+
+5. Let it run.
+
+### Running the trajectory generation
+
+When running trajectories, each one is a new "run", and all the runs are tracked at [wandb:emma-simbot/arena-high-level-trajectories](https://wandb.ai/emma-simbot/arena-high-level-trajectories).
+
+#### Steps
+
+1. Create a new tmux session.
+2. In one tmux pane, run `sudo /usr/bin/X :1 &`
+3. In a second pane, run:
+
+ ```bash
+ poetry run python -m simbot_offline_inference run-background-services
+ ```
+
+4. Finally, in a third pane, run:
+
+ ```bash
+ poetry run python -m simbot_offline_inference generate-trajectories
+ ```
+
+5. Let it run.
diff --git a/poetry.lock b/poetry.lock
new file mode 100644
index 0000000..87abdb8
--- /dev/null
+++ b/poetry.lock
@@ -0,0 +1,3562 @@
+# This file is automatically @generated by Poetry 1.7.1 and should not be changed by hand.
+
+[[package]]
+name = "anyio"
+version = "4.1.0"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "anyio-4.1.0-py3-none-any.whl", hash = "sha256:56a415fbc462291813a94528a779597226619c8e78af7de0507333f700011e5f"},
+ {file = "anyio-4.1.0.tar.gz", hash = "sha256:5a0bec7085176715be77df87fc66d6c9d70626bd752fcc85f57cdbee5b3760da"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx (>=7)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
+test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (>=0.23)"]
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = "*"
+files = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+
+[[package]]
+name = "astor"
+version = "0.8.1"
+description = "Read/rewrite/write Python ASTs"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,>=2.7"
+files = [
+ {file = "astor-0.8.1-py2.py3-none-any.whl", hash = "sha256:070a54e890cefb5b3739d19f30f5a5ec840ffc9c50ffa7d23cc9fc1a38ebbfc5"},
+ {file = "astor-0.8.1.tar.gz", hash = "sha256:6a6effda93f4e1ce9f618779b2dd1d9d84f1e32812c23a29b3fff6fd7f63fa5e"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "bandit"
+version = "1.7.5"
+description = "Security oriented static analyser for python code."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "bandit-1.7.5-py3-none-any.whl", hash = "sha256:75665181dc1e0096369112541a056c59d1c5f66f9bb74a8d686c3c362b83f549"},
+ {file = "bandit-1.7.5.tar.gz", hash = "sha256:bdfc739baa03b880c2d15d0431b31c658ffc348e907fe197e54e0389dd59e11e"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.3.9", markers = "platform_system == \"Windows\""}
+GitPython = ">=1.0.1"
+PyYAML = ">=5.3.1"
+rich = "*"
+stevedore = ">=1.20.0"
+
+[package.extras]
+test = ["beautifulsoup4 (>=4.8.0)", "coverage (>=4.5.4)", "fixtures (>=3.0.0)", "flake8 (>=4.0.0)", "pylint (==1.9.4)", "stestr (>=2.5.0)", "testscenarios (>=0.5.0)", "testtools (>=2.3.0)", "tomli (>=1.1.0)"]
+toml = ["tomli (>=1.1.0)"]
+yaml = ["PyYAML"]
+
+[[package]]
+name = "black"
+version = "23.11.0"
+description = "The uncompromising code formatter."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "black-23.11.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:dbea0bb8575c6b6303cc65017b46351dc5953eea5c0a59d7b7e3a2d2f433a911"},
+ {file = "black-23.11.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:412f56bab20ac85927f3a959230331de5614aecda1ede14b373083f62ec24e6f"},
+ {file = "black-23.11.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d136ef5b418c81660ad847efe0e55c58c8208b77a57a28a503a5f345ccf01394"},
+ {file = "black-23.11.0-cp310-cp310-win_amd64.whl", hash = "sha256:6c1cac07e64433f646a9a838cdc00c9768b3c362805afc3fce341af0e6a9ae9f"},
+ {file = "black-23.11.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:cf57719e581cfd48c4efe28543fea3d139c6b6f1238b3f0102a9c73992cbb479"},
+ {file = "black-23.11.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:698c1e0d5c43354ec5d6f4d914d0d553a9ada56c85415700b81dc90125aac244"},
+ {file = "black-23.11.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:760415ccc20f9e8747084169110ef75d545f3b0932ee21368f63ac0fee86b221"},
+ {file = "black-23.11.0-cp311-cp311-win_amd64.whl", hash = "sha256:58e5f4d08a205b11800332920e285bd25e1a75c54953e05502052738fe16b3b5"},
+ {file = "black-23.11.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:45aa1d4675964946e53ab81aeec7a37613c1cb71647b5394779e6efb79d6d187"},
+ {file = "black-23.11.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c44b7211a3a0570cc097e81135faa5f261264f4dfaa22bd5ee2875a4e773bd6"},
+ {file = "black-23.11.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2a9acad1451632021ee0d146c8765782a0c3846e0e0ea46659d7c4f89d9b212b"},
+ {file = "black-23.11.0-cp38-cp38-win_amd64.whl", hash = "sha256:fc7f6a44d52747e65a02558e1d807c82df1d66ffa80a601862040a43ec2e3142"},
+ {file = "black-23.11.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7f622b6822f02bfaf2a5cd31fdb7cd86fcf33dab6ced5185c35f5db98260b055"},
+ {file = "black-23.11.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:250d7e60f323fcfc8ea6c800d5eba12f7967400eb6c2d21ae85ad31c204fb1f4"},
+ {file = "black-23.11.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5133f5507007ba08d8b7b263c7aa0f931af5ba88a29beacc4b2dc23fcefe9c06"},
+ {file = "black-23.11.0-cp39-cp39-win_amd64.whl", hash = "sha256:421f3e44aa67138ab1b9bfbc22ee3780b22fa5b291e4db8ab7eee95200726b07"},
+ {file = "black-23.11.0-py3-none-any.whl", hash = "sha256:54caaa703227c6e0c87b76326d0862184729a69b73d3b7305b6288e1d830067e"},
+ {file = "black-23.11.0.tar.gz", hash = "sha256:4c68855825ff432d197229846f971bc4d6666ce90492e5b02013bcaca4d9ab05"},
+]
+
+[package.dependencies]
+click = ">=8.0.0"
+mypy-extensions = ">=0.4.3"
+packaging = ">=22.0"
+pathspec = ">=0.9.0"
+platformdirs = ">=2"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = {version = ">=4.0.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+colorama = ["colorama (>=0.4.3)"]
+d = ["aiohttp (>=3.7.4)"]
+jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"]
+uvloop = ["uvloop (>=0.15.2)"]
+
+[[package]]
+name = "blinker"
+version = "1.7.0"
+description = "Fast, simple object-to-object and broadcast signaling"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "blinker-1.7.0-py3-none-any.whl", hash = "sha256:c3f865d4d54db7abc53758a01601cf343fe55b84c1de4e3fa910e420b438d5b9"},
+ {file = "blinker-1.7.0.tar.gz", hash = "sha256:e6820ff6fa4e4d1d8e2747c2283749c3f547e4fee112b98555cdcdae32996182"},
+]
+
+[[package]]
+name = "boto3"
+version = "1.33.7"
+description = "The AWS SDK for Python"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "boto3-1.33.7-py3-none-any.whl", hash = "sha256:d12467fb3a64d359b0bda0570a8163a5859fcac13e786f2a3db0392523178556"},
+ {file = "boto3-1.33.7.tar.gz", hash = "sha256:eed0f7df91066b6ac63a53d16459ac082458d57061bedf766135d9e1c2b75a6b"},
+]
+
+[package.dependencies]
+botocore = ">=1.33.7,<1.34.0"
+jmespath = ">=0.7.1,<2.0.0"
+s3transfer = ">=0.8.2,<0.9.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.21.0,<2.0a0)"]
+
+[[package]]
+name = "botocore"
+version = "1.33.7"
+description = "Low-level, data-driven core of boto 3."
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "botocore-1.33.7-py3-none-any.whl", hash = "sha256:71ec0e85b996cf9def3dd8f4ca6cb4a9fd3a614aa4c9c7cbf33f2f68e1d0649a"},
+ {file = "botocore-1.33.7.tar.gz", hash = "sha256:b2299bc13bb8c0928edc98bf4594deb14cba2357536120f63772027a16ce7374"},
+]
+
+[package.dependencies]
+jmespath = ">=0.7.1,<2.0.0"
+python-dateutil = ">=2.1,<3.0.0"
+urllib3 = {version = ">=1.25.4,<1.27", markers = "python_version < \"3.10\""}
+
+[package.extras]
+crt = ["awscrt (==0.19.17)"]
+
+[[package]]
+name = "certifi"
+version = "2023.11.17"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2023.11.17-py3-none-any.whl", hash = "sha256:e036ab49d5b79556f99cfc2d9320b34cfbe5be05c5871b51de9329f0603b0474"},
+ {file = "certifi-2023.11.17.tar.gz", hash = "sha256:9b469f3a900bf28dc19b8cfbf8019bf47f7fdd1a65a1d4ffb98fc14166beb4d1"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.16.0"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cffi-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6b3d6606d369fc1da4fd8c357d026317fbb9c9b75d36dc16e90e84c26854b088"},
+ {file = "cffi-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ac0f5edd2360eea2f1daa9e26a41db02dd4b0451b48f7c318e217ee092a213e9"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7e61e3e4fa664a8588aa25c883eab612a188c725755afff6289454d6362b9673"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a72e8961a86d19bdb45851d8f1f08b041ea37d2bd8d4fd19903bc3083d80c896"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5b50bf3f55561dac5438f8e70bfcdfd74543fd60df5fa5f62d94e5867deca684"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7651c50c8c5ef7bdb41108b7b8c5a83013bfaa8a935590c5d74627c047a583c7"},
+ {file = "cffi-1.16.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e4108df7fe9b707191e55f33efbcb2d81928e10cea45527879a4749cbe472614"},
+ {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:32c68ef735dbe5857c810328cb2481e24722a59a2003018885514d4c09af9743"},
+ {file = "cffi-1.16.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:673739cb539f8cdaa07d92d02efa93c9ccf87e345b9a0b556e3ecc666718468d"},
+ {file = "cffi-1.16.0-cp310-cp310-win32.whl", hash = "sha256:9f90389693731ff1f659e55c7d1640e2ec43ff725cc61b04b2f9c6d8d017df6a"},
+ {file = "cffi-1.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:e6024675e67af929088fda399b2094574609396b1decb609c55fa58b028a32a1"},
+ {file = "cffi-1.16.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b84834d0cf97e7d27dd5b7f3aca7b6e9263c56308ab9dc8aae9784abb774d404"},
+ {file = "cffi-1.16.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:1b8ebc27c014c59692bb2664c7d13ce7a6e9a629be20e54e7271fa696ff2b417"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ee07e47c12890ef248766a6e55bd38ebfb2bb8edd4142d56db91b21ea68b7627"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8a9d3ebe49f084ad71f9269834ceccbf398253c9fac910c4fd7053ff1386936"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e70f54f1796669ef691ca07d046cd81a29cb4deb1e5f942003f401c0c4a2695d"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5bf44d66cdf9e893637896c7faa22298baebcd18d1ddb6d2626a6e39793a1d56"},
+ {file = "cffi-1.16.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7b78010e7b97fef4bee1e896df8a4bbb6712b7f05b7ef630f9d1da00f6444d2e"},
+ {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:c6a164aa47843fb1b01e941d385aab7215563bb8816d80ff3a363a9f8448a8dc"},
+ {file = "cffi-1.16.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e09f3ff613345df5e8c3667da1d918f9149bd623cd9070c983c013792a9a62eb"},
+ {file = "cffi-1.16.0-cp311-cp311-win32.whl", hash = "sha256:2c56b361916f390cd758a57f2e16233eb4f64bcbeee88a4881ea90fca14dc6ab"},
+ {file = "cffi-1.16.0-cp311-cp311-win_amd64.whl", hash = "sha256:db8e577c19c0fda0beb7e0d4e09e0ba74b1e4c092e0e40bfa12fe05b6f6d75ba"},
+ {file = "cffi-1.16.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:fa3a0128b152627161ce47201262d3140edb5a5c3da88d73a1b790a959126956"},
+ {file = "cffi-1.16.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:68e7c44931cc171c54ccb702482e9fc723192e88d25a0e133edd7aff8fcd1f6e"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:abd808f9c129ba2beda4cfc53bde801e5bcf9d6e0f22f095e45327c038bfe68e"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:88e2b3c14bdb32e440be531ade29d3c50a1a59cd4e51b1dd8b0865c54ea5d2e2"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcc8eb6d5902bb1cf6dc4f187ee3ea80a1eba0a89aba40a5cb20a5087d961357"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b7be2d771cdba2942e13215c4e340bfd76398e9227ad10402a8767ab1865d2e6"},
+ {file = "cffi-1.16.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e715596e683d2ce000574bae5d07bd522c781a822866c20495e52520564f0969"},
+ {file = "cffi-1.16.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:2d92b25dbf6cae33f65005baf472d2c245c050b1ce709cc4588cdcdd5495b520"},
+ {file = "cffi-1.16.0-cp312-cp312-win32.whl", hash = "sha256:b2ca4e77f9f47c55c194982e10f058db063937845bb2b7a86c84a6cfe0aefa8b"},
+ {file = "cffi-1.16.0-cp312-cp312-win_amd64.whl", hash = "sha256:68678abf380b42ce21a5f2abde8efee05c114c2fdb2e9eef2efdb0257fba1235"},
+ {file = "cffi-1.16.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0c9ef6ff37e974b73c25eecc13952c55bceed9112be2d9d938ded8e856138bcc"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a09582f178759ee8128d9270cd1344154fd473bb77d94ce0aeb2a93ebf0feaf0"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e760191dd42581e023a68b758769e2da259b5d52e3103c6060ddc02c9edb8d7b"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:80876338e19c951fdfed6198e70bc88f1c9758b94578d5a7c4c91a87af3cf31c"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a6a14b17d7e17fa0d207ac08642c8820f84f25ce17a442fd15e27ea18d67c59b"},
+ {file = "cffi-1.16.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6602bc8dc6f3a9e02b6c22c4fc1e47aa50f8f8e6d3f78a5e16ac33ef5fefa324"},
+ {file = "cffi-1.16.0-cp38-cp38-win32.whl", hash = "sha256:131fd094d1065b19540c3d72594260f118b231090295d8c34e19a7bbcf2e860a"},
+ {file = "cffi-1.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:31d13b0f99e0836b7ff893d37af07366ebc90b678b6664c955b54561fc36ef36"},
+ {file = "cffi-1.16.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:582215a0e9adbe0e379761260553ba11c58943e4bbe9c36430c4ca6ac74b15ed"},
+ {file = "cffi-1.16.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b29ebffcf550f9da55bec9e02ad430c992a87e5f512cd63388abb76f1036d8d2"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dc9b18bf40cc75f66f40a7379f6a9513244fe33c0e8aa72e2d56b0196a7ef872"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9cb4a35b3642fc5c005a6755a5d17c6c8b6bcb6981baf81cea8bfbc8903e8ba8"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b86851a328eedc692acf81fb05444bdf1891747c25af7529e39ddafaf68a4f3f"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c0f31130ebc2d37cdd8e44605fb5fa7ad59049298b3f745c74fa74c62fbfcfc4"},
+ {file = "cffi-1.16.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f8e709127c6c77446a8c0a8c8bf3c8ee706a06cd44b1e827c3e6a2ee6b8c098"},
+ {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:748dcd1e3d3d7cd5443ef03ce8685043294ad6bd7c02a38d1bd367cfd968e000"},
+ {file = "cffi-1.16.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8895613bcc094d4a1b2dbe179d88d7fb4a15cee43c052e8885783fac397d91fe"},
+ {file = "cffi-1.16.0-cp39-cp39-win32.whl", hash = "sha256:ed86a35631f7bfbb28e108dd96773b9d5a6ce4811cf6ea468bb6a359b256b1e4"},
+ {file = "cffi-1.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:3686dffb02459559c74dd3d81748269ffb0eb027c39a6fc99502de37d501faa8"},
+ {file = "cffi-1.16.0.tar.gz", hash = "sha256:bcb3ef43e58665bbda2fb198698fcae6776483e0c4a631aa5647806c25e02cc0"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "cfgv"
+version = "3.4.0"
+description = "Validate configuration and produce human readable error messages."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"},
+ {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"},
+]
+
+[[package]]
+name = "charset-normalizer"
+version = "3.3.2"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"},
+ {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"},
+ {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"},
+ {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"},
+ {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"},
+ {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"},
+ {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"},
+ {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "cloudpathlib"
+version = "0.16.0"
+description = "pathlib-style classes for cloud storage services."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cloudpathlib-0.16.0-py3-none-any.whl", hash = "sha256:f46267556bf91f03db52b5df7a152548596a15aabca1c8731ef32b0b25a1a6a3"},
+ {file = "cloudpathlib-0.16.0.tar.gz", hash = "sha256:cdfcd35d46d529587d744154a0bdf962aca953b725c8784cd2ec478354ea63a3"},
+]
+
+[package.dependencies]
+boto3 = {version = "*", optional = true, markers = "extra == \"s3\""}
+typing_extensions = {version = ">4", markers = "python_version < \"3.11\""}
+
+[package.extras]
+all = ["cloudpathlib[azure]", "cloudpathlib[gs]", "cloudpathlib[s3]"]
+azure = ["azure-storage-blob (>=12)"]
+gs = ["google-cloud-storage"]
+s3 = ["boto3"]
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "convert-case"
+version = "1.2.3"
+description = "Convert between string cases with built-in case inference."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "convert-case-1.2.3.tar.gz", hash = "sha256:a8c4329e47233a2b16cac3c5d020e8ba0305293efbe22a6d80f8ffddf049703f"},
+ {file = "convert_case-1.2.3-py3-none-any.whl", hash = "sha256:ec8884050ca548e990666f82cba7ae2edfaa3c85dbead3042c2fd663b292373a"},
+]
+
+[package.extras]
+all = ["assertpy", "autoflake", "bandit", "black", "bump2version", "coverage", "freezegun", "isort", "mock", "mypy", "pylint", "pytest", "pytest-mocha", "pytest-sugar", "quickdocs", "tox", "twine", "wheel"]
+formatters = ["autoflake", "black", "isort"]
+linters = ["bandit", "mypy", "pylint"]
+release = ["bump2version", "quickdocs", "twine", "wheel"]
+tests = ["assertpy", "coverage", "freezegun", "mock", "pytest", "pytest-mocha", "pytest-sugar", "tox"]
+
+[[package]]
+name = "coverage"
+version = "7.3.2"
+description = "Code coverage measurement for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "coverage-7.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d872145f3a3231a5f20fd48500274d7df222e291d90baa2026cc5152b7ce86bf"},
+ {file = "coverage-7.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:310b3bb9c91ea66d59c53fa4989f57d2436e08f18fb2f421a1b0b6b8cc7fffda"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f47d39359e2c3779c5331fc740cf4bce6d9d680a7b4b4ead97056a0ae07cb49a"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:aa72dbaf2c2068404b9870d93436e6d23addd8bbe9295f49cbca83f6e278179c"},
+ {file = "coverage-7.3.2-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:beaa5c1b4777f03fc63dfd2a6bd820f73f036bfb10e925fce067b00a340d0f3f"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:dbc1b46b92186cc8074fee9d9fbb97a9dd06c6cbbef391c2f59d80eabdf0faa6"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:315a989e861031334d7bee1f9113c8770472db2ac484e5b8c3173428360a9148"},
+ {file = "coverage-7.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d1bc430677773397f64a5c88cb522ea43175ff16f8bfcc89d467d974cb2274f9"},
+ {file = "coverage-7.3.2-cp310-cp310-win32.whl", hash = "sha256:a889ae02f43aa45032afe364c8ae84ad3c54828c2faa44f3bfcafecb5c96b02f"},
+ {file = "coverage-7.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:c0ba320de3fb8c6ec16e0be17ee1d3d69adcda99406c43c0409cb5c41788a611"},
+ {file = "coverage-7.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ac8c802fa29843a72d32ec56d0ca792ad15a302b28ca6203389afe21f8fa062c"},
+ {file = "coverage-7.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:89a937174104339e3a3ffcf9f446c00e3a806c28b1841c63edb2b369310fd074"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e267e9e2b574a176ddb983399dec325a80dbe161f1a32715c780b5d14b5f583a"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2443cbda35df0d35dcfb9bf8f3c02c57c1d6111169e3c85fc1fcc05e0c9f39a3"},
+ {file = "coverage-7.3.2-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4175e10cc8dda0265653e8714b3174430b07c1dca8957f4966cbd6c2b1b8065a"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0cbf38419fb1a347aaf63481c00f0bdc86889d9fbf3f25109cf96c26b403fda1"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:5c913b556a116b8d5f6ef834038ba983834d887d82187c8f73dec21049abd65c"},
+ {file = "coverage-7.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:1981f785239e4e39e6444c63a98da3a1db8e971cb9ceb50a945ba6296b43f312"},
+ {file = "coverage-7.3.2-cp311-cp311-win32.whl", hash = "sha256:43668cabd5ca8258f5954f27a3aaf78757e6acf13c17604d89648ecc0cc66640"},
+ {file = "coverage-7.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:e10c39c0452bf6e694511c901426d6b5ac005acc0f78ff265dbe36bf81f808a2"},
+ {file = "coverage-7.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:4cbae1051ab791debecc4a5dcc4a1ff45fc27b91b9aee165c8a27514dd160836"},
+ {file = "coverage-7.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12d15ab5833a997716d76f2ac1e4b4d536814fc213c85ca72756c19e5a6b3d63"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3c7bba973ebee5e56fe9251300c00f1579652587a9f4a5ed8404b15a0471f216"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fe494faa90ce6381770746077243231e0b83ff3f17069d748f645617cefe19d4"},
+ {file = "coverage-7.3.2-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6e9589bd04d0461a417562649522575d8752904d35c12907d8c9dfeba588faf"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:d51ac2a26f71da1b57f2dc81d0e108b6ab177e7d30e774db90675467c847bbdf"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:99b89d9f76070237975b315b3d5f4d6956ae354a4c92ac2388a5695516e47c84"},
+ {file = "coverage-7.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:fa28e909776dc69efb6ed975a63691bc8172b64ff357e663a1bb06ff3c9b589a"},
+ {file = "coverage-7.3.2-cp312-cp312-win32.whl", hash = "sha256:289fe43bf45a575e3ab10b26d7b6f2ddb9ee2dba447499f5401cfb5ecb8196bb"},
+ {file = "coverage-7.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:7dbc3ed60e8659bc59b6b304b43ff9c3ed858da2839c78b804973f613d3e92ed"},
+ {file = "coverage-7.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f94b734214ea6a36fe16e96a70d941af80ff3bfd716c141300d95ebc85339738"},
+ {file = "coverage-7.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:af3d828d2c1cbae52d34bdbb22fcd94d1ce715d95f1a012354a75e5913f1bda2"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:630b13e3036e13c7adc480ca42fa7afc2a5d938081d28e20903cf7fd687872e2"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c9eacf273e885b02a0273bb3a2170f30e2d53a6d53b72dbe02d6701b5296101c"},
+ {file = "coverage-7.3.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f17966e861ff97305e0801134e69db33b143bbfb36436efb9cfff6ec7b2fd9"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b4275802d16882cf9c8b3d057a0839acb07ee9379fa2749eca54efbce1535b82"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:72c0cfa5250f483181e677ebc97133ea1ab3eb68645e494775deb6a7f6f83901"},
+ {file = "coverage-7.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:cb536f0dcd14149425996821a168f6e269d7dcd2c273a8bff8201e79f5104e76"},
+ {file = "coverage-7.3.2-cp38-cp38-win32.whl", hash = "sha256:307adb8bd3abe389a471e649038a71b4eb13bfd6b7dd9a129fa856f5c695cf92"},
+ {file = "coverage-7.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:88ed2c30a49ea81ea3b7f172e0269c182a44c236eb394718f976239892c0a27a"},
+ {file = "coverage-7.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b631c92dfe601adf8f5ebc7fc13ced6bb6e9609b19d9a8cd59fa47c4186ad1ce"},
+ {file = "coverage-7.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:d3d9df4051c4a7d13036524b66ecf7a7537d14c18a384043f30a303b146164e9"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5f7363d3b6a1119ef05015959ca24a9afc0ea8a02c687fe7e2d557705375c01f"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f11cc3c967a09d3695d2a6f03fb3e6236622b93be7a4b5dc09166a861be6d25"},
+ {file = "coverage-7.3.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:149de1d2401ae4655c436a3dced6dd153f4c3309f599c3d4bd97ab172eaf02d9"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:3a4006916aa6fee7cd38db3bfc95aa9c54ebb4ffbfc47c677c8bba949ceba0a6"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9028a3871280110d6e1aa2df1afd5ef003bab5fb1ef421d6dc748ae1c8ef2ebc"},
+ {file = "coverage-7.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9f805d62aec8eb92bab5b61c0f07329275b6f41c97d80e847b03eb894f38d083"},
+ {file = "coverage-7.3.2-cp39-cp39-win32.whl", hash = "sha256:d1c88ec1a7ff4ebca0219f5b1ef863451d828cccf889c173e1253aa84b1e07ce"},
+ {file = "coverage-7.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b4767da59464bb593c07afceaddea61b154136300881844768037fd5e859353f"},
+ {file = "coverage-7.3.2-pp38.pp39.pp310-none-any.whl", hash = "sha256:ae97af89f0fbf373400970c0a21eef5aa941ffeed90aee43650b81f7d7f47637"},
+ {file = "coverage-7.3.2.tar.gz", hash = "sha256:be32ad29341b0170e795ca590e1c07e81fc061cb5b10c74ce7203491484404ef"},
+]
+
+[package.dependencies]
+tomli = {version = "*", optional = true, markers = "python_full_version <= \"3.11.0a6\" and extra == \"toml\""}
+
+[package.extras]
+toml = ["tomli"]
+
+[[package]]
+name = "darglint"
+version = "1.8.1"
+description = "A utility for ensuring Google-style docstrings stay up to date with the source code."
+optional = false
+python-versions = ">=3.6,<4.0"
+files = [
+ {file = "darglint-1.8.1-py3-none-any.whl", hash = "sha256:5ae11c259c17b0701618a20c3da343a3eb98b3bc4b5a83d31cdd94f5ebdced8d"},
+ {file = "darglint-1.8.1.tar.gz", hash = "sha256:080d5106df149b199822e7ee7deb9c012b49891538f14a11be681044f0bb20da"},
+]
+
+[[package]]
+name = "decopatch"
+version = "1.4.10"
+description = "Create decorators easily in python."
+optional = false
+python-versions = "*"
+files = [
+ {file = "decopatch-1.4.10-py2.py3-none-any.whl", hash = "sha256:e151f7f93de2b1b3fd3f3272dcc7cefd1a69f68ec1c2d8e288ecd9deb36dc5f7"},
+ {file = "decopatch-1.4.10.tar.gz", hash = "sha256:957f49c93f4150182c23f8fb51d13bb3213e0f17a79e09c8cca7057598b55720"},
+]
+
+[package.dependencies]
+makefun = ">=1.5.0"
+
+[[package]]
+name = "deepdiff"
+version = "6.7.1"
+description = "Deep Difference and Search of any Python object/data. Recreate objects by adding adding deltas to each other."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "deepdiff-6.7.1-py3-none-any.whl", hash = "sha256:58396bb7a863cbb4ed5193f548c56f18218060362311aa1dc36397b2f25108bd"},
+ {file = "deepdiff-6.7.1.tar.gz", hash = "sha256:b367e6fa6caac1c9f500adc79ada1b5b1242c50d5f716a1a4362030197847d30"},
+]
+
+[package.dependencies]
+ordered-set = ">=4.0.2,<4.2.0"
+
+[package.extras]
+cli = ["click (==8.1.3)", "pyyaml (==6.0.1)"]
+optimize = ["orjson"]
+
+[[package]]
+name = "deepmerge"
+version = "1.1.0"
+description = "a toolset to deeply merge python dictionaries."
+optional = false
+python-versions = "*"
+files = [
+ {file = "deepmerge-1.1.0-py3-none-any.whl", hash = "sha256:59e6ef80b77dc52af3882a1ea78da22bcfc91ae9cdabc0c80729049fe295ff8b"},
+ {file = "deepmerge-1.1.0.tar.gz", hash = "sha256:4c27a0db5de285e1a7ceac7dbc1531deaa556b627dea4900c8244581ecdfea2d"},
+]
+
+[[package]]
+name = "distlib"
+version = "0.3.7"
+description = "Distribution utilities"
+optional = false
+python-versions = "*"
+files = [
+ {file = "distlib-0.3.7-py2.py3-none-any.whl", hash = "sha256:2e24928bc811348f0feb63014e97aaae3037f2cf48712d51ae61df7fd6075057"},
+ {file = "distlib-0.3.7.tar.gz", hash = "sha256:9dafe54b34a028eafd95039d5e5d4851a13734540f1331060d31c9916e7147a8"},
+]
+
+[[package]]
+name = "dnspython"
+version = "2.4.2"
+description = "DNS toolkit"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "dnspython-2.4.2-py3-none-any.whl", hash = "sha256:57c6fbaaeaaf39c891292012060beb141791735dbb4004798328fc2c467402d8"},
+ {file = "dnspython-2.4.2.tar.gz", hash = "sha256:8dcfae8c7460a2f84b4072e26f1c9f4101ca20c071649cb7c34e8b6a93d58984"},
+]
+
+[package.extras]
+dnssec = ["cryptography (>=2.6,<42.0)"]
+doh = ["h2 (>=4.1.0)", "httpcore (>=0.17.3)", "httpx (>=0.24.1)"]
+doq = ["aioquic (>=0.9.20)"]
+idna = ["idna (>=2.1,<4.0)"]
+trio = ["trio (>=0.14,<0.23)"]
+wmi = ["wmi (>=1.5.1,<2.0.0)"]
+
+[[package]]
+name = "docker-pycreds"
+version = "0.4.0"
+description = "Python bindings for the docker credentials store API"
+optional = false
+python-versions = "*"
+files = [
+ {file = "docker-pycreds-0.4.0.tar.gz", hash = "sha256:6ce3270bcaf404cc4c3e27e4b6c70d3521deae82fb508767870fdbf772d584d4"},
+ {file = "docker_pycreds-0.4.0-py2.py3-none-any.whl", hash = "sha256:7266112468627868005106ec19cd0d722702d2b7d5912a28e19b826c3d37af49"},
+]
+
+[package.dependencies]
+six = ">=1.4.0"
+
+[[package]]
+name = "docutils"
+version = "0.20.1"
+description = "Docutils -- Python Documentation Utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "docutils-0.20.1-py3-none-any.whl", hash = "sha256:96f387a2c5562db4476f09f13bbab2192e764cac08ebbf3a34a95d9b1e4a59d6"},
+ {file = "docutils-0.20.1.tar.gz", hash = "sha256:f08a4e276c3a1583a86dce3e34aba3fe04d02bba2dd51ed16106244e8a923e3b"},
+]
+
+[[package]]
+name = "email-validator"
+version = "2.1.0.post1"
+description = "A robust email address syntax and deliverability validation library."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "email_validator-2.1.0.post1-py3-none-any.whl", hash = "sha256:c973053efbeddfef924dc0bd93f6e77a1ea7ee0fce935aea7103c7a3d6d2d637"},
+ {file = "email_validator-2.1.0.post1.tar.gz", hash = "sha256:a4b0bd1cf55f073b924258d19321b1f3aa74b4b5a71a42c305575dba920e1a44"},
+]
+
+[package.dependencies]
+dnspython = ">=2.0.0"
+idna = ">=2.0.0"
+
+[[package]]
+name = "emma-common"
+version = "2.3.1"
+description = "Common modules which are used a lot throughout EMMA repositories"
+optional = false
+python-versions = ">=3.9,<3.11"
+files = []
+develop = false
+
+[package.dependencies]
+fastapi = {version = ">=0.88.0", extras = ["all"]}
+gunicorn = ">=20.1.0"
+httpx = ">=0.23.1"
+huggingface-hub = ">=0.19.4"
+loguru = ">=0.6.0"
+numpy = ">1.22"
+orjson = ">=3.8.3"
+pydantic = {version = ">=1.10.0,<2", extras = ["dotenv"]}
+rich = ">=12.6.0"
+torch = ">=1.10,!=1.13.0,<2"
+
+[package.source]
+type = "git"
+url = "https://github.com/emma-heriot-watt/common.git"
+reference = "HEAD"
+resolved_reference = "d86e146d2a4cf58a2dd55fd0776fadd005dd6653"
+
+[[package]]
+name = "emma-experience-hub"
+version = "8.72.1"
+description = ""
+optional = false
+python-versions = ">=3.9,<3.10"
+files = []
+develop = false
+
+[package.dependencies]
+cloudpathlib = {version = ">=0.10.0", extras = ["s3"]}
+convert-case = ">=1.1.1"
+emma-common = {git = "https://github.com/emma-heriot-watt/common.git"}
+fastapi = ">=0.88.0"
+gunicorn = ">=20.1.0"
+httpx = ">=0.23.0"
+methodtools = ">=0.4.5"
+more-itertools = ">=9.0.0"
+numpy = ">=1.23.5"
+orjson = ">=3.8.0"
+overrides = ">=6.1.0"
+Pillow = ">=9.2.0"
+pydantic = {version = ">=1.10.0,<2", extras = ["dotenv"]}
+rich = ">=12.5.1"
+rule-engine = ">=3.5.0"
+torch = ">=1.13.1,<2"
+typer = ">=0.6.1"
+
+[package.source]
+type = "git"
+url = "https://github.com/emma-heriot-watt/experience-hub.git"
+reference = "HEAD"
+resolved_reference = "9ab763332a620f3e4d25e241e4126dee4e42c94d"
+
+[[package]]
+name = "eradicate"
+version = "2.3.0"
+description = "Removes commented-out code."
+optional = false
+python-versions = "*"
+files = [
+ {file = "eradicate-2.3.0-py3-none-any.whl", hash = "sha256:2b29b3dd27171f209e4ddd8204b70c02f0682ae95eecb353f10e8d72b149c63e"},
+ {file = "eradicate-2.3.0.tar.gz", hash = "sha256:06df115be3b87d0fc1c483db22a2ebb12bcf40585722810d809cc770f5031c37"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.0"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.2.0-py3-none-any.whl", hash = "sha256:4bfd3996ac73b41e9b9628b04e079f193850720ea5945fc96a08633c66912f14"},
+ {file = "exceptiongroup-1.2.0.tar.gz", hash = "sha256:91f5c769735f051a4290d52edd0858999b57e5876e9f85937691bd4c9fa3ed68"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "execnet"
+version = "2.0.2"
+description = "execnet: rapid multi-Python deployment"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "execnet-2.0.2-py3-none-any.whl", hash = "sha256:88256416ae766bc9e8895c76a87928c0012183da3cc4fc18016e6f050e025f41"},
+ {file = "execnet-2.0.2.tar.gz", hash = "sha256:cc59bc4423742fd71ad227122eb0dd44db51efb3dc4095b45ac9a08c770096af"},
+]
+
+[package.extras]
+testing = ["hatch", "pre-commit", "pytest", "tox"]
+
+[[package]]
+name = "fastapi"
+version = "0.99.1"
+description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "fastapi-0.99.1-py3-none-any.whl", hash = "sha256:976df7bab51ac7beda9f68c4513b8c4490b5c1135c72aafd0a5ee4023ec5282e"},
+ {file = "fastapi-0.99.1.tar.gz", hash = "sha256:ac78f717cd80d657bd183f94d33b9bda84aa376a46a9dab513586b8eef1dc6fc"},
+]
+
+[package.dependencies]
+email-validator = {version = ">=1.1.1", optional = true, markers = "extra == \"all\""}
+httpx = {version = ">=0.23.0", optional = true, markers = "extra == \"all\""}
+itsdangerous = {version = ">=1.1.0", optional = true, markers = "extra == \"all\""}
+jinja2 = {version = ">=2.11.2", optional = true, markers = "extra == \"all\""}
+orjson = {version = ">=3.2.1", optional = true, markers = "extra == \"all\""}
+pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0"
+python-multipart = {version = ">=0.0.5", optional = true, markers = "extra == \"all\""}
+pyyaml = {version = ">=5.3.1", optional = true, markers = "extra == \"all\""}
+starlette = ">=0.27.0,<0.28.0"
+typing-extensions = ">=4.5.0"
+ujson = {version = ">=4.0.1,<4.0.2 || >4.0.2,<4.1.0 || >4.1.0,<4.2.0 || >4.2.0,<4.3.0 || >4.3.0,<5.0.0 || >5.0.0,<5.1.0 || >5.1.0", optional = true, markers = "extra == \"all\""}
+uvicorn = {version = ">=0.12.0", extras = ["standard"], optional = true, markers = "extra == \"all\""}
+
+[package.extras]
+all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+
+[[package]]
+name = "filelock"
+version = "3.13.1"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.13.1-py3-none-any.whl", hash = "sha256:57dbda9b35157b05fb3e58ee91448612eb674172fab98ee235ccb0b5bee19a1c"},
+ {file = "filelock-3.13.1.tar.gz", hash = "sha256:521f5f56c50f8426f5e03ad3b281b490a87ef15bc6c526f168290f0c7148d44e"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.9.10)", "sphinx (>=7.2.6)", "sphinx-autodoc-typehints (>=1.24)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3.2)", "diff-cover (>=8)", "pytest (>=7.4.3)", "pytest-cov (>=4.1)", "pytest-mock (>=3.12)", "pytest-timeout (>=2.2)"]
+typing = ["typing-extensions (>=4.8)"]
+
+[[package]]
+name = "flake8"
+version = "6.1.0"
+description = "the modular source code checker: pep8 pyflakes and co"
+optional = false
+python-versions = ">=3.8.1"
+files = [
+ {file = "flake8-6.1.0-py2.py3-none-any.whl", hash = "sha256:ffdfce58ea94c6580c77888a86506937f9a1a227dfcd15f245d694ae20a6b6e5"},
+ {file = "flake8-6.1.0.tar.gz", hash = "sha256:d5b3857f07c030bdb5bf41c7f53799571d75c4491748a3adcd47de929e34cd23"},
+]
+
+[package.dependencies]
+mccabe = ">=0.7.0,<0.8.0"
+pycodestyle = ">=2.11.0,<2.12.0"
+pyflakes = ">=3.1.0,<3.2.0"
+
+[[package]]
+name = "flake8-bandit"
+version = "4.1.1"
+description = "Automated security testing with bandit and flake8."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "flake8_bandit-4.1.1-py3-none-any.whl", hash = "sha256:4c8a53eb48f23d4ef1e59293657181a3c989d0077c9952717e98a0eace43e06d"},
+ {file = "flake8_bandit-4.1.1.tar.gz", hash = "sha256:068e09287189cbfd7f986e92605adea2067630b75380c6b5733dab7d87f9a84e"},
+]
+
+[package.dependencies]
+bandit = ">=1.7.3"
+flake8 = ">=5.0.0"
+
+[[package]]
+name = "flake8-broken-line"
+version = "1.0.0"
+description = "Flake8 plugin to forbid backslashes for line breaks"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "flake8_broken_line-1.0.0-py3-none-any.whl", hash = "sha256:96c964336024a5030dc536a9f6fb02aa679e2d2a6b35b80a558b5136c35832a9"},
+ {file = "flake8_broken_line-1.0.0.tar.gz", hash = "sha256:e2c6a17f8d9a129e99c1320fce89b33843e2963871025c4c2bb7b8b8d8732a85"},
+]
+
+[package.dependencies]
+flake8 = ">5"
+
+[[package]]
+name = "flake8-bugbear"
+version = "23.12.2"
+description = "A plugin for flake8 finding likely bugs and design problems in your program. Contains warnings that don't belong in pyflakes and pycodestyle."
+optional = false
+python-versions = ">=3.8.1"
+files = [
+ {file = "flake8-bugbear-23.12.2.tar.gz", hash = "sha256:32b2903e22331ae04885dae25756a32a8c666c85142e933f43512a70f342052a"},
+ {file = "flake8_bugbear-23.12.2-py3-none-any.whl", hash = "sha256:83324bad4d90fee4bf64dd69c61aff94debf8073fbd807c8b6a36eec7a2f0719"},
+]
+
+[package.dependencies]
+attrs = ">=19.2.0"
+flake8 = ">=6.0.0"
+
+[package.extras]
+dev = ["coverage", "hypothesis", "hypothesmith (>=0.2)", "pre-commit", "pytest", "tox"]
+
+[[package]]
+name = "flake8-commas"
+version = "2.1.0"
+description = "Flake8 lint for trailing commas."
+optional = false
+python-versions = "*"
+files = [
+ {file = "flake8-commas-2.1.0.tar.gz", hash = "sha256:940441ab8ee544df564ae3b3f49f20462d75d5c7cac2463e0b27436e2050f263"},
+ {file = "flake8_commas-2.1.0-py2.py3-none-any.whl", hash = "sha256:ebb96c31e01d0ef1d0685a21f3f0e2f8153a0381430e748bf0bbbb5d5b453d54"},
+]
+
+[package.dependencies]
+flake8 = ">=2"
+
+[[package]]
+name = "flake8-comprehensions"
+version = "3.14.0"
+description = "A flake8 plugin to help you write better list/set/dict comprehensions."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "flake8_comprehensions-3.14.0-py3-none-any.whl", hash = "sha256:7b9d07d94aa88e62099a6d1931ddf16c344d4157deedf90fe0d8ee2846f30e97"},
+ {file = "flake8_comprehensions-3.14.0.tar.gz", hash = "sha256:81768c61bfc064e1a06222df08a2580d97de10cb388694becaf987c331c6c0cf"},
+]
+
+[package.dependencies]
+flake8 = ">=3.0,<3.2.0 || >3.2.0"
+
+[[package]]
+name = "flake8-debugger"
+version = "4.1.2"
+description = "ipdb/pdb statement checker plugin for flake8"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "flake8-debugger-4.1.2.tar.gz", hash = "sha256:52b002560941e36d9bf806fca2523dc7fb8560a295d5f1a6e15ac2ded7a73840"},
+ {file = "flake8_debugger-4.1.2-py3-none-any.whl", hash = "sha256:0a5e55aeddcc81da631ad9c8c366e7318998f83ff00985a49e6b3ecf61e571bf"},
+]
+
+[package.dependencies]
+flake8 = ">=3.0"
+pycodestyle = "*"
+
+[[package]]
+name = "flake8-docstrings"
+version = "1.7.0"
+description = "Extension for flake8 which uses pydocstyle to check docstrings"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "flake8_docstrings-1.7.0-py2.py3-none-any.whl", hash = "sha256:51f2344026da083fc084166a9353f5082b01f72901df422f74b4d953ae88ac75"},
+ {file = "flake8_docstrings-1.7.0.tar.gz", hash = "sha256:4c8cc748dc16e6869728699e5d0d685da9a10b0ea718e090b1ba088e67a941af"},
+]
+
+[package.dependencies]
+flake8 = ">=3"
+pydocstyle = ">=2.1"
+
+[[package]]
+name = "flake8-eradicate"
+version = "1.5.0"
+description = "Flake8 plugin to find commented out code"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "flake8_eradicate-1.5.0-py3-none-any.whl", hash = "sha256:18acc922ad7de623f5247c7d5595da068525ec5437dd53b22ec2259b96ce9d22"},
+ {file = "flake8_eradicate-1.5.0.tar.gz", hash = "sha256:aee636cb9ecb5594a7cd92d67ad73eb69909e5cc7bd81710cf9d00970f3983a6"},
+]
+
+[package.dependencies]
+attrs = "*"
+eradicate = ">=2.0,<3.0"
+flake8 = ">5"
+
+[[package]]
+name = "flake8-isort"
+version = "6.1.1"
+description = "flake8 plugin that integrates isort"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "flake8_isort-6.1.1-py3-none-any.whl", hash = "sha256:0fec4dc3a15aefbdbe4012e51d5531a2eb5fa8b981cdfbc882296a59b54ede12"},
+ {file = "flake8_isort-6.1.1.tar.gz", hash = "sha256:c1f82f3cf06a80c13e1d09bfae460e9666255d5c780b859f19f8318d420370b3"},
+]
+
+[package.dependencies]
+flake8 = "*"
+isort = ">=5.0.0,<6"
+
+[package.extras]
+test = ["pytest"]
+
+[[package]]
+name = "flake8-quotes"
+version = "3.3.2"
+description = "Flake8 lint for quotes."
+optional = false
+python-versions = "*"
+files = [
+ {file = "flake8-quotes-3.3.2.tar.gz", hash = "sha256:6e26892b632dacba517bf27219c459a8396dcfac0f5e8204904c5a4ba9b480e1"},
+]
+
+[package.dependencies]
+flake8 = "*"
+
+[[package]]
+name = "flake8-rst-docstrings"
+version = "0.3.0"
+description = "Python docstring reStructuredText (RST) validator for flake8"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "flake8-rst-docstrings-0.3.0.tar.gz", hash = "sha256:d1ce22b4bd37b73cd86b8d980e946ef198cfcc18ed82fedb674ceaa2f8d1afa4"},
+ {file = "flake8_rst_docstrings-0.3.0-py3-none-any.whl", hash = "sha256:f8c3c6892ff402292651c31983a38da082480ad3ba253743de52989bdc84ca1c"},
+]
+
+[package.dependencies]
+flake8 = ">=3"
+pygments = "*"
+restructuredtext-lint = "*"
+
+[package.extras]
+develop = ["build", "twine"]
+
+[[package]]
+name = "flake8-string-format"
+version = "0.3.0"
+description = "string format checker, plugin for flake8"
+optional = false
+python-versions = "*"
+files = [
+ {file = "flake8-string-format-0.3.0.tar.gz", hash = "sha256:65f3da786a1461ef77fca3780b314edb2853c377f2e35069723348c8917deaa2"},
+ {file = "flake8_string_format-0.3.0-py2.py3-none-any.whl", hash = "sha256:812ff431f10576a74c89be4e85b8e075a705be39bc40c4b4278b5b13e2afa9af"},
+]
+
+[package.dependencies]
+flake8 = "*"
+
+[[package]]
+name = "flask"
+version = "2.3.3"
+description = "A simple framework for building complex web applications."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "flask-2.3.3-py3-none-any.whl", hash = "sha256:f69fcd559dc907ed196ab9df0e48471709175e696d6e698dd4dbe940f96ce66b"},
+ {file = "flask-2.3.3.tar.gz", hash = "sha256:09c347a92aa7ff4a8e7f3206795f30d826654baf38b873d0744cd571ca609efc"},
+]
+
+[package.dependencies]
+blinker = ">=1.6.2"
+click = ">=8.1.3"
+importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""}
+itsdangerous = ">=2.1.2"
+Jinja2 = ">=3.1.2"
+Werkzeug = ">=2.3.7"
+
+[package.extras]
+async = ["asgiref (>=3.2)"]
+dotenv = ["python-dotenv"]
+
+[[package]]
+name = "flask-cors"
+version = "4.0.0"
+description = "A Flask extension adding a decorator for CORS support"
+optional = false
+python-versions = "*"
+files = [
+ {file = "Flask-Cors-4.0.0.tar.gz", hash = "sha256:f268522fcb2f73e2ecdde1ef45e2fd5c71cc48fe03cffb4b441c6d1b40684eb0"},
+ {file = "Flask_Cors-4.0.0-py2.py3-none-any.whl", hash = "sha256:bc3492bfd6368d27cfe79c7821df5a8a319e1a6d5eab277a3794be19bdc51783"},
+]
+
+[package.dependencies]
+Flask = ">=0.9"
+
+[[package]]
+name = "fsspec"
+version = "2023.12.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2023.12.0-py3-none-any.whl", hash = "sha256:f807252ee2018f2223760315beb87a2166c2b9532786eeca9e6548dfcf2cfac9"},
+ {file = "fsspec-2023.12.0.tar.gz", hash = "sha256:8e0bb2db2a94082968483b7ba2eaebf3949835e2dfdf09243dda387539464b31"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+devel = ["pytest", "pytest-cov"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "gevent"
+version = "23.9.1"
+description = "Coroutine-based network library"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "gevent-23.9.1-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:a3c5e9b1f766a7a64833334a18539a362fb563f6c4682f9634dea72cbe24f771"},
+ {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b101086f109168b23fa3586fccd1133494bdb97f86920a24dc0b23984dc30b69"},
+ {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36a549d632c14684bcbbd3014a6ce2666c5f2a500f34d58d32df6c9ea38b6535"},
+ {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:272cffdf535978d59c38ed837916dfd2b5d193be1e9e5dcc60a5f4d5025dd98a"},
+ {file = "gevent-23.9.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcb8612787a7f4626aa881ff15ff25439561a429f5b303048f0fca8a1c781c39"},
+ {file = "gevent-23.9.1-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:d57737860bfc332b9b5aa438963986afe90f49645f6e053140cfa0fa1bdae1ae"},
+ {file = "gevent-23.9.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:5f3c781c84794926d853d6fb58554dc0dcc800ba25c41d42f6959c344b4db5a6"},
+ {file = "gevent-23.9.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:dbb22a9bbd6a13e925815ce70b940d1578dbe5d4013f20d23e8a11eddf8d14a7"},
+ {file = "gevent-23.9.1-cp310-cp310-win_amd64.whl", hash = "sha256:707904027d7130ff3e59ea387dddceedb133cc742b00b3ffe696d567147a9c9e"},
+ {file = "gevent-23.9.1-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:45792c45d60f6ce3d19651d7fde0bc13e01b56bb4db60d3f32ab7d9ec467374c"},
+ {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4e24c2af9638d6c989caffc691a039d7c7022a31c0363da367c0d32ceb4a0648"},
+ {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e1ead6863e596a8cc2a03e26a7a0981f84b6b3e956101135ff6d02df4d9a6b07"},
+ {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65883ac026731ac112184680d1f0f1e39fa6f4389fd1fc0bf46cc1388e2599f9"},
+ {file = "gevent-23.9.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf7af500da05363e66f122896012acb6e101a552682f2352b618e541c941a011"},
+ {file = "gevent-23.9.1-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:c3e5d2fa532e4d3450595244de8ccf51f5721a05088813c1abd93ad274fe15e7"},
+ {file = "gevent-23.9.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:c84d34256c243b0a53d4335ef0bc76c735873986d478c53073861a92566a8d71"},
+ {file = "gevent-23.9.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ada07076b380918829250201df1d016bdafb3acf352f35e5693b59dceee8dd2e"},
+ {file = "gevent-23.9.1-cp311-cp311-win_amd64.whl", hash = "sha256:921dda1c0b84e3d3b1778efa362d61ed29e2b215b90f81d498eb4d8eafcd0b7a"},
+ {file = "gevent-23.9.1-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:ed7a048d3e526a5c1d55c44cb3bc06cfdc1947d06d45006cc4cf60dedc628904"},
+ {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7c1abc6f25f475adc33e5fc2dbcc26a732608ac5375d0d306228738a9ae14d3b"},
+ {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4368f341a5f51611411ec3fc62426f52ac3d6d42eaee9ed0f9eebe715c80184e"},
+ {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:52b4abf28e837f1865a9bdeef58ff6afd07d1d888b70b6804557e7908032e599"},
+ {file = "gevent-23.9.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:52e9f12cd1cda96603ce6b113d934f1aafb873e2c13182cf8e86d2c5c41982ea"},
+ {file = "gevent-23.9.1-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:de350fde10efa87ea60d742901e1053eb2127ebd8b59a7d3b90597eb4e586599"},
+ {file = "gevent-23.9.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:fde6402c5432b835fbb7698f1c7f2809c8d6b2bd9d047ac1f5a7c1d5aa569303"},
+ {file = "gevent-23.9.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:dd6c32ab977ecf7c7b8c2611ed95fa4aaebd69b74bf08f4b4960ad516861517d"},
+ {file = "gevent-23.9.1-cp312-cp312-win_amd64.whl", hash = "sha256:455e5ee8103f722b503fa45dedb04f3ffdec978c1524647f8ba72b4f08490af1"},
+ {file = "gevent-23.9.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:7ccf0fd378257cb77d91c116e15c99e533374a8153632c48a3ecae7f7f4f09fe"},
+ {file = "gevent-23.9.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d163d59f1be5a4c4efcdd13c2177baaf24aadf721fdf2e1af9ee54a998d160f5"},
+ {file = "gevent-23.9.1-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7532c17bc6c1cbac265e751b95000961715adef35a25d2b0b1813aa7263fb397"},
+ {file = "gevent-23.9.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:78eebaf5e73ff91d34df48f4e35581ab4c84e22dd5338ef32714264063c57507"},
+ {file = "gevent-23.9.1-cp38-cp38-win32.whl", hash = "sha256:f632487c87866094546a74eefbca2c74c1d03638b715b6feb12e80120960185a"},
+ {file = "gevent-23.9.1-cp38-cp38-win_amd64.whl", hash = "sha256:62d121344f7465e3739989ad6b91f53a6ca9110518231553fe5846dbe1b4518f"},
+ {file = "gevent-23.9.1-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:bf456bd6b992eb0e1e869e2fd0caf817f0253e55ca7977fd0e72d0336a8c1c6a"},
+ {file = "gevent-23.9.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43daf68496c03a35287b8b617f9f91e0e7c0d042aebcc060cadc3f049aadd653"},
+ {file = "gevent-23.9.1-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:7c28e38dcde327c217fdafb9d5d17d3e772f636f35df15ffae2d933a5587addd"},
+ {file = "gevent-23.9.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:fae8d5b5b8fa2a8f63b39f5447168b02db10c888a3e387ed7af2bd1b8612e543"},
+ {file = "gevent-23.9.1-cp39-cp39-win32.whl", hash = "sha256:2c7b5c9912378e5f5ccf180d1fdb1e83f42b71823483066eddbe10ef1a2fcaa2"},
+ {file = "gevent-23.9.1-cp39-cp39-win_amd64.whl", hash = "sha256:a2898b7048771917d85a1d548fd378e8a7b2ca963db8e17c6d90c76b495e0e2b"},
+ {file = "gevent-23.9.1.tar.gz", hash = "sha256:72c002235390d46f94938a96920d8856d4ffd9ddf62a303a0d7c118894097e34"},
+]
+
+[package.dependencies]
+cffi = {version = ">=1.12.2", markers = "platform_python_implementation == \"CPython\" and sys_platform == \"win32\""}
+greenlet = {version = ">=2.0.0", markers = "platform_python_implementation == \"CPython\" and python_version < \"3.11\""}
+"zope.event" = "*"
+"zope.interface" = "*"
+
+[package.extras]
+dnspython = ["dnspython (>=1.16.0,<2.0)", "idna"]
+docs = ["furo", "repoze.sphinx.autointerface", "sphinx", "sphinxcontrib-programoutput", "zope.schema"]
+monitor = ["psutil (>=5.7.0)"]
+recommended = ["cffi (>=1.12.2)", "dnspython (>=1.16.0,<2.0)", "idna", "psutil (>=5.7.0)"]
+test = ["cffi (>=1.12.2)", "coverage (>=5.0)", "dnspython (>=1.16.0,<2.0)", "idna", "objgraph", "psutil (>=5.7.0)", "requests", "setuptools"]
+
+[[package]]
+name = "gitdb"
+version = "4.0.11"
+description = "Git Object Database"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "gitdb-4.0.11-py3-none-any.whl", hash = "sha256:81a3407ddd2ee8df444cbacea00e2d038e40150acfa3001696fe0dcf1d3adfa4"},
+ {file = "gitdb-4.0.11.tar.gz", hash = "sha256:bf5421126136d6d0af55bc1e7c1af1c397a34f5b7bd79e776cd3e89785c2b04b"},
+]
+
+[package.dependencies]
+smmap = ">=3.0.1,<6"
+
+[[package]]
+name = "gitpython"
+version = "3.1.40"
+description = "GitPython is a Python library used to interact with Git repositories"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "GitPython-3.1.40-py3-none-any.whl", hash = "sha256:cf14627d5a8049ffbf49915732e5eddbe8134c3bdb9d476e6182b676fc573f8a"},
+ {file = "GitPython-3.1.40.tar.gz", hash = "sha256:22b126e9ffb671fdd0c129796343a02bf67bf2994b35449ffc9321aa755e18a4"},
+]
+
+[package.dependencies]
+gitdb = ">=4.0.1,<5"
+
+[package.extras]
+test = ["black", "coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", "pytest", "pytest-cov", "pytest-instafail", "pytest-subtests", "pytest-sugar"]
+
+[[package]]
+name = "greenlet"
+version = "3.0.1"
+description = "Lightweight in-process concurrent programming"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "greenlet-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f89e21afe925fcfa655965ca8ea10f24773a1791400989ff32f467badfe4a064"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28e89e232c7593d33cac35425b58950789962011cc274aa43ef8865f2e11f46d"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8ba29306c5de7717b5761b9ea74f9c72b9e2b834e24aa984da99cbfc70157fd"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19bbdf1cce0346ef7341705d71e2ecf6f41a35c311137f29b8a2dc2341374565"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:599daf06ea59bfedbec564b1692b0166a0045f32b6f0933b0dd4df59a854caf2"},
+ {file = "greenlet-3.0.1-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:b641161c302efbb860ae6b081f406839a8b7d5573f20a455539823802c655f63"},
+ {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d57e20ba591727da0c230ab2c3f200ac9d6d333860d85348816e1dca4cc4792e"},
+ {file = "greenlet-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5805e71e5b570d490938d55552f5a9e10f477c19400c38bf1d5190d760691846"},
+ {file = "greenlet-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:52e93b28db27ae7d208748f45d2db8a7b6a380e0d703f099c949d0f0d80b70e9"},
+ {file = "greenlet-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f7bfb769f7efa0eefcd039dd19d843a4fbfbac52f1878b1da2ed5793ec9b1a65"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:91e6c7db42638dc45cf2e13c73be16bf83179f7859b07cfc139518941320be96"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1757936efea16e3f03db20efd0cd50a1c86b06734f9f7338a90c4ba85ec2ad5a"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:19075157a10055759066854a973b3d1325d964d498a805bb68a1f9af4aaef8ec"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e9d21aaa84557d64209af04ff48e0ad5e28c5cca67ce43444e939579d085da72"},
+ {file = "greenlet-3.0.1-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2847e5d7beedb8d614186962c3d774d40d3374d580d2cbdab7f184580a39d234"},
+ {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:97e7ac860d64e2dcba5c5944cfc8fa9ea185cd84061c623536154d5a89237884"},
+ {file = "greenlet-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b2c02d2ad98116e914d4f3155ffc905fd0c025d901ead3f6ed07385e19122c94"},
+ {file = "greenlet-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:22f79120a24aeeae2b4471c711dcf4f8c736a2bb2fabad2a67ac9a55ea72523c"},
+ {file = "greenlet-3.0.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:100f78a29707ca1525ea47388cec8a049405147719f47ebf3895e7509c6446aa"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60d5772e8195f4e9ebf74046a9121bbb90090f6550f81d8956a05387ba139353"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:daa7197b43c707462f06d2c693ffdbb5991cbb8b80b5b984007de431493a319c"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ea6b8aa9e08eea388c5f7a276fabb1d4b6b9d6e4ceb12cc477c3d352001768a9"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d11ebbd679e927593978aa44c10fc2092bc454b7d13fdc958d3e9d508aba7d0"},
+ {file = "greenlet-3.0.1-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:dbd4c177afb8a8d9ba348d925b0b67246147af806f0b104af4d24f144d461cd5"},
+ {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:20107edf7c2c3644c67c12205dc60b1bb11d26b2610b276f97d666110d1b511d"},
+ {file = "greenlet-3.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8bef097455dea90ffe855286926ae02d8faa335ed8e4067326257cb571fc1445"},
+ {file = "greenlet-3.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:b2d3337dcfaa99698aa2377c81c9ca72fcd89c07e7eb62ece3f23a3fe89b2ce4"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:80ac992f25d10aaebe1ee15df45ca0d7571d0f70b645c08ec68733fb7a020206"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:337322096d92808f76ad26061a8f5fccb22b0809bea39212cd6c406f6a7060d2"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9934adbd0f6e476f0ecff3c94626529f344f57b38c9a541f87098710b18af0a"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dc4d815b794fd8868c4d67602692c21bf5293a75e4b607bb92a11e821e2b859a"},
+ {file = "greenlet-3.0.1-cp37-cp37m-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41bdeeb552d814bcd7fb52172b304898a35818107cc8778b5101423c9017b3de"},
+ {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6e6061bf1e9565c29002e3c601cf68569c450be7fc3f7336671af7ddb4657166"},
+ {file = "greenlet-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:fa24255ae3c0ab67e613556375a4341af04a084bd58764731972bcbc8baeba36"},
+ {file = "greenlet-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:b489c36d1327868d207002391f662a1d163bdc8daf10ab2e5f6e41b9b96de3b1"},
+ {file = "greenlet-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f33f3258aae89da191c6ebaa3bc517c6c4cbc9b9f689e5d8452f7aedbb913fa8"},
+ {file = "greenlet-3.0.1-cp38-cp38-macosx_11_0_universal2.whl", hash = "sha256:d2905ce1df400360463c772b55d8e2518d0e488a87cdea13dd2c71dcb2a1fa16"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a02d259510b3630f330c86557331a3b0e0c79dac3d166e449a39363beaae174"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:55d62807f1c5a1682075c62436702aaba941daa316e9161e4b6ccebbbf38bda3"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fcc780ae8edbb1d050d920ab44790201f027d59fdbd21362340a85c79066a74"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4eddd98afc726f8aee1948858aed9e6feeb1758889dfd869072d4465973f6bfd"},
+ {file = "greenlet-3.0.1-cp38-cp38-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:eabe7090db68c981fca689299c2d116400b553f4b713266b130cfc9e2aa9c5a9"},
+ {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f2f6d303f3dee132b322a14cd8765287b8f86cdc10d2cb6a6fae234ea488888e"},
+ {file = "greenlet-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d923ff276f1c1f9680d32832f8d6c040fe9306cbfb5d161b0911e9634be9ef0a"},
+ {file = "greenlet-3.0.1-cp38-cp38-win32.whl", hash = "sha256:0b6f9f8ca7093fd4433472fd99b5650f8a26dcd8ba410e14094c1e44cd3ceddd"},
+ {file = "greenlet-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:990066bff27c4fcf3b69382b86f4c99b3652bab2a7e685d968cd4d0cfc6f67c6"},
+ {file = "greenlet-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ce85c43ae54845272f6f9cd8320d034d7a946e9773c693b27d620edec825e376"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:89ee2e967bd7ff85d84a2de09df10e021c9b38c7d91dead95b406ed6350c6997"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:87c8ceb0cf8a5a51b8008b643844b7f4a8264a2c13fcbcd8a8316161725383fe"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d6a8c9d4f8692917a3dc7eb25a6fb337bff86909febe2f793ec1928cd97bedfc"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fbc5b8f3dfe24784cee8ce0be3da2d8a79e46a276593db6868382d9c50d97b1"},
+ {file = "greenlet-3.0.1-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:85d2b77e7c9382f004b41d9c72c85537fac834fb141b0296942d52bf03fe4a3d"},
+ {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:696d8e7d82398e810f2b3622b24e87906763b6ebfd90e361e88eb85b0e554dc8"},
+ {file = "greenlet-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:329c5a2e5a0ee942f2992c5e3ff40be03e75f745f48847f118a3cfece7a28546"},
+ {file = "greenlet-3.0.1-cp39-cp39-win32.whl", hash = "sha256:cf868e08690cb89360eebc73ba4be7fb461cfbc6168dd88e2fbbe6f31812cd57"},
+ {file = "greenlet-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:ac4a39d1abae48184d420aa8e5e63efd1b75c8444dd95daa3e03f6c6310e9619"},
+ {file = "greenlet-3.0.1.tar.gz", hash = "sha256:816bd9488a94cba78d93e1abb58000e8266fa9cc2aa9ccdd6eb0696acb24005b"},
+]
+
+[package.extras]
+docs = ["Sphinx"]
+test = ["objgraph", "psutil"]
+
+[[package]]
+name = "gunicorn"
+version = "21.2.0"
+description = "WSGI HTTP Server for UNIX"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "gunicorn-21.2.0-py3-none-any.whl", hash = "sha256:3213aa5e8c24949e792bcacfc176fef362e7aac80b76c56f6b5122bf350722f0"},
+ {file = "gunicorn-21.2.0.tar.gz", hash = "sha256:88ec8bff1d634f98e61b9f65bc4bf3cd918a90806c6f5c48bc5603849ec81033"},
+]
+
+[package.dependencies]
+packaging = "*"
+
+[package.extras]
+eventlet = ["eventlet (>=0.24.1)"]
+gevent = ["gevent (>=1.4.0)"]
+setproctitle = ["setproctitle"]
+tornado = ["tornado (>=0.2)"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.2"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpcore-1.0.2-py3-none-any.whl", hash = "sha256:096cc05bca73b8e459a1fc3dcf585148f63e534eae4339559c9b8a8d6399acc7"},
+ {file = "httpcore-1.0.2.tar.gz", hash = "sha256:9fc092e4799b26174648e54b74ed5f683132a464e95643b226e00c2ed2fa6535"},
+]
+
+[package.dependencies]
+certifi = "*"
+h11 = ">=0.13,<0.15"
+
+[package.extras]
+asyncio = ["anyio (>=4.0,<5.0)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+trio = ["trio (>=0.22.0,<0.23.0)"]
+
+[[package]]
+name = "httptools"
+version = "0.6.1"
+description = "A collection of framework independent HTTP protocol utils."
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d2f6c3c4cb1948d912538217838f6e9960bc4a521d7f9b323b3da579cd14532f"},
+ {file = "httptools-0.6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:00d5d4b68a717765b1fabfd9ca755bd12bf44105eeb806c03d1962acd9b8e563"},
+ {file = "httptools-0.6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:639dc4f381a870c9ec860ce5c45921db50205a37cc3334e756269736ff0aac58"},
+ {file = "httptools-0.6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e57997ac7fb7ee43140cc03664de5f268813a481dff6245e0075925adc6aa185"},
+ {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0ac5a0ae3d9f4fe004318d64b8a854edd85ab76cffbf7ef5e32920faef62f142"},
+ {file = "httptools-0.6.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3f30d3ce413088a98b9db71c60a6ada2001a08945cb42dd65a9a9fe228627658"},
+ {file = "httptools-0.6.1-cp310-cp310-win_amd64.whl", hash = "sha256:1ed99a373e327f0107cb513b61820102ee4f3675656a37a50083eda05dc9541b"},
+ {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:7a7ea483c1a4485c71cb5f38be9db078f8b0e8b4c4dc0210f531cdd2ddac1ef1"},
+ {file = "httptools-0.6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:85ed077c995e942b6f1b07583e4eb0a8d324d418954fc6af913d36db7c05a5a0"},
+ {file = "httptools-0.6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b0bb634338334385351a1600a73e558ce619af390c2b38386206ac6a27fecfc"},
+ {file = "httptools-0.6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d9ceb2c957320def533671fc9c715a80c47025139c8d1f3797477decbc6edd2"},
+ {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4f0f8271c0a4db459f9dc807acd0eadd4839934a4b9b892f6f160e94da309837"},
+ {file = "httptools-0.6.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:6a4f5ccead6d18ec072ac0b84420e95d27c1cdf5c9f1bc8fbd8daf86bd94f43d"},
+ {file = "httptools-0.6.1-cp311-cp311-win_amd64.whl", hash = "sha256:5cceac09f164bcba55c0500a18fe3c47df29b62353198e4f37bbcc5d591172c3"},
+ {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:75c8022dca7935cba14741a42744eee13ba05db00b27a4b940f0d646bd4d56d0"},
+ {file = "httptools-0.6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:48ed8129cd9a0d62cf4d1575fcf90fb37e3ff7d5654d3a5814eb3d55f36478c2"},
+ {file = "httptools-0.6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f58e335a1402fb5a650e271e8c2d03cfa7cea46ae124649346d17bd30d59c90"},
+ {file = "httptools-0.6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:93ad80d7176aa5788902f207a4e79885f0576134695dfb0fefc15b7a4648d503"},
+ {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9bb68d3a085c2174c2477eb3ffe84ae9fb4fde8792edb7bcd09a1d8467e30a84"},
+ {file = "httptools-0.6.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:b512aa728bc02354e5ac086ce76c3ce635b62f5fbc32ab7082b5e582d27867bb"},
+ {file = "httptools-0.6.1-cp312-cp312-win_amd64.whl", hash = "sha256:97662ce7fb196c785344d00d638fc9ad69e18ee4bfb4000b35a52efe5adcc949"},
+ {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:8e216a038d2d52ea13fdd9b9c9c7459fb80d78302b257828285eca1c773b99b3"},
+ {file = "httptools-0.6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3e802e0b2378ade99cd666b5bffb8b2a7cc8f3d28988685dc300469ea8dd86cb"},
+ {file = "httptools-0.6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4bd3e488b447046e386a30f07af05f9b38d3d368d1f7b4d8f7e10af85393db97"},
+ {file = "httptools-0.6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fe467eb086d80217b7584e61313ebadc8d187a4d95bb62031b7bab4b205c3ba3"},
+ {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3c3b214ce057c54675b00108ac42bacf2ab8f85c58e3f324a4e963bbc46424f4"},
+ {file = "httptools-0.6.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:8ae5b97f690badd2ca27cbf668494ee1b6d34cf1c464271ef7bfa9ca6b83ffaf"},
+ {file = "httptools-0.6.1-cp38-cp38-win_amd64.whl", hash = "sha256:405784577ba6540fa7d6ff49e37daf104e04f4b4ff2d1ac0469eaa6a20fde084"},
+ {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:95fb92dd3649f9cb139e9c56604cc2d7c7bf0fc2e7c8d7fbd58f96e35eddd2a3"},
+ {file = "httptools-0.6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:dcbab042cc3ef272adc11220517278519adf8f53fd3056d0e68f0a6f891ba94e"},
+ {file = "httptools-0.6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cf2372e98406efb42e93bfe10f2948e467edfd792b015f1b4ecd897903d3e8d"},
+ {file = "httptools-0.6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:678fcbae74477a17d103b7cae78b74800d795d702083867ce160fc202104d0da"},
+ {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e0b281cf5a125c35f7f6722b65d8542d2e57331be573e9e88bc8b0115c4a7a81"},
+ {file = "httptools-0.6.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:95658c342529bba4e1d3d2b1a874db16c7cca435e8827422154c9da76ac4e13a"},
+ {file = "httptools-0.6.1-cp39-cp39-win_amd64.whl", hash = "sha256:7ebaec1bf683e4bf5e9fbb49b8cc36da482033596a415b3e4ebab5a4c0d7ec5e"},
+ {file = "httptools-0.6.1.tar.gz", hash = "sha256:c6e26c30455600b95d94b1b836085138e82f177351454ee841c148f93a9bad5a"},
+]
+
+[package.extras]
+test = ["Cython (>=0.29.24,<0.30.0)"]
+
+[[package]]
+name = "httpx"
+version = "0.25.2"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "httpx-0.25.2-py3-none-any.whl", hash = "sha256:a05d3d052d9b2dfce0e3896636467f8a5342fb2b902c819428e1ac65413ca118"},
+ {file = "httpx-0.25.2.tar.gz", hash = "sha256:8b8fcaa0c8ea7b05edd69a094e63a2094c4efcb48129fb757361bc423c0ad9e8"},
+]
+
+[package.dependencies]
+anyio = "*"
+certifi = "*"
+httpcore = "==1.*"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.19.4"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "huggingface_hub-0.19.4-py3-none-any.whl", hash = "sha256:dba013f779da16f14b606492828f3760600a1e1801432d09fe1c33e50b825bb5"},
+ {file = "huggingface_hub-0.19.4.tar.gz", hash = "sha256:176a4fc355a851c17550e7619488f383189727eab209534d7cef2114dae77b22"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = ">=2023.5.0"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)"]
+docs = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "hf-doc-builder", "jedi", "mypy (==1.5.1)", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.1.3)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)", "urllib3 (<2.0)", "watchdog"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+inference = ["aiohttp", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)"]
+quality = ["mypy (==1.5.1)", "ruff (>=0.1.3)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic (>1.1,<2.0)", "pydantic (>1.1,<3.0)", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["torch"]
+typing = ["types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "typing-extensions (>=4.8.0)"]
+
+[[package]]
+name = "identify"
+version = "2.5.32"
+description = "File identification library for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "identify-2.5.32-py2.py3-none-any.whl", hash = "sha256:0b7656ef6cba81664b783352c73f8c24b39cf82f926f78f4550eda928e5e0545"},
+ {file = "identify-2.5.32.tar.gz", hash = "sha256:5d9979348ec1a21c768ae07e0a652924538e8bce67313a73cb0f681cf08ba407"},
+]
+
+[package.extras]
+license = ["ukkonen"]
+
+[[package]]
+name = "idna"
+version = "3.6"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.6-py3-none-any.whl", hash = "sha256:c05567e9c24a6b9faaa835c4821bad0590fbb9d5779e7caa6e1cc4978e7eb24f"},
+ {file = "idna-3.6.tar.gz", hash = "sha256:9ecdbbd083b06798ae1e86adcbfe8ab1479cf864e4ee30fe4e46a003d12491ca"},
+]
+
+[[package]]
+name = "importlib-metadata"
+version = "7.0.0"
+description = "Read metadata from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_metadata-7.0.0-py3-none-any.whl", hash = "sha256:d97503976bb81f40a193d41ee6570868479c69d5068651eb039c40d850c59d67"},
+ {file = "importlib_metadata-7.0.0.tar.gz", hash = "sha256:7fc841f8b8332803464e5dc1c63a2e59121f46ca186c0e2e182e80bf8c1319f7"},
+]
+
+[package.dependencies]
+zipp = ">=0.5"
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+perf = ["ipython"]
+testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "isort"
+version = "5.12.0"
+description = "A Python utility / library to sort Python imports."
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "isort-5.12.0-py3-none-any.whl", hash = "sha256:f84c2818376e66cf843d497486ea8fed8700b340f308f076c6fb1229dff318b6"},
+ {file = "isort-5.12.0.tar.gz", hash = "sha256:8bef7dde241278824a6d83f44a544709b065191b95b6e50894bdc722fcba0504"},
+]
+
+[package.extras]
+colors = ["colorama (>=0.4.3)"]
+pipfile-deprecated-finder = ["pip-shims (>=0.5.2)", "pipreqs", "requirementslib"]
+plugins = ["setuptools"]
+requirements-deprecated-finder = ["pip-api", "pipreqs"]
+
+[[package]]
+name = "itsdangerous"
+version = "2.1.2"
+description = "Safely pass data to untrusted environments and back."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"},
+ {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"},
+]
+
+[[package]]
+name = "jinja2"
+version = "3.1.2"
+description = "A very fast and expressive template engine."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"},
+ {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.0"
+
+[package.extras]
+i18n = ["Babel (>=2.7)"]
+
+[[package]]
+name = "jmespath"
+version = "1.0.1"
+description = "JSON Matching Expressions"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jmespath-1.0.1-py3-none-any.whl", hash = "sha256:02e2e4cc71b5bcab88332eebf907519190dd9e6e82107fa7f83b1003a6252980"},
+ {file = "jmespath-1.0.1.tar.gz", hash = "sha256:90261b206d6defd58fdd5e85f478bf633a2901798906be2ad389150c5c60edbe"},
+]
+
+[[package]]
+name = "lightning-utilities"
+version = "0.10.0"
+description = "PyTorch Lightning Sample project."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "lightning-utilities-0.10.0.tar.gz", hash = "sha256:9e31617eccbbadc6b737a2432fd7076ff8e24957f9c63aeba2530b189e19319c"},
+ {file = "lightning_utilities-0.10.0-py3-none-any.whl", hash = "sha256:84d09b11fe9bc16c803ae5e412874748239d73ad2f3d1b90862f99ce15a03aa0"},
+]
+
+[package.dependencies]
+packaging = ">=17.1"
+setuptools = "*"
+typing-extensions = "*"
+
+[package.extras]
+cli = ["fire"]
+docs = ["requests (>=2.0.0)"]
+typing = ["mypy (>=1.0.0)", "types-setuptools"]
+
+[[package]]
+name = "loguru"
+version = "0.7.2"
+description = "Python logging made (stupidly) simple"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "loguru-0.7.2-py3-none-any.whl", hash = "sha256:003d71e3d3ed35f0f8984898359d65b79e5b21943f78af86aa5491210429b8eb"},
+ {file = "loguru-0.7.2.tar.gz", hash = "sha256:e671a53522515f34fd406340ee968cb9ecafbc4b36c679da03c18fd8d0bd51ac"},
+]
+
+[package.dependencies]
+colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""}
+win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""}
+
+[package.extras]
+dev = ["Sphinx (==7.2.5)", "colorama (==0.4.5)", "colorama (==0.4.6)", "exceptiongroup (==1.1.3)", "freezegun (==1.1.0)", "freezegun (==1.2.2)", "mypy (==v0.910)", "mypy (==v0.971)", "mypy (==v1.4.1)", "mypy (==v1.5.1)", "pre-commit (==3.4.0)", "pytest (==6.1.2)", "pytest (==7.4.0)", "pytest-cov (==2.12.1)", "pytest-cov (==4.1.0)", "pytest-mypy-plugins (==1.9.3)", "pytest-mypy-plugins (==3.0.0)", "sphinx-autobuild (==2021.3.14)", "sphinx-rtd-theme (==1.3.0)", "tox (==3.27.1)", "tox (==4.11.0)"]
+
+[[package]]
+name = "makefun"
+version = "1.15.2"
+description = "Small library to dynamically create python functions."
+optional = false
+python-versions = "*"
+files = [
+ {file = "makefun-1.15.2-py2.py3-none-any.whl", hash = "sha256:1c83abfaefb6c3c7c83ed4a993b4a310af80adf6db15625b184b1f0f7545a041"},
+ {file = "makefun-1.15.2.tar.gz", hash = "sha256:16f2a2b34d9ee0c2b578c960a1808c974e2822cf79f6e9b9c455aace10882d45"},
+]
+
+[[package]]
+name = "markdown-it-py"
+version = "3.0.0"
+description = "Python port of markdown-it. Markdown parsing, done right!"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "markdown-it-py-3.0.0.tar.gz", hash = "sha256:e3f60a94fa066dc52ec76661e37c851cb232d92f9886b15cb560aaada2df8feb"},
+ {file = "markdown_it_py-3.0.0-py3-none-any.whl", hash = "sha256:355216845c60bd96232cd8d8c40e8f9765cc86f46880e43a8fd22dc1a1a8cab1"},
+]
+
+[package.dependencies]
+mdurl = ">=0.1,<1.0"
+
+[package.extras]
+benchmarking = ["psutil", "pytest", "pytest-benchmark"]
+code-style = ["pre-commit (>=3.0,<4.0)"]
+compare = ["commonmark (>=0.9,<1.0)", "markdown (>=3.4,<4.0)", "mistletoe (>=1.0,<2.0)", "mistune (>=2.0,<3.0)", "panflute (>=2.3,<3.0)"]
+linkify = ["linkify-it-py (>=1,<3)"]
+plugins = ["mdit-py-plugins"]
+profiling = ["gprof2dot"]
+rtd = ["jupyter_sphinx", "mdit-py-plugins", "myst-parser", "pyyaml", "sphinx", "sphinx-copybutton", "sphinx-design", "sphinx_book_theme"]
+testing = ["coverage", "pytest", "pytest-cov", "pytest-regressions"]
+
+[[package]]
+name = "markupsafe"
+version = "2.1.3"
+description = "Safely add untrusted strings to HTML/XML markup."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"},
+ {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"},
+ {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:f698de3fd0c4e6972b92290a45bd9b1536bffe8c6759c62471efaa8acb4c37bc"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:aa57bd9cf8ae831a362185ee444e15a93ecb2e344c8e52e4d721ea3ab6ef1823"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffcc3f7c66b5f5b7931a5aa68fc9cecc51e685ef90282f4a82f0f5e9b704ad11"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47d4f1c5f80fc62fdd7777d0d40a2e9dda0a05883ab11374334f6c4de38adffd"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1f67c7038d560d92149c060157d623c542173016c4babc0c1913cca0564b9939"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:9aad3c1755095ce347e26488214ef77e0485a3c34a50c5a5e2471dff60b9dd9c"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:14ff806850827afd6b07a5f32bd917fb7f45b046ba40c57abdb636674a8b559c"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8f9293864fe09b8149f0cc42ce56e3f0e54de883a9de90cd427f191c346eb2e1"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-win32.whl", hash = "sha256:715d3562f79d540f251b99ebd6d8baa547118974341db04f5ad06d5ea3eb8007"},
+ {file = "MarkupSafe-2.1.3-cp312-cp312-win_amd64.whl", hash = "sha256:1b8dd8c3fd14349433c79fa8abeb573a55fc0fdd769133baac1f5e07abf54aeb"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"},
+ {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"},
+ {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = "sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"},
+ {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"},
+ {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"},
+]
+
+[[package]]
+name = "mccabe"
+version = "0.7.0"
+description = "McCabe checker, plugin for flake8"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"},
+ {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
+]
+
+[[package]]
+name = "mdurl"
+version = "0.1.2"
+description = "Markdown URL utilities"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "mdurl-0.1.2-py3-none-any.whl", hash = "sha256:84008a41e51615a49fc9966191ff91509e3c40b939176e643fd50a5c2196b8f8"},
+ {file = "mdurl-0.1.2.tar.gz", hash = "sha256:bb413d29f5eea38f31dd4754dd7377d4465116fb207585f97bf925588687c1ba"},
+]
+
+[[package]]
+name = "methodtools"
+version = "0.4.7"
+description = "Expand standard functools to methods"
+optional = false
+python-versions = "*"
+files = [
+ {file = "methodtools-0.4.7.tar.gz", hash = "sha256:e213439dd64cfe60213f7015da6efe5dd4003fd89376db3baa09fe13ec2bb0ba"},
+]
+
+[package.dependencies]
+wirerope = ">=0.4.7"
+
+[package.extras]
+doc = ["sphinx"]
+test = ["functools32 (>=3.2.3-2)", "pytest (>=4.6.7)", "pytest-cov (>=2.6.1)"]
+
+[[package]]
+name = "more-itertools"
+version = "10.1.0"
+description = "More routines for operating on iterables, beyond itertools"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "more-itertools-10.1.0.tar.gz", hash = "sha256:626c369fa0eb37bac0291bce8259b332fd59ac792fa5497b59837309cd5b114a"},
+ {file = "more_itertools-10.1.0-py3-none-any.whl", hash = "sha256:64e0735fcfdc6f3464ea133afe8ea4483b1c5fe3a3d69852e6503b43a0b222e6"},
+]
+
+[[package]]
+name = "mypy"
+version = "1.7.1"
+description = "Optional static typing for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "mypy-1.7.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:12cce78e329838d70a204293e7b29af9faa3ab14899aec397798a4b41be7f340"},
+ {file = "mypy-1.7.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1484b8fa2c10adf4474f016e09d7a159602f3239075c7bf9f1627f5acf40ad49"},
+ {file = "mypy-1.7.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:31902408f4bf54108bbfb2e35369877c01c95adc6192958684473658c322c8a5"},
+ {file = "mypy-1.7.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f2c2521a8e4d6d769e3234350ba7b65ff5d527137cdcde13ff4d99114b0c8e7d"},
+ {file = "mypy-1.7.1-cp310-cp310-win_amd64.whl", hash = "sha256:fcd2572dd4519e8a6642b733cd3a8cfc1ef94bafd0c1ceed9c94fe736cb65b6a"},
+ {file = "mypy-1.7.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4b901927f16224d0d143b925ce9a4e6b3a758010673eeded9b748f250cf4e8f7"},
+ {file = "mypy-1.7.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2f7f6985d05a4e3ce8255396df363046c28bea790e40617654e91ed580ca7c51"},
+ {file = "mypy-1.7.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:944bdc21ebd620eafefc090cdf83158393ec2b1391578359776c00de00e8907a"},
+ {file = "mypy-1.7.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9c7ac372232c928fff0645d85f273a726970c014749b924ce5710d7d89763a28"},
+ {file = "mypy-1.7.1-cp311-cp311-win_amd64.whl", hash = "sha256:f6efc9bd72258f89a3816e3a98c09d36f079c223aa345c659622f056b760ab42"},
+ {file = "mypy-1.7.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:6dbdec441c60699288adf051f51a5d512b0d818526d1dcfff5a41f8cd8b4aaf1"},
+ {file = "mypy-1.7.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4fc3d14ee80cd22367caaaf6e014494415bf440980a3045bf5045b525680ac33"},
+ {file = "mypy-1.7.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2c6e4464ed5f01dc44dc9821caf67b60a4e5c3b04278286a85c067010653a0eb"},
+ {file = "mypy-1.7.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:d9b338c19fa2412f76e17525c1b4f2c687a55b156320acb588df79f2e6fa9fea"},
+ {file = "mypy-1.7.1-cp312-cp312-win_amd64.whl", hash = "sha256:204e0d6de5fd2317394a4eff62065614c4892d5a4d1a7ee55b765d7a3d9e3f82"},
+ {file = "mypy-1.7.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:84860e06ba363d9c0eeabd45ac0fde4b903ad7aa4f93cd8b648385a888e23200"},
+ {file = "mypy-1.7.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8c5091ebd294f7628eb25ea554852a52058ac81472c921150e3a61cdd68f75a7"},
+ {file = "mypy-1.7.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40716d1f821b89838589e5b3106ebbc23636ffdef5abc31f7cd0266db936067e"},
+ {file = "mypy-1.7.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5cf3f0c5ac72139797953bd50bc6c95ac13075e62dbfcc923571180bebb662e9"},
+ {file = "mypy-1.7.1-cp38-cp38-win_amd64.whl", hash = "sha256:78e25b2fd6cbb55ddfb8058417df193f0129cad5f4ee75d1502248e588d9e0d7"},
+ {file = "mypy-1.7.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:75c4d2a6effd015786c87774e04331b6da863fc3fc4e8adfc3b40aa55ab516fe"},
+ {file = "mypy-1.7.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2643d145af5292ee956aa0a83c2ce1038a3bdb26e033dadeb2f7066fb0c9abce"},
+ {file = "mypy-1.7.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75aa828610b67462ffe3057d4d8a4112105ed211596b750b53cbfe182f44777a"},
+ {file = "mypy-1.7.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ee5d62d28b854eb61889cde4e1dbc10fbaa5560cb39780c3995f6737f7e82120"},
+ {file = "mypy-1.7.1-cp39-cp39-win_amd64.whl", hash = "sha256:72cf32ce7dd3562373f78bd751f73c96cfb441de147cc2448a92c1a308bd0ca6"},
+ {file = "mypy-1.7.1-py3-none-any.whl", hash = "sha256:f7c5d642db47376a0cc130f0de6d055056e010debdaf0707cd2b0fc7e7ef30ea"},
+ {file = "mypy-1.7.1.tar.gz", hash = "sha256:fcb6d9afb1b6208b4c712af0dafdc650f518836065df0d4fb1d800f5d6773db2"},
+]
+
+[package.dependencies]
+mypy-extensions = ">=1.0.0"
+tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""}
+typing-extensions = ">=4.1.0"
+
+[package.extras]
+dmypy = ["psutil (>=4.0)"]
+install-types = ["pip"]
+mypyc = ["setuptools (>=50)"]
+reports = ["lxml"]
+
+[[package]]
+name = "mypy-extensions"
+version = "1.0.0"
+description = "Type system extensions for programs checked with the mypy type checker."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "mypy_extensions-1.0.0-py3-none-any.whl", hash = "sha256:4392f6c0eb8a5668a69e23d168ffa70f0be9ccfd32b5cc2d26a34ae5b844552d"},
+ {file = "mypy_extensions-1.0.0.tar.gz", hash = "sha256:75dbf8955dc00442a438fc4d0666508a9a97b6bd41aa2f0ffe9d2f2725af0782"},
+]
+
+[[package]]
+name = "nodeenv"
+version = "1.8.0"
+description = "Node.js virtual environment builder"
+optional = false
+python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*"
+files = [
+ {file = "nodeenv-1.8.0-py2.py3-none-any.whl", hash = "sha256:df865724bb3c3adc86b3876fa209771517b0cfe596beff01a92700e0e8be4cec"},
+ {file = "nodeenv-1.8.0.tar.gz", hash = "sha256:d51e0c37e64fbf47d017feac3145cdbb58836d7eee8c6f6d3b6880c5456227d2"},
+]
+
+[package.dependencies]
+setuptools = "*"
+
+[[package]]
+name = "numpy"
+version = "1.26.2"
+description = "Fundamental package for array computing in Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "numpy-1.26.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:3703fc9258a4a122d17043e57b35e5ef1c5a5837c3db8be396c82e04c1cf9b0f"},
+ {file = "numpy-1.26.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:cc392fdcbd21d4be6ae1bb4475a03ce3b025cd49a9be5345d76d7585aea69440"},
+ {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:36340109af8da8805d8851ef1d74761b3b88e81a9bd80b290bbfed61bd2b4f75"},
+ {file = "numpy-1.26.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bcc008217145b3d77abd3e4d5ef586e3bdfba8fe17940769f8aa09b99e856c00"},
+ {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ced40d4e9e18242f70dd02d739e44698df3dcb010d31f495ff00a31ef6014fe"},
+ {file = "numpy-1.26.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:b272d4cecc32c9e19911891446b72e986157e6a1809b7b56518b4f3755267523"},
+ {file = "numpy-1.26.2-cp310-cp310-win32.whl", hash = "sha256:22f8fc02fdbc829e7a8c578dd8d2e15a9074b630d4da29cda483337e300e3ee9"},
+ {file = "numpy-1.26.2-cp310-cp310-win_amd64.whl", hash = "sha256:26c9d33f8e8b846d5a65dd068c14e04018d05533b348d9eaeef6c1bd787f9919"},
+ {file = "numpy-1.26.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b96e7b9c624ef3ae2ae0e04fa9b460f6b9f17ad8b4bec6d7756510f1f6c0c841"},
+ {file = "numpy-1.26.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:aa18428111fb9a591d7a9cc1b48150097ba6a7e8299fb56bdf574df650e7d1f1"},
+ {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06fa1ed84aa60ea6ef9f91ba57b5ed963c3729534e6e54055fc151fad0423f0a"},
+ {file = "numpy-1.26.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96ca5482c3dbdd051bcd1fce8034603d6ebfc125a7bd59f55b40d8f5d246832b"},
+ {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:854ab91a2906ef29dc3925a064fcd365c7b4da743f84b123002f6139bcb3f8a7"},
+ {file = "numpy-1.26.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f43740ab089277d403aa07567be138fc2a89d4d9892d113b76153e0e412409f8"},
+ {file = "numpy-1.26.2-cp311-cp311-win32.whl", hash = "sha256:a2bbc29fcb1771cd7b7425f98b05307776a6baf43035d3b80c4b0f29e9545186"},
+ {file = "numpy-1.26.2-cp311-cp311-win_amd64.whl", hash = "sha256:2b3fca8a5b00184828d12b073af4d0fc5fdd94b1632c2477526f6bd7842d700d"},
+ {file = "numpy-1.26.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a4cd6ed4a339c21f1d1b0fdf13426cb3b284555c27ac2f156dfdaaa7e16bfab0"},
+ {file = "numpy-1.26.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:5d5244aabd6ed7f312268b9247be47343a654ebea52a60f002dc70c769048e75"},
+ {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6a3cdb4d9c70e6b8c0814239ead47da00934666f668426fc6e94cce869e13fd7"},
+ {file = "numpy-1.26.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:aa317b2325f7aa0a9471663e6093c210cb2ae9c0ad824732b307d2c51983d5b6"},
+ {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:174a8880739c16c925799c018f3f55b8130c1f7c8e75ab0a6fa9d41cab092fd6"},
+ {file = "numpy-1.26.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:f79b231bf5c16b1f39c7f4875e1ded36abee1591e98742b05d8a0fb55d8a3eec"},
+ {file = "numpy-1.26.2-cp312-cp312-win32.whl", hash = "sha256:4a06263321dfd3598cacb252f51e521a8cb4b6df471bb12a7ee5cbab20ea9167"},
+ {file = "numpy-1.26.2-cp312-cp312-win_amd64.whl", hash = "sha256:b04f5dc6b3efdaab541f7857351aac359e6ae3c126e2edb376929bd3b7f92d7e"},
+ {file = "numpy-1.26.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4eb8df4bf8d3d90d091e0146f6c28492b0be84da3e409ebef54349f71ed271ef"},
+ {file = "numpy-1.26.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1a13860fdcd95de7cf58bd6f8bc5a5ef81c0b0625eb2c9a783948847abbef2c2"},
+ {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:64308ebc366a8ed63fd0bf426b6a9468060962f1a4339ab1074c228fa6ade8e3"},
+ {file = "numpy-1.26.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baf8aab04a2c0e859da118f0b38617e5ee65d75b83795055fb66c0d5e9e9b818"},
+ {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d73a3abcac238250091b11caef9ad12413dab01669511779bc9b29261dd50210"},
+ {file = "numpy-1.26.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:b361d369fc7e5e1714cf827b731ca32bff8d411212fccd29ad98ad622449cc36"},
+ {file = "numpy-1.26.2-cp39-cp39-win32.whl", hash = "sha256:bd3f0091e845164a20bd5a326860c840fe2af79fa12e0469a12768a3ec578d80"},
+ {file = "numpy-1.26.2-cp39-cp39-win_amd64.whl", hash = "sha256:2beef57fb031dcc0dc8fa4fe297a742027b954949cabb52a2a376c144e5e6060"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:1cc3d5029a30fb5f06704ad6b23b35e11309491c999838c31f124fee32107c79"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94cc3c222bb9fb5a12e334d0479b97bb2df446fbe622b470928f5284ffca3f8d"},
+ {file = "numpy-1.26.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:fe6b44fb8fcdf7eda4ef4461b97b3f63c466b27ab151bec2366db8b197387841"},
+ {file = "numpy-1.26.2.tar.gz", hash = "sha256:f65738447676ab5777f11e6bbbdb8ce11b785e105f690bc45966574816b6d3ea"},
+]
+
+[[package]]
+name = "nvidia-cublas-cu11"
+version = "11.10.3.66"
+description = "CUBLAS native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-manylinux1_x86_64.whl", hash = "sha256:d32e4d75f94ddfb93ea0a5dda08389bcc65d8916a25cb9f37ac89edaeed3bded"},
+ {file = "nvidia_cublas_cu11-11.10.3.66-py3-none-win_amd64.whl", hash = "sha256:8ac17ba6ade3ed56ab898a036f9ae0756f1e81052a317bf98f8c6d18dc3ae49e"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cuda-nvrtc-cu11"
+version = "11.7.99"
+description = "NVRTC native runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:9f1562822ea264b7e34ed5930567e89242d266448e936b85bc97a3370feabb03"},
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:f7d9610d9b7c331fa0da2d1b2858a4a8315e6d49765091d28711c8946e7425e7"},
+ {file = "nvidia_cuda_nvrtc_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:f2effeb1309bdd1b3854fc9b17eaf997808f8b25968ce0c7070945c4265d64a3"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cuda-runtime-cu11"
+version = "11.7.99"
+description = "CUDA Runtime native Libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-manylinux1_x86_64.whl", hash = "sha256:cc768314ae58d2641f07eac350f40f99dcb35719c4faff4bc458a7cd2b119e31"},
+ {file = "nvidia_cuda_runtime_cu11-11.7.99-py3-none-win_amd64.whl", hash = "sha256:bc77fa59a7679310df9d5c70ab13c4e34c64ae2124dd1efd7e5474b71be125c7"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "nvidia-cudnn-cu11"
+version = "8.5.0.96"
+description = "cuDNN runtime libraries"
+optional = false
+python-versions = ">=3"
+files = [
+ {file = "nvidia_cudnn_cu11-8.5.0.96-2-py3-none-manylinux1_x86_64.whl", hash = "sha256:402f40adfc6f418f9dae9ab402e773cfed9beae52333f6d86ae3107a1b9527e7"},
+ {file = "nvidia_cudnn_cu11-8.5.0.96-py3-none-manylinux1_x86_64.whl", hash = "sha256:71f8111eb830879ff2836db3cccf03bbd735df9b0d17cd93761732ac50a8a108"},
+]
+
+[package.dependencies]
+setuptools = "*"
+wheel = "*"
+
+[[package]]
+name = "opencv-python-headless"
+version = "4.8.1.78"
+description = "Wrapper package for OpenCV python bindings."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "opencv-python-headless-4.8.1.78.tar.gz", hash = "sha256:bc7197b42352f6f865c302a49140b889ec7cd957dd697e2d7fc016ad0d3f28f1"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:f3a33f644249f9ce1c913eac580e4b3ef4ce7cab0a71900274708959c2feb5e3"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:2c7d45721df9801c4dcd34683a15caa0e30f38b185263fec04a6eb274bc720f0"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3b6bd6e1132b6f5dcb3a5bfe30fc4d341a7bfb26134da349a06c9255288ded94"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58e70d2f0915fe23e02c6e405588276c9397844a47d38b9c87fac5f7f9ba2dcc"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-win32.whl", hash = "sha256:382f8c7a6a14f80091284eecedd52cee4812231ee0eff1118592197b538d9252"},
+ {file = "opencv_python_headless-4.8.1.78-cp37-abi3-win_amd64.whl", hash = "sha256:0a0f1e9f836f7d5bad1dd164694944c8761711cbdf4b36ebbd4815a8ef731079"},
+]
+
+[package.dependencies]
+numpy = [
+ {version = ">=1.21.0", markers = "python_version == \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""},
+ {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" or python_version > \"3.9\" or python_version >= \"3.9\" and platform_system != \"Darwin\" or python_version >= \"3.9\" and platform_machine != \"arm64\""},
+]
+
+[[package]]
+name = "ordered-set"
+version = "4.1.0"
+description = "An OrderedSet is a custom MutableSet that remembers its order, so that every"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "ordered-set-4.1.0.tar.gz", hash = "sha256:694a8e44c87657c59292ede72891eb91d34131f6531463aab3009191c77364a8"},
+ {file = "ordered_set-4.1.0-py3-none-any.whl", hash = "sha256:046e1132c71fcf3330438a539928932caf51ddbc582496833e23de611de14562"},
+]
+
+[package.extras]
+dev = ["black", "mypy", "pytest"]
+
+[[package]]
+name = "orjson"
+version = "3.9.10"
+description = "Fast, correct Python JSON library supporting dataclasses, datetimes, and numpy"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "orjson-3.9.10-cp310-cp310-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:c18a4da2f50050a03d1da5317388ef84a16013302a5281d6f64e4a3f406aabc4"},
+ {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5148bab4d71f58948c7c39d12b14a9005b6ab35a0bdf317a8ade9a9e4d9d0bd5"},
+ {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:4cf7837c3b11a2dfb589f8530b3cff2bd0307ace4c301e8997e95c7468c1378e"},
+ {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c62b6fa2961a1dcc51ebe88771be5319a93fd89bd247c9ddf732bc250507bc2b"},
+ {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:deeb3922a7a804755bbe6b5be9b312e746137a03600f488290318936c1a2d4dc"},
+ {file = "orjson-3.9.10-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1234dc92d011d3554d929b6cf058ac4a24d188d97be5e04355f1b9223e98bbe9"},
+ {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:06ad5543217e0e46fd7ab7ea45d506c76f878b87b1b4e369006bdb01acc05a83"},
+ {file = "orjson-3.9.10-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4fd72fab7bddce46c6826994ce1e7de145ae1e9e106ebb8eb9ce1393ca01444d"},
+ {file = "orjson-3.9.10-cp310-none-win32.whl", hash = "sha256:b5b7d4a44cc0e6ff98da5d56cde794385bdd212a86563ac321ca64d7f80c80d1"},
+ {file = "orjson-3.9.10-cp310-none-win_amd64.whl", hash = "sha256:61804231099214e2f84998316f3238c4c2c4aaec302df12b21a64d72e2a135c7"},
+ {file = "orjson-3.9.10-cp311-cp311-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:cff7570d492bcf4b64cc862a6e2fb77edd5e5748ad715f487628f102815165e9"},
+ {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed8bc367f725dfc5cabeed1ae079d00369900231fbb5a5280cf0736c30e2adf7"},
+ {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c812312847867b6335cfb264772f2a7e85b3b502d3a6b0586aa35e1858528ab1"},
+ {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9edd2856611e5050004f4722922b7b1cd6268da34102667bd49d2a2b18bafb81"},
+ {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:674eb520f02422546c40401f4efaf8207b5e29e420c17051cddf6c02783ff5ca"},
+ {file = "orjson-3.9.10-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d0dc4310da8b5f6415949bd5ef937e60aeb0eb6b16f95041b5e43e6200821fb"},
+ {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e99c625b8c95d7741fe057585176b1b8783d46ed4b8932cf98ee145c4facf499"},
+ {file = "orjson-3.9.10-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:ec6f18f96b47299c11203edfbdc34e1b69085070d9a3d1f302810cc23ad36bf3"},
+ {file = "orjson-3.9.10-cp311-none-win32.whl", hash = "sha256:ce0a29c28dfb8eccd0f16219360530bc3cfdf6bf70ca384dacd36e6c650ef8e8"},
+ {file = "orjson-3.9.10-cp311-none-win_amd64.whl", hash = "sha256:cf80b550092cc480a0cbd0750e8189247ff45457e5a023305f7ef1bcec811616"},
+ {file = "orjson-3.9.10-cp312-cp312-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:602a8001bdf60e1a7d544be29c82560a7b49319a0b31d62586548835bbe2c862"},
+ {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f295efcd47b6124b01255d1491f9e46f17ef40d3d7eabf7364099e463fb45f0f"},
+ {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:92af0d00091e744587221e79f68d617b432425a7e59328ca4c496f774a356071"},
+ {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5a02360e73e7208a872bf65a7554c9f15df5fe063dc047f79738998b0506a14"},
+ {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:858379cbb08d84fe7583231077d9a36a1a20eb72f8c9076a45df8b083724ad1d"},
+ {file = "orjson-3.9.10-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666c6fdcaac1f13eb982b649e1c311c08d7097cbda24f32612dae43648d8db8d"},
+ {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:3fb205ab52a2e30354640780ce4587157a9563a68c9beaf52153e1cea9aa0921"},
+ {file = "orjson-3.9.10-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7ec960b1b942ee3c69323b8721df2a3ce28ff40e7ca47873ae35bfafeb4555ca"},
+ {file = "orjson-3.9.10-cp312-none-win_amd64.whl", hash = "sha256:3e892621434392199efb54e69edfff9f699f6cc36dd9553c5bf796058b14b20d"},
+ {file = "orjson-3.9.10-cp38-cp38-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:8b9ba0ccd5a7f4219e67fbbe25e6b4a46ceef783c42af7dbc1da548eb28b6531"},
+ {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e2ecd1d349e62e3960695214f40939bbfdcaeaaa62ccc638f8e651cf0970e5f"},
+ {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7f433be3b3f4c66016d5a20e5b4444ef833a1f802ced13a2d852c637f69729c1"},
+ {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4689270c35d4bb3102e103ac43c3f0b76b169760aff8bcf2d401a3e0e58cdb7f"},
+ {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4bd176f528a8151a6efc5359b853ba3cc0e82d4cd1fab9c1300c5d957dc8f48c"},
+ {file = "orjson-3.9.10-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a2ce5ea4f71681623f04e2b7dadede3c7435dfb5e5e2d1d0ec25b35530e277b"},
+ {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:49f8ad582da6e8d2cf663c4ba5bf9f83cc052570a3a767487fec6af839b0e777"},
+ {file = "orjson-3.9.10-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:2a11b4b1a8415f105d989876a19b173f6cdc89ca13855ccc67c18efbd7cbd1f8"},
+ {file = "orjson-3.9.10-cp38-none-win32.whl", hash = "sha256:a353bf1f565ed27ba71a419b2cd3db9d6151da426b61b289b6ba1422a702e643"},
+ {file = "orjson-3.9.10-cp38-none-win_amd64.whl", hash = "sha256:e28a50b5be854e18d54f75ef1bb13e1abf4bc650ab9d635e4258c58e71eb6ad5"},
+ {file = "orjson-3.9.10-cp39-cp39-macosx_10_15_x86_64.macosx_11_0_arm64.macosx_10_15_universal2.whl", hash = "sha256:ee5926746232f627a3be1cc175b2cfad24d0170d520361f4ce3fa2fd83f09e1d"},
+ {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a73160e823151f33cdc05fe2cea557c5ef12fdf276ce29bb4f1c571c8368a60"},
+ {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c338ed69ad0b8f8f8920c13f529889fe0771abbb46550013e3c3d01e5174deef"},
+ {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5869e8e130e99687d9e4be835116c4ebd83ca92e52e55810962446d841aba8de"},
+ {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d2c1e559d96a7f94a4f581e2a32d6d610df5840881a8cba8f25e446f4d792df3"},
+ {file = "orjson-3.9.10-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:81a3a3a72c9811b56adf8bcc829b010163bb2fc308877e50e9910c9357e78521"},
+ {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:7f8fb7f5ecf4f6355683ac6881fd64b5bb2b8a60e3ccde6ff799e48791d8f864"},
+ {file = "orjson-3.9.10-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:c943b35ecdf7123b2d81d225397efddf0bce2e81db2f3ae633ead38e85cd5ade"},
+ {file = "orjson-3.9.10-cp39-none-win32.whl", hash = "sha256:fb0b361d73f6b8eeceba47cd37070b5e6c9de5beaeaa63a1cb35c7e1a73ef088"},
+ {file = "orjson-3.9.10-cp39-none-win_amd64.whl", hash = "sha256:b90f340cb6397ec7a854157fac03f0c82b744abdd1c0941a024c3c29d1340aff"},
+ {file = "orjson-3.9.10.tar.gz", hash = "sha256:9ebbdbd6a046c304b1845e96fbcc5559cd296b4dfd3ad2509e33c4d9ce07d6a1"},
+]
+
+[[package]]
+name = "overrides"
+version = "7.4.0"
+description = "A decorator to automatically detect mismatch when overriding a method."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "overrides-7.4.0-py3-none-any.whl", hash = "sha256:3ad24583f86d6d7a49049695efe9933e67ba62f0c7625d53c59fa832ce4b8b7d"},
+ {file = "overrides-7.4.0.tar.gz", hash = "sha256:9502a3cca51f4fac40b5feca985b6703a5c1f6ad815588a7ca9e285b9dca6757"},
+]
+
+[[package]]
+name = "packaging"
+version = "23.2"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.2-py3-none-any.whl", hash = "sha256:8c491190033a9af7e1d931d0b5dacc2ef47509b34dd0de67ed209b5203fc88c7"},
+ {file = "packaging-23.2.tar.gz", hash = "sha256:048fb0e9405036518eaaf48a55953c750c11e1a1b68e0dd1a9d62ed0c092cfc5"},
+]
+
+[[package]]
+name = "pastel"
+version = "0.2.1"
+description = "Bring colors to your terminal."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pastel-0.2.1-py2.py3-none-any.whl", hash = "sha256:4349225fcdf6c2bb34d483e523475de5bb04a5c10ef711263452cb37d7dd4364"},
+ {file = "pastel-0.2.1.tar.gz", hash = "sha256:e6581ac04e973cac858828c6202c1e1e81fee1dc7de7683f3e1ffe0bfd8a573d"},
+]
+
+[[package]]
+name = "pathspec"
+version = "0.11.2"
+description = "Utility library for gitignore style pattern matching of file paths."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pathspec-0.11.2-py3-none-any.whl", hash = "sha256:1d6ed233af05e679efb96b1851550ea95bbb64b7c490b0f5aa52996c11e92a20"},
+ {file = "pathspec-0.11.2.tar.gz", hash = "sha256:e0d8d0ac2f12da61956eb2306b69f9469b42f4deb0f3cb6ed47b9cce9996ced3"},
+]
+
+[[package]]
+name = "pbr"
+version = "6.0.0"
+description = "Python Build Reasonableness"
+optional = false
+python-versions = ">=2.6"
+files = [
+ {file = "pbr-6.0.0-py2.py3-none-any.whl", hash = "sha256:4a7317d5e3b17a3dccb6a8cfe67dab65b20551404c52c8ed41279fa4f0cb4cda"},
+ {file = "pbr-6.0.0.tar.gz", hash = "sha256:d1377122a5a00e2f940ee482999518efe16d745d423a670c27773dfbc3c9a7d9"},
+]
+
+[[package]]
+name = "pep8-naming"
+version = "0.13.3"
+description = "Check PEP-8 naming conventions, plugin for flake8"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pep8-naming-0.13.3.tar.gz", hash = "sha256:1705f046dfcd851378aac3be1cd1551c7c1e5ff363bacad707d43007877fa971"},
+ {file = "pep8_naming-0.13.3-py3-none-any.whl", hash = "sha256:1a86b8c71a03337c97181917e2b472f0f5e4ccb06844a0d6f0a33522549e7a80"},
+]
+
+[package.dependencies]
+flake8 = ">=5.0.0"
+
+[[package]]
+name = "pillow"
+version = "10.1.0"
+description = "Python Imaging Library (Fork)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "Pillow-10.1.0-cp310-cp310-macosx_10_10_x86_64.whl", hash = "sha256:1ab05f3db77e98f93964697c8efc49c7954b08dd61cff526b7f2531a22410106"},
+ {file = "Pillow-10.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:6932a7652464746fcb484f7fc3618e6503d2066d853f68a4bd97193a3996e273"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5f63b5a68daedc54c7c3464508d8c12075e56dcfbd42f8c1bf40169061ae666"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c0949b55eb607898e28eaccb525ab104b2d86542a85c74baf3a6dc24002edec2"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_aarch64.whl", hash = "sha256:ae88931f93214777c7a3aa0a8f92a683f83ecde27f65a45f95f22d289a69e593"},
+ {file = "Pillow-10.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:b0eb01ca85b2361b09480784a7931fc648ed8b7836f01fb9241141b968feb1db"},
+ {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:d27b5997bdd2eb9fb199982bb7eb6164db0426904020dc38c10203187ae2ff2f"},
+ {file = "Pillow-10.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7df5608bc38bd37ef585ae9c38c9cd46d7c81498f086915b0f97255ea60c2818"},
+ {file = "Pillow-10.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:41f67248d92a5e0a2076d3517d8d4b1e41a97e2df10eb8f93106c89107f38b57"},
+ {file = "Pillow-10.1.0-cp311-cp311-macosx_10_10_x86_64.whl", hash = "sha256:1fb29c07478e6c06a46b867e43b0bcdb241b44cc52be9bc25ce5944eed4648e7"},
+ {file = "Pillow-10.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2cdc65a46e74514ce742c2013cd4a2d12e8553e3a2563c64879f7c7e4d28bce7"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50d08cd0a2ecd2a8657bd3d82c71efd5a58edb04d9308185d66c3a5a5bed9610"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:062a1610e3bc258bff2328ec43f34244fcec972ee0717200cb1425214fe5b839"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_aarch64.whl", hash = "sha256:61f1a9d247317fa08a308daaa8ee7b3f760ab1809ca2da14ecc88ae4257d6172"},
+ {file = "Pillow-10.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:a646e48de237d860c36e0db37ecaecaa3619e6f3e9d5319e527ccbc8151df061"},
+ {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:47e5bf85b80abc03be7455c95b6d6e4896a62f6541c1f2ce77a7d2bb832af262"},
+ {file = "Pillow-10.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:a92386125e9ee90381c3369f57a2a50fa9e6aa8b1cf1d9c4b200d41a7dd8e992"},
+ {file = "Pillow-10.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:0f7c276c05a9767e877a0b4c5050c8bee6a6d960d7f0c11ebda6b99746068c2a"},
+ {file = "Pillow-10.1.0-cp312-cp312-macosx_10_10_x86_64.whl", hash = "sha256:a89b8312d51715b510a4fe9fc13686283f376cfd5abca8cd1c65e4c76e21081b"},
+ {file = "Pillow-10.1.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:00f438bb841382b15d7deb9a05cc946ee0f2c352653c7aa659e75e592f6fa17d"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3d929a19f5469b3f4df33a3df2983db070ebb2088a1e145e18facbc28cae5b27"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9a92109192b360634a4489c0c756364c0c3a2992906752165ecb50544c251312"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_aarch64.whl", hash = "sha256:0248f86b3ea061e67817c47ecbe82c23f9dd5d5226200eb9090b3873d3ca32de"},
+ {file = "Pillow-10.1.0-cp312-cp312-manylinux_2_28_x86_64.whl", hash = "sha256:9882a7451c680c12f232a422730f986a1fcd808da0fd428f08b671237237d651"},
+ {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1c3ac5423c8c1da5928aa12c6e258921956757d976405e9467c5f39d1d577a4b"},
+ {file = "Pillow-10.1.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:806abdd8249ba3953c33742506fe414880bad78ac25cc9a9b1c6ae97bedd573f"},
+ {file = "Pillow-10.1.0-cp312-cp312-win_amd64.whl", hash = "sha256:eaed6977fa73408b7b8a24e8b14e59e1668cfc0f4c40193ea7ced8e210adf996"},
+ {file = "Pillow-10.1.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:fe1e26e1ffc38be097f0ba1d0d07fcade2bcfd1d023cda5b29935ae8052bd793"},
+ {file = "Pillow-10.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7a7e3daa202beb61821c06d2517428e8e7c1aab08943e92ec9e5755c2fc9ba5e"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:24fadc71218ad2b8ffe437b54876c9382b4a29e030a05a9879f615091f42ffc2"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fa1d323703cfdac2036af05191b969b910d8f115cf53093125e4058f62012c9a"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_aarch64.whl", hash = "sha256:912e3812a1dbbc834da2b32299b124b5ddcb664ed354916fd1ed6f193f0e2d01"},
+ {file = "Pillow-10.1.0-cp38-cp38-manylinux_2_28_x86_64.whl", hash = "sha256:7dbaa3c7de82ef37e7708521be41db5565004258ca76945ad74a8e998c30af8d"},
+ {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:9d7bc666bd8c5a4225e7ac71f2f9d12466ec555e89092728ea0f5c0c2422ea80"},
+ {file = "Pillow-10.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:baada14941c83079bf84c037e2d8b7506ce201e92e3d2fa0d1303507a8538212"},
+ {file = "Pillow-10.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:2ef6721c97894a7aa77723740a09547197533146fba8355e86d6d9a4a1056b14"},
+ {file = "Pillow-10.1.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:0a026c188be3b443916179f5d04548092e253beb0c3e2ee0a4e2cdad72f66099"},
+ {file = "Pillow-10.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:04f6f6149f266a100374ca3cc368b67fb27c4af9f1cc8cb6306d849dcdf12616"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb40c011447712d2e19cc261c82655f75f32cb724788df315ed992a4d65696bb"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1a8413794b4ad9719346cd9306118450b7b00d9a15846451549314a58ac42219"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_aarch64.whl", hash = "sha256:c9aeea7b63edb7884b031a35305629a7593272b54f429a9869a4f63a1bf04c34"},
+ {file = "Pillow-10.1.0-cp39-cp39-manylinux_2_28_x86_64.whl", hash = "sha256:b4005fee46ed9be0b8fb42be0c20e79411533d1fd58edabebc0dd24626882cfd"},
+ {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4d0152565c6aa6ebbfb1e5d8624140a440f2b99bf7afaafbdbf6430426497f28"},
+ {file = "Pillow-10.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d921bc90b1defa55c9917ca6b6b71430e4286fc9e44c55ead78ca1a9f9eba5f2"},
+ {file = "Pillow-10.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:cfe96560c6ce2f4c07d6647af2d0f3c54cc33289894ebd88cfbb3bcd5391e256"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-macosx_10_10_x86_64.whl", hash = "sha256:937bdc5a7f5343d1c97dc98149a0be7eb9704e937fe3dc7140e229ae4fc572a7"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1c25762197144e211efb5f4e8ad656f36c8d214d390585d1d21281f46d556ba"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:afc8eef765d948543a4775f00b7b8c079b3321d6b675dde0d02afa2ee23000b4"},
+ {file = "Pillow-10.1.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:883f216eac8712b83a63f41b76ddfb7b2afab1b74abbb413c5df6680f071a6b9"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-macosx_10_10_x86_64.whl", hash = "sha256:b920e4d028f6442bea9a75b7491c063f0b9a3972520731ed26c83e254302eb1e"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c41d960babf951e01a49c9746f92c5a7e0d939d1652d7ba30f6b3090f27e412"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:1fafabe50a6977ac70dfe829b2d5735fd54e190ab55259ec8aea4aaea412fa0b"},
+ {file = "Pillow-10.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:3b834f4b16173e5b92ab6566f0473bfb09f939ba14b23b8da1f54fa63e4b623f"},
+ {file = "Pillow-10.1.0.tar.gz", hash = "sha256:e6bf8de6c36ed96c86ea3b6e1d5273c53f46ef518a062464cd7ef5dd2cf92e38"},
+]
+
+[package.extras]
+docs = ["furo", "olefile", "sphinx (>=2.4)", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-removed-in", "sphinxext-opengraph"]
+tests = ["check-manifest", "coverage", "defusedxml", "markdown2", "olefile", "packaging", "pyroma", "pytest", "pytest-cov", "pytest-timeout"]
+
+[[package]]
+name = "platformdirs"
+version = "4.1.0"
+description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "platformdirs-4.1.0-py3-none-any.whl", hash = "sha256:11c8f37bcca40db96d8144522d925583bdb7a31f7b0e37e3ed4318400a8e2380"},
+ {file = "platformdirs-4.1.0.tar.gz", hash = "sha256:906d548203468492d432bcb294d4bc2fff751bf84971fbb2c10918cc206ee420"},
+]
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.1)", "sphinx-autodoc-typehints (>=1.24)"]
+test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)"]
+
+[[package]]
+name = "pluggy"
+version = "1.3.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+ {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "ply"
+version = "3.11"
+description = "Python Lex & Yacc"
+optional = false
+python-versions = "*"
+files = [
+ {file = "ply-3.11-py2.py3-none-any.whl", hash = "sha256:096f9b8350b65ebd2fd1346b12452efe5b9607f7482813ffca50c22722a807ce"},
+ {file = "ply-3.11.tar.gz", hash = "sha256:00c7c1aaa88358b9c765b6d3000c6eec0ba42abca5351b095321aef446081da3"},
+]
+
+[[package]]
+name = "poethepoet"
+version = "0.24.4"
+description = "A task runner that works well with poetry."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "poethepoet-0.24.4-py3-none-any.whl", hash = "sha256:fb4ea35d7f40fe2081ea917d2e4102e2310fda2cde78974050ca83896e229075"},
+ {file = "poethepoet-0.24.4.tar.gz", hash = "sha256:ff4220843a87c888cbcb5312c8905214701d0af60ac7271795baa8369b428fef"},
+]
+
+[package.dependencies]
+pastel = ">=0.2.1,<0.3.0"
+tomli = ">=1.2.2"
+
+[package.extras]
+poetry-plugin = ["poetry (>=1.0,<2.0)"]
+
+[[package]]
+name = "pre-commit"
+version = "3.5.0"
+description = "A framework for managing and maintaining multi-language pre-commit hooks."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pre_commit-3.5.0-py2.py3-none-any.whl", hash = "sha256:841dc9aef25daba9a0238cd27984041fa0467b4199fc4852e27950664919f660"},
+ {file = "pre_commit-3.5.0.tar.gz", hash = "sha256:5804465c675b659b0862f07907f96295d490822a450c4c40e747d0b1c6ebcb32"},
+]
+
+[package.dependencies]
+cfgv = ">=2.0.0"
+identify = ">=1.0.0"
+nodeenv = ">=0.11.1"
+pyyaml = ">=5.1"
+virtualenv = ">=20.10.0"
+
+[[package]]
+name = "protobuf"
+version = "4.25.1"
+description = ""
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "protobuf-4.25.1-cp310-abi3-win32.whl", hash = "sha256:193f50a6ab78a970c9b4f148e7c750cfde64f59815e86f686c22e26b4fe01ce7"},
+ {file = "protobuf-4.25.1-cp310-abi3-win_amd64.whl", hash = "sha256:3497c1af9f2526962f09329fd61a36566305e6c72da2590ae0d7d1322818843b"},
+ {file = "protobuf-4.25.1-cp37-abi3-macosx_10_9_universal2.whl", hash = "sha256:0bf384e75b92c42830c0a679b0cd4d6e2b36ae0cf3dbb1e1dfdda48a244f4bcd"},
+ {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_aarch64.whl", hash = "sha256:0f881b589ff449bf0b931a711926e9ddaad3b35089cc039ce1af50b21a4ae8cb"},
+ {file = "protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl", hash = "sha256:ca37bf6a6d0046272c152eea90d2e4ef34593aaa32e8873fc14c16440f22d4b7"},
+ {file = "protobuf-4.25.1-cp38-cp38-win32.whl", hash = "sha256:abc0525ae2689a8000837729eef7883b9391cd6aa7950249dcf5a4ede230d5dd"},
+ {file = "protobuf-4.25.1-cp38-cp38-win_amd64.whl", hash = "sha256:1484f9e692091450e7edf418c939e15bfc8fc68856e36ce399aed6889dae8bb0"},
+ {file = "protobuf-4.25.1-cp39-cp39-win32.whl", hash = "sha256:8bdbeaddaac52d15c6dce38c71b03038ef7772b977847eb6d374fc86636fa510"},
+ {file = "protobuf-4.25.1-cp39-cp39-win_amd64.whl", hash = "sha256:becc576b7e6b553d22cbdf418686ee4daa443d7217999125c045ad56322dda10"},
+ {file = "protobuf-4.25.1-py3-none-any.whl", hash = "sha256:a19731d5e83ae4737bb2a089605e636077ac001d18781b3cf489b9546c7c80d6"},
+ {file = "protobuf-4.25.1.tar.gz", hash = "sha256:57d65074b4f5baa4ab5da1605c02be90ac20c8b40fb137d6a8df9f416b0d0ce2"},
+]
+
+[[package]]
+name = "psutil"
+version = "5.9.6"
+description = "Cross-platform lib for process and system monitoring in Python."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "psutil-5.9.6-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:fb8a697f11b0f5994550555fcfe3e69799e5b060c8ecf9e2f75c69302cc35c0d"},
+ {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:91ecd2d9c00db9817a4b4192107cf6954addb5d9d67a969a4f436dbc9200f88c"},
+ {file = "psutil-5.9.6-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:10e8c17b4f898d64b121149afb136c53ea8b68c7531155147867b7b1ac9e7e28"},
+ {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:18cd22c5db486f33998f37e2bb054cc62fd06646995285e02a51b1e08da97017"},
+ {file = "psutil-5.9.6-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:ca2780f5e038379e520281e4c032dddd086906ddff9ef0d1b9dcf00710e5071c"},
+ {file = "psutil-5.9.6-cp27-none-win32.whl", hash = "sha256:70cb3beb98bc3fd5ac9ac617a327af7e7f826373ee64c80efd4eb2856e5051e9"},
+ {file = "psutil-5.9.6-cp27-none-win_amd64.whl", hash = "sha256:51dc3d54607c73148f63732c727856f5febec1c7c336f8f41fcbd6315cce76ac"},
+ {file = "psutil-5.9.6-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:c69596f9fc2f8acd574a12d5f8b7b1ba3765a641ea5d60fb4736bf3c08a8214a"},
+ {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:92e0cc43c524834af53e9d3369245e6cc3b130e78e26100d1f63cdb0abeb3d3c"},
+ {file = "psutil-5.9.6-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:748c9dd2583ed86347ed65d0035f45fa8c851e8d90354c122ab72319b5f366f4"},
+ {file = "psutil-5.9.6-cp36-cp36m-win32.whl", hash = "sha256:3ebf2158c16cc69db777e3c7decb3c0f43a7af94a60d72e87b2823aebac3d602"},
+ {file = "psutil-5.9.6-cp36-cp36m-win_amd64.whl", hash = "sha256:ff18b8d1a784b810df0b0fff3bcb50ab941c3b8e2c8de5726f9c71c601c611aa"},
+ {file = "psutil-5.9.6-cp37-abi3-win32.whl", hash = "sha256:a6f01f03bf1843280f4ad16f4bde26b817847b4c1a0db59bf6419807bc5ce05c"},
+ {file = "psutil-5.9.6-cp37-abi3-win_amd64.whl", hash = "sha256:6e5fb8dc711a514da83098bc5234264e551ad980cec5f85dabf4d38ed6f15e9a"},
+ {file = "psutil-5.9.6-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:daecbcbd29b289aac14ece28eca6a3e60aa361754cf6da3dfb20d4d32b6c7f57"},
+ {file = "psutil-5.9.6.tar.gz", hash = "sha256:e4b92ddcd7dd4cdd3f900180ea1e104932c7bce234fb88976e2a3b296441225a"},
+]
+
+[package.extras]
+test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+
+[[package]]
+name = "pycodestyle"
+version = "2.11.1"
+description = "Python style guide checker"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pycodestyle-2.11.1-py2.py3-none-any.whl", hash = "sha256:44fe31000b2d866f2e41841b18528a505fbd7fef9017b04eff4e2648a0fadc67"},
+ {file = "pycodestyle-2.11.1.tar.gz", hash = "sha256:41ba0e7afc9752dfb53ced5489e89f8186be00e599e712660695b7a75ff2663f"},
+]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pydantic"
+version = "1.10.13"
+description = "Data validation and settings management using python type hints"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydantic-1.10.13-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:efff03cc7a4f29d9009d1c96ceb1e7a70a65cfe86e89d34e4a5f2ab1e5693737"},
+ {file = "pydantic-1.10.13-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3ecea2b9d80e5333303eeb77e180b90e95eea8f765d08c3d278cd56b00345d01"},
+ {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1740068fd8e2ef6eb27a20e5651df000978edce6da6803c2bef0bc74540f9548"},
+ {file = "pydantic-1.10.13-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84bafe2e60b5e78bc64a2941b4c071a4b7404c5c907f5f5a99b0139781e69ed8"},
+ {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bc0898c12f8e9c97f6cd44c0ed70d55749eaf783716896960b4ecce2edfd2d69"},
+ {file = "pydantic-1.10.13-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:654db58ae399fe6434e55325a2c3e959836bd17a6f6a0b6ca8107ea0571d2e17"},
+ {file = "pydantic-1.10.13-cp310-cp310-win_amd64.whl", hash = "sha256:75ac15385a3534d887a99c713aa3da88a30fbd6204a5cd0dc4dab3d770b9bd2f"},
+ {file = "pydantic-1.10.13-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:c553f6a156deb868ba38a23cf0df886c63492e9257f60a79c0fd8e7173537653"},
+ {file = "pydantic-1.10.13-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:5e08865bc6464df8c7d61439ef4439829e3ab62ab1669cddea8dd00cd74b9ffe"},
+ {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e31647d85a2013d926ce60b84f9dd5300d44535a9941fe825dc349ae1f760df9"},
+ {file = "pydantic-1.10.13-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:210ce042e8f6f7c01168b2d84d4c9eb2b009fe7bf572c2266e235edf14bacd80"},
+ {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:8ae5dd6b721459bfa30805f4c25880e0dd78fc5b5879f9f7a692196ddcb5a580"},
+ {file = "pydantic-1.10.13-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:f8e81fc5fb17dae698f52bdd1c4f18b6ca674d7068242b2aff075f588301bbb0"},
+ {file = "pydantic-1.10.13-cp311-cp311-win_amd64.whl", hash = "sha256:61d9dce220447fb74f45e73d7ff3b530e25db30192ad8d425166d43c5deb6df0"},
+ {file = "pydantic-1.10.13-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:4b03e42ec20286f052490423682016fd80fda830d8e4119f8ab13ec7464c0132"},
+ {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f59ef915cac80275245824e9d771ee939133be38215555e9dc90c6cb148aaeb5"},
+ {file = "pydantic-1.10.13-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a1f9f747851338933942db7af7b6ee8268568ef2ed86c4185c6ef4402e80ba8"},
+ {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:97cce3ae7341f7620a0ba5ef6cf043975cd9d2b81f3aa5f4ea37928269bc1b87"},
+ {file = "pydantic-1.10.13-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:854223752ba81e3abf663d685f105c64150873cc6f5d0c01d3e3220bcff7d36f"},
+ {file = "pydantic-1.10.13-cp37-cp37m-win_amd64.whl", hash = "sha256:b97c1fac8c49be29486df85968682b0afa77e1b809aff74b83081cc115e52f33"},
+ {file = "pydantic-1.10.13-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:c958d053453a1c4b1c2062b05cd42d9d5c8eb67537b8d5a7e3c3032943ecd261"},
+ {file = "pydantic-1.10.13-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4c5370a7edaac06daee3af1c8b1192e305bc102abcbf2a92374b5bc793818599"},
+ {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d6f6e7305244bddb4414ba7094ce910560c907bdfa3501e9db1a7fd7eaea127"},
+ {file = "pydantic-1.10.13-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d3a3c792a58e1622667a2837512099eac62490cdfd63bd407993aaf200a4cf1f"},
+ {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c636925f38b8db208e09d344c7aa4f29a86bb9947495dd6b6d376ad10334fb78"},
+ {file = "pydantic-1.10.13-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:678bcf5591b63cc917100dc50ab6caebe597ac67e8c9ccb75e698f66038ea953"},
+ {file = "pydantic-1.10.13-cp38-cp38-win_amd64.whl", hash = "sha256:6cf25c1a65c27923a17b3da28a0bdb99f62ee04230c931d83e888012851f4e7f"},
+ {file = "pydantic-1.10.13-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8ef467901d7a41fa0ca6db9ae3ec0021e3f657ce2c208e98cd511f3161c762c6"},
+ {file = "pydantic-1.10.13-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:968ac42970f57b8344ee08837b62f6ee6f53c33f603547a55571c954a4225691"},
+ {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9849f031cf8a2f0a928fe885e5a04b08006d6d41876b8bbd2fc68a18f9f2e3fd"},
+ {file = "pydantic-1.10.13-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:56e3ff861c3b9c6857579de282ce8baabf443f42ffba355bf070770ed63e11e1"},
+ {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f00790179497767aae6bcdc36355792c79e7bbb20b145ff449700eb076c5f96"},
+ {file = "pydantic-1.10.13-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:75b297827b59bc229cac1a23a2f7a4ac0031068e5be0ce385be1462e7e17a35d"},
+ {file = "pydantic-1.10.13-cp39-cp39-win_amd64.whl", hash = "sha256:e70ca129d2053fb8b728ee7d1af8e553a928d7e301a311094b8a0501adc8763d"},
+ {file = "pydantic-1.10.13-py3-none-any.whl", hash = "sha256:b87326822e71bd5f313e7d3bfdc77ac3247035ac10b0c0618bd99dcf95b1e687"},
+ {file = "pydantic-1.10.13.tar.gz", hash = "sha256:32c8b48dcd3b2ac4e78b0ba4af3a2c2eb6048cb75202f0ea7b34feb740efc340"},
+]
+
+[package.dependencies]
+python-dotenv = {version = ">=0.10.4", optional = true, markers = "extra == \"dotenv\""}
+typing-extensions = ">=4.2.0"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
+name = "pydocstyle"
+version = "6.3.0"
+description = "Python docstring style checker"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pydocstyle-6.3.0-py3-none-any.whl", hash = "sha256:118762d452a49d6b05e194ef344a55822987a462831ade91ec5c06fd2169d019"},
+ {file = "pydocstyle-6.3.0.tar.gz", hash = "sha256:7ce43f0c0ac87b07494eb9c0b462c0b73e6ff276807f204d6b53edc72b7e44e1"},
+]
+
+[package.dependencies]
+snowballstemmer = ">=2.2.0"
+
+[package.extras]
+toml = ["tomli (>=1.2.3)"]
+
+[[package]]
+name = "pyflakes"
+version = "3.1.0"
+description = "passive checker of Python programs"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pyflakes-3.1.0-py2.py3-none-any.whl", hash = "sha256:4132f6d49cb4dae6819e5379898f2b8cce3c5f23994194c24b77d5da2e36f774"},
+ {file = "pyflakes-3.1.0.tar.gz", hash = "sha256:a0aae034c444db0071aa077972ba4768d40c830d9539fd45bf4cd3f8f6992efc"},
+]
+
+[[package]]
+name = "pygments"
+version = "2.17.2"
+description = "Pygments is a syntax highlighting package written in Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pygments-2.17.2-py3-none-any.whl", hash = "sha256:b27c2826c47d0f3219f29554824c30c5e8945175d888647acd804ddd04af846c"},
+ {file = "pygments-2.17.2.tar.gz", hash = "sha256:da46cec9fd2de5be3a8a784f434e4c4ab670b4ff54d605c4c2717e9d49c4c367"},
+]
+
+[package.extras]
+plugins = ["importlib-metadata"]
+windows-terminal = ["colorama (>=0.4.6)"]
+
+[[package]]
+name = "pytest"
+version = "7.4.3"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.3-py3-none-any.whl", hash = "sha256:0d009c083ea859a71b76adf7c1d502e4bc170b80a8ef002da5806527b9591fac"},
+ {file = "pytest-7.4.3.tar.gz", hash = "sha256:d989d136982de4e3b29dabcc838ad581c64e8ed52c11fbe86ddebd9da0818cd5"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-cases"
+version = "3.8.1"
+description = "Separate test code from test cases in pytest."
+optional = false
+python-versions = "*"
+files = [
+ {file = "pytest-cases-3.8.1.tar.gz", hash = "sha256:49d7f6f8ad534e5a6e73fb8f5fd38986606f17b46ee55f7ebee07a55c677ca01"},
+ {file = "pytest_cases-3.8.1-py2.py3-none-any.whl", hash = "sha256:595553f5a522ad6525778a35e0fcb4a46f8eb069e0abe108de02e1d4fb136b85"},
+]
+
+[package.dependencies]
+decopatch = "*"
+makefun = ">=1.15.1"
+packaging = "*"
+
+[[package]]
+name = "pytest-cov"
+version = "4.1.0"
+description = "Pytest plugin for measuring coverage."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"},
+ {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"},
+]
+
+[package.dependencies]
+coverage = {version = ">=5.2.1", extras = ["toml"]}
+pytest = ">=4.6"
+
+[package.extras]
+testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtualenv"]
+
+[[package]]
+name = "pytest-xdist"
+version = "3.5.0"
+description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-xdist-3.5.0.tar.gz", hash = "sha256:cbb36f3d67e0c478baa57fa4edc8843887e0f6cfc42d677530a36d7472b32d8a"},
+ {file = "pytest_xdist-3.5.0-py3-none-any.whl", hash = "sha256:d075629c7e00b611df89f490a5063944bee7a4362a5ff11c7cc7824a03dfce24"},
+]
+
+[package.dependencies]
+execnet = ">=1.1"
+pytest = ">=6.2.0"
+
+[package.extras]
+psutil = ["psutil (>=3.0)"]
+setproctitle = ["setproctitle"]
+testing = ["filelock"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "python-dotenv"
+version = "1.0.0"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"},
+ {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
+[[package]]
+name = "python-multipart"
+version = "0.0.6"
+description = "A streaming multipart parser for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "python_multipart-0.0.6-py3-none-any.whl", hash = "sha256:ee698bab5ef148b0a760751c261902cd096e57e10558e11aca17646b74ee1c18"},
+ {file = "python_multipart-0.0.6.tar.gz", hash = "sha256:e9925a80bb668529f1b67c7fdb0a5dacdd7cbfc6fb0bff3ea443fe22bdd62132"},
+]
+
+[package.extras]
+dev = ["atomicwrites (==1.2.1)", "attrs (==19.2.0)", "coverage (==6.5.0)", "hatch", "invoke (==1.7.3)", "more-itertools (==4.3.0)", "pbr (==4.3.0)", "pluggy (==1.0.0)", "py (==1.11.0)", "pytest (==7.2.0)", "pytest-cov (==4.0.0)", "pytest-timeout (==2.1.0)", "pyyaml (==5.1)"]
+
+[[package]]
+name = "pyyaml"
+version = "5.4"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "PyYAML-5.4-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:f7a21e3d99aa3095ef0553e7ceba36fb693998fbb1226f1392ce33681047465f"},
+ {file = "PyYAML-5.4-cp27-cp27m-win32.whl", hash = "sha256:52bf0930903818e600ae6c2901f748bc4869c0c406056f679ab9614e5d21a166"},
+ {file = "PyYAML-5.4-cp27-cp27m-win_amd64.whl", hash = "sha256:a36a48a51e5471513a5aea920cdad84cbd56d70a5057cca3499a637496ea379c"},
+ {file = "PyYAML-5.4-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:5e7ac4e0e79a53451dc2814f6876c2fa6f71452de1498bbe29c0b54b69a986f4"},
+ {file = "PyYAML-5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cc552b6434b90d9dbed6a4f13339625dc466fd82597119897e9489c953acbc22"},
+ {file = "PyYAML-5.4-cp36-cp36m-manylinux1_x86_64.whl", hash = "sha256:0dc9f2eb2e3c97640928dec63fd8dc1dd91e6b6ed236bd5ac00332b99b5c2ff9"},
+ {file = "PyYAML-5.4-cp36-cp36m-win32.whl", hash = "sha256:5a3f345acff76cad4aa9cb171ee76c590f37394186325d53d1aa25318b0d4a09"},
+ {file = "PyYAML-5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:f3790156c606299ff499ec44db422f66f05a7363b39eb9d5b064f17bd7d7c47b"},
+ {file = "PyYAML-5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:124fd7c7bc1e95b1eafc60825f2daf67c73ce7b33f1194731240d24b0d1bf628"},
+ {file = "PyYAML-5.4-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:8b818b6c5a920cbe4203b5a6b14256f0e5244338244560da89b7b0f1313ea4b6"},
+ {file = "PyYAML-5.4-cp37-cp37m-win32.whl", hash = "sha256:737bd70e454a284d456aa1fa71a0b429dd527bcbf52c5c33f7c8eee81ac16b89"},
+ {file = "PyYAML-5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:7242790ab6c20316b8e7bb545be48d7ed36e26bbe279fd56f2c4a12510e60b4b"},
+ {file = "PyYAML-5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cc547d3ead3754712223abb7b403f0a184e4c3eae18c9bb7fd15adef1597cc4b"},
+ {file = "PyYAML-5.4-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:8635d53223b1f561b081ff4adecb828fd484b8efffe542edcfdff471997f7c39"},
+ {file = "PyYAML-5.4-cp38-cp38-win32.whl", hash = "sha256:26fcb33776857f4072601502d93e1a619f166c9c00befb52826e7b774efaa9db"},
+ {file = "PyYAML-5.4-cp38-cp38-win_amd64.whl", hash = "sha256:b2243dd033fd02c01212ad5c601dafb44fbb293065f430b0d3dbf03f3254d615"},
+ {file = "PyYAML-5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:31ba07c54ef4a897758563e3a0fcc60077698df10180abe4b8165d9895c00ebf"},
+ {file = "PyYAML-5.4-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:02c78d77281d8f8d07a255e57abdbf43b02257f59f50cc6b636937d68efa5dd0"},
+ {file = "PyYAML-5.4-cp39-cp39-win32.whl", hash = "sha256:fdc6b2cb4b19e431994f25a9160695cc59a4e861710cc6fc97161c5e845fc579"},
+ {file = "PyYAML-5.4-cp39-cp39-win_amd64.whl", hash = "sha256:8bf38641b4713d77da19e91f8b5296b832e4db87338d6aeffe422d42f1ca896d"},
+ {file = "PyYAML-5.4.tar.gz", hash = "sha256:3c49e39ac034fd64fd576d63bb4db53cda89b362768a67f07749d55f128ac18a"},
+]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "restructuredtext-lint"
+version = "1.4.0"
+description = "reStructuredText linter"
+optional = false
+python-versions = "*"
+files = [
+ {file = "restructuredtext_lint-1.4.0.tar.gz", hash = "sha256:1b235c0c922341ab6c530390892eb9e92f90b9b75046063e047cacfb0f050c45"},
+]
+
+[package.dependencies]
+docutils = ">=0.11,<1.0"
+
+[[package]]
+name = "rich"
+version = "13.7.0"
+description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "rich-13.7.0-py3-none-any.whl", hash = "sha256:6da14c108c4866ee9520bbffa71f6fe3962e193b7da68720583850cd4548e235"},
+ {file = "rich-13.7.0.tar.gz", hash = "sha256:5cb5123b5cf9ee70584244246816e9114227e0b98ad9176eede6ad54bf5403fa"},
+]
+
+[package.dependencies]
+markdown-it-py = ">=2.2.0"
+pygments = ">=2.13.0,<3.0.0"
+
+[package.extras]
+jupyter = ["ipywidgets (>=7.5.1,<9)"]
+
+[[package]]
+name = "rule-engine"
+version = "4.1.0"
+description = "A lightweight, optionally typed expression language with a custom grammar for matching arbitrary Python objects."
+optional = false
+python-versions = "*"
+files = [
+ {file = "rule-engine-4.1.0.tar.gz", hash = "sha256:ef00ad79a935be3d7932889d4752e381efd7070d5d0a6fc97f09418557dbfdea"},
+]
+
+[package.dependencies]
+ply = ">=3.9"
+python-dateutil = ">=2.7,<3.0"
+
+[[package]]
+name = "s3transfer"
+version = "0.8.2"
+description = "An Amazon S3 Transfer Manager"
+optional = false
+python-versions = ">= 3.7"
+files = [
+ {file = "s3transfer-0.8.2-py3-none-any.whl", hash = "sha256:c9e56cbe88b28d8e197cf841f1f0c130f246595e77ae5b5a05b69fe7cb83de76"},
+ {file = "s3transfer-0.8.2.tar.gz", hash = "sha256:368ac6876a9e9ed91f6bc86581e319be08188dc60d50e0d56308ed5765446283"},
+]
+
+[package.dependencies]
+botocore = ">=1.33.2,<2.0a.0"
+
+[package.extras]
+crt = ["botocore[crt] (>=1.33.2,<2.0a.0)"]
+
+[[package]]
+name = "scipy"
+version = "1.11.4"
+description = "Fundamental algorithms for scientific computing in Python"
+optional = false
+python-versions = ">=3.9"
+files = [
+ {file = "scipy-1.11.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bc9a714581f561af0848e6b69947fda0614915f072dfd14142ed1bfe1b806710"},
+ {file = "scipy-1.11.4-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:cf00bd2b1b0211888d4dc75656c0412213a8b25e80d73898083f402b50f47e41"},
+ {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b9999c008ccf00e8fbcce1236f85ade5c569d13144f77a1946bef8863e8f6eb4"},
+ {file = "scipy-1.11.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:933baf588daa8dc9a92c20a0be32f56d43faf3d1a60ab11b3f08c356430f6e56"},
+ {file = "scipy-1.11.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8fce70f39076a5aa62e92e69a7f62349f9574d8405c0a5de6ed3ef72de07f446"},
+ {file = "scipy-1.11.4-cp310-cp310-win_amd64.whl", hash = "sha256:6550466fbeec7453d7465e74d4f4b19f905642c89a7525571ee91dd7adabb5a3"},
+ {file = "scipy-1.11.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:f313b39a7e94f296025e3cffc2c567618174c0b1dde173960cf23808f9fae4be"},
+ {file = "scipy-1.11.4-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:1b7c3dca977f30a739e0409fb001056484661cb2541a01aba0bb0029f7b68db8"},
+ {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00150c5eae7b610c32589dda259eacc7c4f1665aedf25d921907f4d08a951b1c"},
+ {file = "scipy-1.11.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530f9ad26440e85766509dbf78edcfe13ffd0ab7fec2560ee5c36ff74d6269ff"},
+ {file = "scipy-1.11.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5e347b14fe01003d3b78e196e84bd3f48ffe4c8a7b8a1afbcb8f5505cb710993"},
+ {file = "scipy-1.11.4-cp311-cp311-win_amd64.whl", hash = "sha256:acf8ed278cc03f5aff035e69cb511741e0418681d25fbbb86ca65429c4f4d9cd"},
+ {file = "scipy-1.11.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:028eccd22e654b3ea01ee63705681ee79933652b2d8f873e7949898dda6d11b6"},
+ {file = "scipy-1.11.4-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:2c6ff6ef9cc27f9b3db93a6f8b38f97387e6e0591600369a297a50a8e96e835d"},
+ {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b030c6674b9230d37c5c60ab456e2cf12f6784596d15ce8da9365e70896effc4"},
+ {file = "scipy-1.11.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad669df80528aeca5f557712102538f4f37e503f0c5b9541655016dd0932ca79"},
+ {file = "scipy-1.11.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ce7fff2e23ab2cc81ff452a9444c215c28e6305f396b2ba88343a567feec9660"},
+ {file = "scipy-1.11.4-cp312-cp312-win_amd64.whl", hash = "sha256:36750b7733d960d7994888f0d148d31ea3017ac15eef664194b4ef68d36a4a97"},
+ {file = "scipy-1.11.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e619aba2df228a9b34718efb023966da781e89dd3d21637b27f2e54db0410d7"},
+ {file = "scipy-1.11.4-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:f3cd9e7b3c2c1ec26364856f9fbe78695fe631150f94cd1c22228456404cf1ec"},
+ {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d10e45a6c50211fe256da61a11c34927c68f277e03138777bdebedd933712fea"},
+ {file = "scipy-1.11.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91af76a68eeae0064887a48e25c4e616fa519fa0d38602eda7e0f97d65d57937"},
+ {file = "scipy-1.11.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:6df1468153a31cf55ed5ed39647279beb9cfb5d3f84369453b49e4b8502394fd"},
+ {file = "scipy-1.11.4-cp39-cp39-win_amd64.whl", hash = "sha256:ee410e6de8f88fd5cf6eadd73c135020bfbbbdfcd0f6162c36a7638a1ea8cc65"},
+ {file = "scipy-1.11.4.tar.gz", hash = "sha256:90a2b78e7f5733b9de748f589f09225013685f9b218275257f8a8168ededaeaa"},
+]
+
+[package.dependencies]
+numpy = ">=1.21.6,<1.28.0"
+
+[package.extras]
+dev = ["click", "cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pydevtool", "rich-click", "ruff", "types-psutil", "typing_extensions"]
+doc = ["jupytext", "matplotlib (>2)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-design (>=0.2.0)"]
+test = ["asv", "gmpy2", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"]
+
+[[package]]
+name = "sentry-sdk"
+version = "1.38.0"
+description = "Python client for Sentry (https://sentry.io)"
+optional = false
+python-versions = "*"
+files = [
+ {file = "sentry-sdk-1.38.0.tar.gz", hash = "sha256:8feab81de6bbf64f53279b085bd3820e3e737403b0a0d9317f73a2c3374ae359"},
+ {file = "sentry_sdk-1.38.0-py2.py3-none-any.whl", hash = "sha256:0017fa73b8ae2d4e57fd2522ee3df30453715b29d2692142793ec5d5f90b94a6"},
+]
+
+[package.dependencies]
+certifi = "*"
+urllib3 = {version = ">=1.26.11", markers = "python_version >= \"3.6\""}
+
+[package.extras]
+aiohttp = ["aiohttp (>=3.5)"]
+arq = ["arq (>=0.23)"]
+asyncpg = ["asyncpg (>=0.23)"]
+beam = ["apache-beam (>=2.12)"]
+bottle = ["bottle (>=0.12.13)"]
+celery = ["celery (>=3)"]
+chalice = ["chalice (>=1.16.0)"]
+clickhouse-driver = ["clickhouse-driver (>=0.2.0)"]
+django = ["django (>=1.8)"]
+falcon = ["falcon (>=1.4)"]
+fastapi = ["fastapi (>=0.79.0)"]
+flask = ["blinker (>=1.1)", "flask (>=0.11)", "markupsafe"]
+grpcio = ["grpcio (>=1.21.1)"]
+httpx = ["httpx (>=0.16.0)"]
+huey = ["huey (>=2)"]
+loguru = ["loguru (>=0.5)"]
+opentelemetry = ["opentelemetry-distro (>=0.35b0)"]
+opentelemetry-experimental = ["opentelemetry-distro (>=0.40b0,<1.0)", "opentelemetry-instrumentation-aiohttp-client (>=0.40b0,<1.0)", "opentelemetry-instrumentation-django (>=0.40b0,<1.0)", "opentelemetry-instrumentation-fastapi (>=0.40b0,<1.0)", "opentelemetry-instrumentation-flask (>=0.40b0,<1.0)", "opentelemetry-instrumentation-requests (>=0.40b0,<1.0)", "opentelemetry-instrumentation-sqlite3 (>=0.40b0,<1.0)", "opentelemetry-instrumentation-urllib (>=0.40b0,<1.0)"]
+pure-eval = ["asttokens", "executing", "pure-eval"]
+pymongo = ["pymongo (>=3.1)"]
+pyspark = ["pyspark (>=2.4.4)"]
+quart = ["blinker (>=1.1)", "quart (>=0.16.1)"]
+rq = ["rq (>=0.6)"]
+sanic = ["sanic (>=0.8)"]
+sqlalchemy = ["sqlalchemy (>=1.2)"]
+starlette = ["starlette (>=0.19.1)"]
+starlite = ["starlite (>=1.48)"]
+tornado = ["tornado (>=5)"]
+
+[[package]]
+name = "setproctitle"
+version = "1.3.3"
+description = "A Python module to customize the process title"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:897a73208da48db41e687225f355ce993167079eda1260ba5e13c4e53be7f754"},
+ {file = "setproctitle-1.3.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:8c331e91a14ba4076f88c29c777ad6b58639530ed5b24b5564b5ed2fd7a95452"},
+ {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbbd6c7de0771c84b4aa30e70b409565eb1fc13627a723ca6be774ed6b9d9fa3"},
+ {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c05ac48ef16ee013b8a326c63e4610e2430dbec037ec5c5b58fcced550382b74"},
+ {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1342f4fdb37f89d3e3c1c0a59d6ddbedbde838fff5c51178a7982993d238fe4f"},
+ {file = "setproctitle-1.3.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc74e84fdfa96821580fb5e9c0b0777c1c4779434ce16d3d62a9c4d8c710df39"},
+ {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9617b676b95adb412bb69645d5b077d664b6882bb0d37bfdafbbb1b999568d85"},
+ {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6a249415f5bb88b5e9e8c4db47f609e0bf0e20a75e8d744ea787f3092ba1f2d0"},
+ {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:38da436a0aaace9add67b999eb6abe4b84397edf4a78ec28f264e5b4c9d53cd5"},
+ {file = "setproctitle-1.3.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:da0d57edd4c95bf221b2ebbaa061e65b1788f1544977288bdf95831b6e44e44d"},
+ {file = "setproctitle-1.3.3-cp310-cp310-win32.whl", hash = "sha256:a1fcac43918b836ace25f69b1dca8c9395253ad8152b625064415b1d2f9be4fb"},
+ {file = "setproctitle-1.3.3-cp310-cp310-win_amd64.whl", hash = "sha256:200620c3b15388d7f3f97e0ae26599c0c378fdf07ae9ac5a13616e933cbd2086"},
+ {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:334f7ed39895d692f753a443102dd5fed180c571eb6a48b2a5b7f5b3564908c8"},
+ {file = "setproctitle-1.3.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:950f6476d56ff7817a8fed4ab207727fc5260af83481b2a4b125f32844df513a"},
+ {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:195c961f54a09eb2acabbfc90c413955cf16c6e2f8caa2adbf2237d1019c7dd8"},
+ {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f05e66746bf9fe6a3397ec246fe481096664a9c97eb3fea6004735a4daf867fd"},
+ {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b5901a31012a40ec913265b64e48c2a4059278d9f4e6be628441482dd13fb8b5"},
+ {file = "setproctitle-1.3.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64286f8a995f2cd934082b398fc63fca7d5ffe31f0e27e75b3ca6b4efda4e353"},
+ {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:184239903bbc6b813b1a8fc86394dc6ca7d20e2ebe6f69f716bec301e4b0199d"},
+ {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:664698ae0013f986118064b6676d7dcd28fefd0d7d5a5ae9497cbc10cba48fa5"},
+ {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e5119a211c2e98ff18b9908ba62a3bd0e3fabb02a29277a7232a6fb4b2560aa0"},
+ {file = "setproctitle-1.3.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:417de6b2e214e837827067048f61841f5d7fc27926f2e43954567094051aff18"},
+ {file = "setproctitle-1.3.3-cp311-cp311-win32.whl", hash = "sha256:6a143b31d758296dc2f440175f6c8e0b5301ced3b0f477b84ca43cdcf7f2f476"},
+ {file = "setproctitle-1.3.3-cp311-cp311-win_amd64.whl", hash = "sha256:a680d62c399fa4b44899094027ec9a1bdaf6f31c650e44183b50d4c4d0ccc085"},
+ {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:d4460795a8a7a391e3567b902ec5bdf6c60a47d791c3b1d27080fc203d11c9dc"},
+ {file = "setproctitle-1.3.3-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:bdfd7254745bb737ca1384dee57e6523651892f0ea2a7344490e9caefcc35e64"},
+ {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:477d3da48e216d7fc04bddab67b0dcde633e19f484a146fd2a34bb0e9dbb4a1e"},
+ {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab2900d111e93aff5df9fddc64cf51ca4ef2c9f98702ce26524f1acc5a786ae7"},
+ {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:088b9efc62d5aa5d6edf6cba1cf0c81f4488b5ce1c0342a8b67ae39d64001120"},
+ {file = "setproctitle-1.3.3-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a6d50252377db62d6a0bb82cc898089916457f2db2041e1d03ce7fadd4a07381"},
+ {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:87e668f9561fd3a457ba189edfc9e37709261287b52293c115ae3487a24b92f6"},
+ {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:287490eb90e7a0ddd22e74c89a92cc922389daa95babc833c08cf80c84c4df0a"},
+ {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:4fe1c49486109f72d502f8be569972e27f385fe632bd8895f4730df3c87d5ac8"},
+ {file = "setproctitle-1.3.3-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4a6ba2494a6449b1f477bd3e67935c2b7b0274f2f6dcd0f7c6aceae10c6c6ba3"},
+ {file = "setproctitle-1.3.3-cp312-cp312-win32.whl", hash = "sha256:2df2b67e4b1d7498632e18c56722851ba4db5d6a0c91aaf0fd395111e51cdcf4"},
+ {file = "setproctitle-1.3.3-cp312-cp312-win_amd64.whl", hash = "sha256:f38d48abc121263f3b62943f84cbaede05749047e428409c2c199664feb6abc7"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:816330675e3504ae4d9a2185c46b573105d2310c20b19ea2b4596a9460a4f674"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68f960bc22d8d8e4ac886d1e2e21ccbd283adcf3c43136161c1ba0fa509088e0"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:00e6e7adff74796ef12753ff399491b8827f84f6c77659d71bd0b35870a17d8f"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:53bc0d2358507596c22b02db079618451f3bd720755d88e3cccd840bafb4c41c"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad6d20f9541f5f6ac63df553b6d7a04f313947f550eab6a61aa758b45f0d5657"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c1c84beab776b0becaa368254801e57692ed749d935469ac10e2b9b825dbdd8e"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:507e8dc2891021350eaea40a44ddd887c9f006e6b599af8d64a505c0f718f170"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:b1067647ac7aba0b44b591936118a22847bda3c507b0a42d74272256a7a798e9"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:2e71f6365744bf53714e8bd2522b3c9c1d83f52ffa6324bd7cbb4da707312cd8"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-win32.whl", hash = "sha256:7f1d36a1e15a46e8ede4e953abb104fdbc0845a266ec0e99cc0492a4364f8c44"},
+ {file = "setproctitle-1.3.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9a402881ec269d0cc9c354b149fc29f9ec1a1939a777f1c858cdb09c7a261df"},
+ {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ff814dea1e5c492a4980e3e7d094286077054e7ea116cbeda138819db194b2cd"},
+ {file = "setproctitle-1.3.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:accb66d7b3ccb00d5cd11d8c6e07055a4568a24c95cf86109894dcc0c134cc89"},
+ {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:554eae5a5b28f02705b83a230e9d163d645c9a08914c0ad921df363a07cf39b1"},
+ {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a911b26264dbe9e8066c7531c0591cfab27b464459c74385b276fe487ca91c12"},
+ {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2982efe7640c4835f7355fdb4da313ad37fb3b40f5c69069912f8048f77b28c8"},
+ {file = "setproctitle-1.3.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df3f4274b80709d8bcab2f9a862973d453b308b97a0b423a501bcd93582852e3"},
+ {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:af2c67ae4c795d1674a8d3ac1988676fa306bcfa1e23fddb5e0bd5f5635309ca"},
+ {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:af4061f67fd7ec01624c5e3c21f6b7af2ef0e6bab7fbb43f209e6506c9ce0092"},
+ {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:37a62cbe16d4c6294e84670b59cf7adcc73faafe6af07f8cb9adaf1f0e775b19"},
+ {file = "setproctitle-1.3.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a83ca086fbb017f0d87f240a8f9bbcf0809f3b754ee01cec928fff926542c450"},
+ {file = "setproctitle-1.3.3-cp38-cp38-win32.whl", hash = "sha256:059f4ce86f8cc92e5860abfc43a1dceb21137b26a02373618d88f6b4b86ba9b2"},
+ {file = "setproctitle-1.3.3-cp38-cp38-win_amd64.whl", hash = "sha256:ab92e51cd4a218208efee4c6d37db7368fdf182f6e7ff148fb295ecddf264287"},
+ {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c7951820b77abe03d88b114b998867c0f99da03859e5ab2623d94690848d3e45"},
+ {file = "setproctitle-1.3.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5bc94cf128676e8fac6503b37763adb378e2b6be1249d207630f83fc325d9b11"},
+ {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f5d9027eeda64d353cf21a3ceb74bb1760bd534526c9214e19f052424b37e42"},
+ {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2e4a8104db15d3462e29d9946f26bed817a5b1d7a47eabca2d9dc2b995991503"},
+ {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c32c41ace41f344d317399efff4cffb133e709cec2ef09c99e7a13e9f3b9483c"},
+ {file = "setproctitle-1.3.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cbf16381c7bf7f963b58fb4daaa65684e10966ee14d26f5cc90f07049bfd8c1e"},
+ {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:e18b7bd0898398cc97ce2dfc83bb192a13a087ef6b2d5a8a36460311cb09e775"},
+ {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69d565d20efe527bd8a9b92e7f299ae5e73b6c0470f3719bd66f3cd821e0d5bd"},
+ {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:ddedd300cd690a3b06e7eac90ed4452348b1348635777ce23d460d913b5b63c3"},
+ {file = "setproctitle-1.3.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:415bfcfd01d1fbf5cbd75004599ef167a533395955305f42220a585f64036081"},
+ {file = "setproctitle-1.3.3-cp39-cp39-win32.whl", hash = "sha256:21112fcd2195d48f25760f0eafa7a76510871bbb3b750219310cf88b04456ae3"},
+ {file = "setproctitle-1.3.3-cp39-cp39-win_amd64.whl", hash = "sha256:5a740f05d0968a5a17da3d676ce6afefebeeeb5ce137510901bf6306ba8ee002"},
+ {file = "setproctitle-1.3.3-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:6b9e62ddb3db4b5205c0321dd69a406d8af9ee1693529d144e86bd43bcb4b6c0"},
+ {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9e3b99b338598de0bd6b2643bf8c343cf5ff70db3627af3ca427a5e1a1a90dd9"},
+ {file = "setproctitle-1.3.3-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:38ae9a02766dad331deb06855fb7a6ca15daea333b3967e214de12cfae8f0ef5"},
+ {file = "setproctitle-1.3.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:200ede6fd11233085ba9b764eb055a2a191fb4ffb950c68675ac53c874c22e20"},
+ {file = "setproctitle-1.3.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0d3a953c50776751e80fe755a380a64cb14d61e8762bd43041ab3f8cc436092f"},
+ {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5e08e232b78ba3ac6bc0d23ce9e2bee8fad2be391b7e2da834fc9a45129eb87"},
+ {file = "setproctitle-1.3.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1da82c3e11284da4fcbf54957dafbf0655d2389cd3d54e4eaba636faf6d117a"},
+ {file = "setproctitle-1.3.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:aeaa71fb9568ebe9b911ddb490c644fbd2006e8c940f21cb9a1e9425bd709574"},
+ {file = "setproctitle-1.3.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:59335d000c6250c35989394661eb6287187854e94ac79ea22315469ee4f4c244"},
+ {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3ba57029c9c50ecaf0c92bb127224cc2ea9fda057b5d99d3f348c9ec2855ad3"},
+ {file = "setproctitle-1.3.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d876d355c53d975c2ef9c4f2487c8f83dad6aeaaee1b6571453cb0ee992f55f6"},
+ {file = "setproctitle-1.3.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:224602f0939e6fb9d5dd881be1229d485f3257b540f8a900d4271a2c2aa4e5f4"},
+ {file = "setproctitle-1.3.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d7f27e0268af2d7503386e0e6be87fb9b6657afd96f5726b733837121146750d"},
+ {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f5e7266498cd31a4572378c61920af9f6b4676a73c299fce8ba93afd694f8ae7"},
+ {file = "setproctitle-1.3.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33c5609ad51cd99d388e55651b19148ea99727516132fb44680e1f28dd0d1de9"},
+ {file = "setproctitle-1.3.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:eae8988e78192fd1a3245a6f4f382390b61bce6cfcc93f3809726e4c885fa68d"},
+ {file = "setproctitle-1.3.3.tar.gz", hash = "sha256:c913e151e7ea01567837ff037a23ca8740192880198b7fbb90b16d181607caae"},
+]
+
+[package.extras]
+test = ["pytest"]
+
+[[package]]
+name = "setuptools"
+version = "69.0.2"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "setuptools-69.0.2-py3-none-any.whl", hash = "sha256:1e8fdff6797d3865f37397be788a4e3cba233608e9b509382a2777d25ebde7f2"},
+ {file = "setuptools-69.0.2.tar.gz", hash = "sha256:735896e78a4742605974de002ac60562d286fa8051a7e2299445e8e8fbb01aa6"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "packaging (>=23.1)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "shortuuid"
+version = "1.0.11"
+description = "A generator library for concise, unambiguous and URL-safe UUIDs."
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "shortuuid-1.0.11-py3-none-any.whl", hash = "sha256:27ea8f28b1bd0bf8f15057a3ece57275d2059d2b0bb02854f02189962c13b6aa"},
+ {file = "shortuuid-1.0.11.tar.gz", hash = "sha256:fc75f2615914815a8e4cb1501b3a513745cb66ef0fd5fc6fb9f8c3fa3481f789"},
+]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "smmap"
+version = "5.0.1"
+description = "A pure Python implementation of a sliding window memory map manager"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "smmap-5.0.1-py3-none-any.whl", hash = "sha256:e6d8668fa5f93e706934a62d7b4db19c8d9eb8cf2adbb75ef1b675aa332b69da"},
+ {file = "smmap-5.0.1.tar.gz", hash = "sha256:dceeb6c0028fdb6734471eb07c0cd2aae706ccaecab45965ee83f11c8d3b1f62"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
+[[package]]
+name = "snowballstemmer"
+version = "2.2.0"
+description = "This package provides 29 stemmers for 28 languages generated from Snowball algorithms."
+optional = false
+python-versions = "*"
+files = [
+ {file = "snowballstemmer-2.2.0-py2.py3-none-any.whl", hash = "sha256:c8e1716e83cc398ae16824e5572ae04e0d9fc2c6b985fb0f900f5f0c96ecba1a"},
+ {file = "snowballstemmer-2.2.0.tar.gz", hash = "sha256:09b16deb8547d3412ad7b590689584cd0fe25ec8db3be37788be3810cbf19cb1"},
+]
+
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "stevedore"
+version = "5.1.0"
+description = "Manage dynamic plugins for Python applications"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "stevedore-5.1.0-py3-none-any.whl", hash = "sha256:8cc040628f3cea5d7128f2e76cf486b2251a4e543c7b938f58d9a377f6694a2d"},
+ {file = "stevedore-5.1.0.tar.gz", hash = "sha256:a54534acf9b89bc7ed264807013b505bf07f74dbe4bcfa37d32bd063870b087c"},
+]
+
+[package.dependencies]
+pbr = ">=2.0.0,<2.1.0 || >2.1.0"
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "torch"
+version = "1.13.1"
+description = "Tensors and Dynamic neural networks in Python with strong GPU acceleration"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "torch-1.13.1-cp310-cp310-manylinux1_x86_64.whl", hash = "sha256:fd12043868a34a8da7d490bf6db66991108b00ffbeecb034228bfcbbd4197143"},
+ {file = "torch-1.13.1-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:d9fe785d375f2e26a5d5eba5de91f89e6a3be5d11efb497e76705fdf93fa3c2e"},
+ {file = "torch-1.13.1-cp310-cp310-win_amd64.whl", hash = "sha256:98124598cdff4c287dbf50f53fb455f0c1e3a88022b39648102957f3445e9b76"},
+ {file = "torch-1.13.1-cp310-none-macosx_10_9_x86_64.whl", hash = "sha256:393a6273c832e047581063fb74335ff50b4c566217019cc6ace318cd79eb0566"},
+ {file = "torch-1.13.1-cp310-none-macosx_11_0_arm64.whl", hash = "sha256:0122806b111b949d21fa1a5f9764d1fd2fcc4a47cb7f8ff914204fd4fc752ed5"},
+ {file = "torch-1.13.1-cp311-cp311-manylinux1_x86_64.whl", hash = "sha256:22128502fd8f5b25ac1cd849ecb64a418382ae81dd4ce2b5cebaa09ab15b0d9b"},
+ {file = "torch-1.13.1-cp37-cp37m-manylinux1_x86_64.whl", hash = "sha256:76024be052b659ac1304ab8475ab03ea0a12124c3e7626282c9c86798ac7bc11"},
+ {file = "torch-1.13.1-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:ea8dda84d796094eb8709df0fcd6b56dc20b58fdd6bc4e8d7109930dafc8e419"},
+ {file = "torch-1.13.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2ee7b81e9c457252bddd7d3da66fb1f619a5d12c24d7074de91c4ddafb832c93"},
+ {file = "torch-1.13.1-cp37-none-macosx_10_9_x86_64.whl", hash = "sha256:0d9b8061048cfb78e675b9d2ea8503bfe30db43d583599ae8626b1263a0c1380"},
+ {file = "torch-1.13.1-cp37-none-macosx_11_0_arm64.whl", hash = "sha256:f402ca80b66e9fbd661ed4287d7553f7f3899d9ab54bf5c67faada1555abde28"},
+ {file = "torch-1.13.1-cp38-cp38-manylinux1_x86_64.whl", hash = "sha256:727dbf00e2cf858052364c0e2a496684b9cb5aa01dc8a8bc8bbb7c54502bdcdd"},
+ {file = "torch-1.13.1-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:df8434b0695e9ceb8cc70650afc1310d8ba949e6db2a0525ddd9c3b2b181e5fe"},
+ {file = "torch-1.13.1-cp38-cp38-win_amd64.whl", hash = "sha256:5e1e722a41f52a3f26f0c4fcec227e02c6c42f7c094f32e49d4beef7d1e213ea"},
+ {file = "torch-1.13.1-cp38-none-macosx_10_9_x86_64.whl", hash = "sha256:33e67eea526e0bbb9151263e65417a9ef2d8fa53cbe628e87310060c9dcfa312"},
+ {file = "torch-1.13.1-cp38-none-macosx_11_0_arm64.whl", hash = "sha256:eeeb204d30fd40af6a2d80879b46a7efbe3cf43cdbeb8838dd4f3d126cc90b2b"},
+ {file = "torch-1.13.1-cp39-cp39-manylinux1_x86_64.whl", hash = "sha256:50ff5e76d70074f6653d191fe4f6a42fdbe0cf942fbe2a3af0b75eaa414ac038"},
+ {file = "torch-1.13.1-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:2c3581a3fd81eb1f0f22997cddffea569fea53bafa372b2c0471db373b26aafc"},
+ {file = "torch-1.13.1-cp39-cp39-win_amd64.whl", hash = "sha256:0aa46f0ac95050c604bcf9ef71da9f1172e5037fdf2ebe051962d47b123848e7"},
+ {file = "torch-1.13.1-cp39-none-macosx_10_9_x86_64.whl", hash = "sha256:6930791efa8757cb6974af73d4996b6b50c592882a324b8fb0589c6a9ba2ddaf"},
+ {file = "torch-1.13.1-cp39-none-macosx_11_0_arm64.whl", hash = "sha256:e0df902a7c7dd6c795698532ee5970ce898672625635d885eade9976e5a04949"},
+]
+
+[package.dependencies]
+nvidia-cublas-cu11 = {version = "11.10.3.66", markers = "platform_system == \"Linux\""}
+nvidia-cuda-nvrtc-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""}
+nvidia-cuda-runtime-cu11 = {version = "11.7.99", markers = "platform_system == \"Linux\""}
+nvidia-cudnn-cu11 = {version = "8.5.0.96", markers = "platform_system == \"Linux\""}
+typing-extensions = "*"
+
+[package.extras]
+opt-einsum = ["opt-einsum (>=3.3)"]
+
+[[package]]
+name = "torchmetrics"
+version = "1.2.1"
+description = "PyTorch native Metrics"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "torchmetrics-1.2.1-py3-none-any.whl", hash = "sha256:fe03a8c53d0ae5800d34ea615f56295fda281282cd83f647d2184e81c1d4efee"},
+ {file = "torchmetrics-1.2.1.tar.gz", hash = "sha256:217387738f84939c39b534b20d4983e737cc448d27aaa5340e0327948d97ca3e"},
+]
+
+[package.dependencies]
+lightning-utilities = ">=0.8.0"
+numpy = ">1.20.0"
+packaging = ">17.1"
+torch = ">=1.8.1"
+
+[package.extras]
+-tests = ["bert-score (==0.3.13)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "huggingface-hub (<0.20)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "lpips (<=0.1.4)", "mir-eval (>=0.6)", "netcal (>1.0.0)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "pytorch-msssim (==1.0.0)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch-complex (<=0.4.3)"]
+all = ["SciencePlots (>=2.0.0)", "matplotlib (>=3.2.0)", "mypy (==1.7.1)", "nltk (>=3.6)", "piq (<=0.8.0)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "regex (>=2021.9.24)", "scipy (>1.0.0)", "torch (==2.1.1)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"]
+audio = ["pystoi (>=0.3.0)", "torchaudio (>=0.10.0)"]
+detection = ["pycocotools (>2.0.0)", "torchvision (>=0.8)"]
+dev = ["SciencePlots (>=2.0.0)", "bert-score (==0.3.13)", "dython (<=0.7.4)", "fairlearn", "fast-bss-eval (>=0.1.0)", "faster-coco-eval (>=1.3.3)", "huggingface-hub (<0.20)", "jiwer (>=2.3.0)", "kornia (>=0.6.7)", "lpips (<=0.1.4)", "matplotlib (>=3.2.0)", "mir-eval (>=0.6)", "mypy (==1.7.1)", "netcal (>1.0.0)", "nltk (>=3.6)", "numpy (<1.25.0)", "pandas (>1.0.0)", "pandas (>=1.4.0)", "piq (<=0.8.0)", "pycocotools (>2.0.0)", "pystoi (>=0.3.0)", "pytorch-msssim (==1.0.0)", "regex (>=2021.9.24)", "rouge-score (>0.1.0)", "sacrebleu (>=2.0.0)", "scikit-image (>=0.19.0)", "scipy (>1.0.0)", "sewar (>=0.4.4)", "statsmodels (>0.13.5)", "torch (==2.1.1)", "torch-complex (<=0.4.3)", "torch-fidelity (<=0.4.0)", "torchaudio (>=0.10.0)", "torchvision (>=0.8)", "tqdm (>=4.41.0)", "transformers (>4.4.0)", "transformers (>=4.10.0)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"]
+image = ["scipy (>1.0.0)", "torch-fidelity (<=0.4.0)", "torchvision (>=0.8)"]
+multimodal = ["piq (<=0.8.0)", "transformers (>=4.10.0)"]
+text = ["nltk (>=3.6)", "regex (>=2021.9.24)", "tqdm (>=4.41.0)", "transformers (>4.4.0)"]
+typing = ["mypy (==1.7.1)", "torch (==2.1.1)", "types-PyYAML", "types-emoji", "types-protobuf", "types-requests", "types-setuptools", "types-six", "types-tabulate"]
+visual = ["SciencePlots (>=2.0.0)", "matplotlib (>=3.2.0)"]
+
+[[package]]
+name = "tqdm"
+version = "4.66.1"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
+ {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "typer"
+version = "0.9.0"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "typer-0.9.0-py3-none-any.whl", hash = "sha256:5d96d986a21493606a358cae4461bd8cdf83cbf33a5aa950ae629ca3b51467ee"},
+ {file = "typer-0.9.0.tar.gz", hash = "sha256:50922fd79aea2f4751a8e0408ff10d2662bd0c8bbfa84755a699f3bada2978b2"},
+]
+
+[package.dependencies]
+click = ">=7.1.1,<9.0.0"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"]
+doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"]
+test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<14.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+
+[[package]]
+name = "types-pyyaml"
+version = "6.0.12.12"
+description = "Typing stubs for PyYAML"
+optional = false
+python-versions = "*"
+files = [
+ {file = "types-PyYAML-6.0.12.12.tar.gz", hash = "sha256:334373d392fde0fdf95af5c3f1661885fa10c52167b14593eb856289e1855062"},
+ {file = "types_PyYAML-6.0.12.12-py3-none-any.whl", hash = "sha256:c05bc6c158facb0676674b7f11fe3960db4f389718e19e62bd2b84d6205cfd24"},
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.8.0"
+description = "Backported and Experimental Type Hints for Python 3.8+"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "typing_extensions-4.8.0-py3-none-any.whl", hash = "sha256:8f92fc8806f9a6b641eaa5318da32b44d401efaac0f6678c9bc448ba3605faa0"},
+ {file = "typing_extensions-4.8.0.tar.gz", hash = "sha256:df8e4339e9cb77357558cbdbceca33c303714cf861d1eef15e1070055ae8b7ef"},
+]
+
+[[package]]
+name = "ujson"
+version = "5.8.0"
+description = "Ultra fast JSON encoder and decoder for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "ujson-5.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4511560d75b15ecb367eef561554959b9d49b6ec3b8d5634212f9fed74a6df1"},
+ {file = "ujson-5.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9399eaa5d1931a0ead49dce3ffacbea63f3177978588b956036bfe53cdf6af75"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4e7bb7eba0e1963f8b768f9c458ecb193e5bf6977090182e2b4f4408f35ac76"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40931d7c08c4ce99adc4b409ddb1bbb01635a950e81239c2382cfe24251b127a"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d53039d39de65360e924b511c7ca1a67b0975c34c015dd468fca492b11caa8f7"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bdf04c6af3852161be9613e458a1fb67327910391de8ffedb8332e60800147a2"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a70f776bda2e5072a086c02792c7863ba5833d565189e09fabbd04c8b4c3abba"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f26629ac531d712f93192c233a74888bc8b8212558bd7d04c349125f10199fcf"},
+ {file = "ujson-5.8.0-cp310-cp310-win32.whl", hash = "sha256:7ecc33b107ae88405aebdb8d82c13d6944be2331ebb04399134c03171509371a"},
+ {file = "ujson-5.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:3b27a8da7a080add559a3b73ec9ebd52e82cc4419f7c6fb7266e62439a055ed0"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:193349a998cd821483a25f5df30b44e8f495423840ee11b3b28df092ddfd0f7f"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ddeabbc78b2aed531f167d1e70387b151900bc856d61e9325fcdfefb2a51ad8"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ce24909a9c25062e60653073dd6d5e6ec9d6ad7ed6e0069450d5b673c854405"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a2a3c7620ebe43641e926a1062bc04e92dbe90d3501687957d71b4bdddaec4"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b852bdf920fe9f84e2a2c210cc45f1b64f763b4f7d01468b33f7791698e455e"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:20768961a6a706170497129960762ded9c89fb1c10db2989c56956b162e2a8a3"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0147d41e9fb5cd174207c4a2895c5e24813204499fd0839951d4c8784a23bf5"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e3673053b036fd161ae7a5a33358ccae6793ee89fd499000204676baafd7b3aa"},
+ {file = "ujson-5.8.0-cp311-cp311-win32.whl", hash = "sha256:a89cf3cd8bf33a37600431b7024a7ccf499db25f9f0b332947fbc79043aad879"},
+ {file = "ujson-5.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3659deec9ab9eb19e8646932bfe6fe22730757c4addbe9d7d5544e879dc1b721"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:102bf31c56f59538cccdfec45649780ae00657e86247c07edac434cb14d5388c"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:299a312c3e85edee1178cb6453645217ba23b4e3186412677fa48e9a7f986de6"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2e385a7679b9088d7bc43a64811a7713cc7c33d032d020f757c54e7d41931ae"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad24ec130855d4430a682c7a60ca0bc158f8253ec81feed4073801f6b6cb681b"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16fde596d5e45bdf0d7de615346a102510ac8c405098e5595625015b0d4b5296"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6d230d870d1ce03df915e694dcfa3f4e8714369cce2346686dbe0bc8e3f135e7"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9571de0c53db5cbc265945e08f093f093af2c5a11e14772c72d8e37fceeedd08"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7cba16b26efe774c096a5e822e4f27097b7c81ed6fb5264a2b3f5fd8784bab30"},
+ {file = "ujson-5.8.0-cp312-cp312-win32.whl", hash = "sha256:48c7d373ff22366eecfa36a52b9b55b0ee5bd44c2b50e16084aa88b9de038916"},
+ {file = "ujson-5.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:5ac97b1e182d81cf395ded620528c59f4177eee024b4b39a50cdd7b720fdeec6"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2a64cc32bb4a436e5813b83f5aab0889927e5ea1788bf99b930fad853c5625cb"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e54578fa8838ddc722539a752adfce9372474114f8c127bb316db5392d942f8b"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9721cd112b5e4687cb4ade12a7b8af8b048d4991227ae8066d9c4b3a6642a582"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d9707e5aacf63fb919f6237d6490c4e0244c7f8d3dc2a0f84d7dec5db7cb54c"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0be81bae295f65a6896b0c9030b55a106fb2dec69ef877253a87bc7c9c5308f7"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae7f4725c344bf437e9b881019c558416fe84ad9c6b67426416c131ad577df67"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9ab282d67ef3097105552bf151438b551cc4bedb3f24d80fada830f2e132aeb9"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:94c7bd9880fa33fcf7f6d7f4cc032e2371adee3c5dba2922b918987141d1bf07"},
+ {file = "ujson-5.8.0-cp38-cp38-win32.whl", hash = "sha256:bf5737dbcfe0fa0ac8fa599eceafae86b376492c8f1e4b84e3adf765f03fb564"},
+ {file = "ujson-5.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:11da6bed916f9bfacf13f4fc6a9594abd62b2bb115acfb17a77b0f03bee4cfd5"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:69b3104a2603bab510497ceabc186ba40fef38ec731c0ccaa662e01ff94a985c"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9249fdefeb021e00b46025e77feed89cd91ffe9b3a49415239103fc1d5d9c29a"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2873d196725a8193f56dde527b322c4bc79ed97cd60f1d087826ac3290cf9207"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4dafa9010c366589f55afb0fd67084acd8added1a51251008f9ff2c3e44042"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a42baa647a50fa8bed53d4e242be61023bd37b93577f27f90ffe521ac9dc7a3"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f3554eaadffe416c6f543af442066afa6549edbc34fe6a7719818c3e72ebfe95"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fb87decf38cc82bcdea1d7511e73629e651bdec3a43ab40985167ab8449b769c"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:407d60eb942c318482bbfb1e66be093308bb11617d41c613e33b4ce5be789adc"},
+ {file = "ujson-5.8.0-cp39-cp39-win32.whl", hash = "sha256:0fe1b7edaf560ca6ab023f81cbeaf9946a240876a993b8c5a21a1c539171d903"},
+ {file = "ujson-5.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f9b63530a5392eb687baff3989d0fb5f45194ae5b1ca8276282fb647f8dcdb3"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:efeddf950fb15a832376c0c01d8d7713479fbeceaed1eaecb2665aa62c305aec"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d8283ac5d03e65f488530c43d6610134309085b71db4f675e9cf5dff96a8282"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb0142f6f10f57598655340a3b2c70ed4646cbe674191da195eb0985a9813b83"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07d459aca895eb17eb463b00441986b021b9312c6c8cc1d06880925c7f51009c"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d524a8c15cfc863705991d70bbec998456a42c405c291d0f84a74ad7f35c5109"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d6f84a7a175c75beecde53a624881ff618e9433045a69fcfb5e154b73cdaa377"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b748797131ac7b29826d1524db1cc366d2722ab7afacc2ce1287cdafccddbf1f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e72ba76313d48a1a3a42e7dc9d1db32ea93fac782ad8dde6f8b13e35c229130"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f504117a39cb98abba4153bf0b46b4954cc5d62f6351a14660201500ba31fe7f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8c91b6f4bf23f274af9002b128d133b735141e867109487d17e344d38b87d94"},
+ {file = "ujson-5.8.0.tar.gz", hash = "sha256:78e318def4ade898a461b3d92a79f9441e7e0e4d2ad5419abed4336d702c7425"},
+]
+
+[[package]]
+name = "unityparser"
+version = "2.2.1"
+description = "A python library to parse and dump Unity YAML files"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "unityparser-2.2.1-py3-none-any.whl", hash = "sha256:4b17a79d1adeed31d98506d1794958f5d5575c955fc6be838446f80b13a8fbb7"},
+ {file = "unityparser-2.2.1.tar.gz", hash = "sha256:632fe2dae5b46f38867b5eaa5593584efbd641f3045b0891c74acc602e2627cc"},
+]
+
+[package.dependencies]
+PyYAML = "5.4"
+
+[package.extras]
+ci = ["tox (>=3.24,<4.0)"]
+test = ["pytest (>=4.5,<5.0)", "pytest-cov (>=2.7,<3.0)"]
+
+[[package]]
+name = "urllib3"
+version = "1.26.18"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"},
+ {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"},
+]
+
+[package.extras]
+brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "uvicorn"
+version = "0.24.0.post1"
+description = "The lightning-fast ASGI server."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "uvicorn-0.24.0.post1-py3-none-any.whl", hash = "sha256:7c84fea70c619d4a710153482c0d230929af7bcf76c7bfa6de151f0a3a80121e"},
+ {file = "uvicorn-0.24.0.post1.tar.gz", hash = "sha256:09c8e5a79dc466bdf28dead50093957db184de356fcdc48697bad3bde4c2588e"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+colorama = {version = ">=0.4", optional = true, markers = "sys_platform == \"win32\" and extra == \"standard\""}
+h11 = ">=0.8"
+httptools = {version = ">=0.5.0", optional = true, markers = "extra == \"standard\""}
+python-dotenv = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
+pyyaml = {version = ">=5.1", optional = true, markers = "extra == \"standard\""}
+typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
+uvloop = {version = ">=0.14.0,<0.15.0 || >0.15.0,<0.15.1 || >0.15.1", optional = true, markers = "(sys_platform != \"win32\" and sys_platform != \"cygwin\") and platform_python_implementation != \"PyPy\" and extra == \"standard\""}
+watchfiles = {version = ">=0.13", optional = true, markers = "extra == \"standard\""}
+websockets = {version = ">=10.4", optional = true, markers = "extra == \"standard\""}
+
+[package.extras]
+standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+
+[[package]]
+name = "uvloop"
+version = "0.19.0"
+description = "Fast implementation of asyncio event loop on top of libuv"
+optional = false
+python-versions = ">=3.8.0"
+files = [
+ {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:de4313d7f575474c8f5a12e163f6d89c0a878bc49219641d49e6f1444369a90e"},
+ {file = "uvloop-0.19.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5588bd21cf1fcf06bded085f37e43ce0e00424197e7c10e77afd4bbefffef428"},
+ {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b1fd71c3843327f3bbc3237bedcdb6504fd50368ab3e04d0410e52ec293f5b8"},
+ {file = "uvloop-0.19.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a05128d315e2912791de6088c34136bfcdd0c7cbc1cf85fd6fd1bb321b7c849"},
+ {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:cd81bdc2b8219cb4b2556eea39d2e36bfa375a2dd021404f90a62e44efaaf957"},
+ {file = "uvloop-0.19.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:5f17766fb6da94135526273080f3455a112f82570b2ee5daa64d682387fe0dcd"},
+ {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4ce6b0af8f2729a02a5d1575feacb2a94fc7b2e983868b009d51c9a9d2149bef"},
+ {file = "uvloop-0.19.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:31e672bb38b45abc4f26e273be83b72a0d28d074d5b370fc4dcf4c4eb15417d2"},
+ {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:570fc0ed613883d8d30ee40397b79207eedd2624891692471808a95069a007c1"},
+ {file = "uvloop-0.19.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5138821e40b0c3e6c9478643b4660bd44372ae1e16a322b8fc07478f92684e24"},
+ {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:91ab01c6cd00e39cde50173ba4ec68a1e578fee9279ba64f5221810a9e786533"},
+ {file = "uvloop-0.19.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:47bf3e9312f63684efe283f7342afb414eea4d3011542155c7e625cd799c3b12"},
+ {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:da8435a3bd498419ee8c13c34b89b5005130a476bda1d6ca8cfdde3de35cd650"},
+ {file = "uvloop-0.19.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:02506dc23a5d90e04d4f65c7791e65cf44bd91b37f24cfc3ef6cf2aff05dc7ec"},
+ {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2693049be9d36fef81741fddb3f441673ba12a34a704e7b4361efb75cf30befc"},
+ {file = "uvloop-0.19.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7010271303961c6f0fe37731004335401eb9075a12680738731e9c92ddd96ad6"},
+ {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:5daa304d2161d2918fa9a17d5635099a2f78ae5b5960e742b2fcfbb7aefaa593"},
+ {file = "uvloop-0.19.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7207272c9520203fea9b93843bb775d03e1cf88a80a936ce760f60bb5add92f3"},
+ {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:78ab247f0b5671cc887c31d33f9b3abfb88d2614b84e4303f1a63b46c046c8bd"},
+ {file = "uvloop-0.19.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:472d61143059c84947aa8bb74eabbace30d577a03a1805b77933d6bd13ddebbd"},
+ {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45bf4c24c19fb8a50902ae37c5de50da81de4922af65baf760f7c0c42e1088be"},
+ {file = "uvloop-0.19.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:271718e26b3e17906b28b67314c45d19106112067205119dddbd834c2b7ce797"},
+ {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:34175c9fd2a4bc3adc1380e1261f60306344e3407c20a4d684fd5f3be010fa3d"},
+ {file = "uvloop-0.19.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e27f100e1ff17f6feeb1f33968bc185bf8ce41ca557deee9d9bbbffeb72030b7"},
+ {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:13dfdf492af0aa0a0edf66807d2b465607d11c4fa48f4a1fd41cbea5b18e8e8b"},
+ {file = "uvloop-0.19.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6e3d4e85ac060e2342ff85e90d0c04157acb210b9ce508e784a944f852a40e67"},
+ {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8ca4956c9ab567d87d59d49fa3704cf29e37109ad348f2d5223c9bf761a332e7"},
+ {file = "uvloop-0.19.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f467a5fd23b4fc43ed86342641f3936a68ded707f4627622fa3f82a120e18256"},
+ {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:492e2c32c2af3f971473bc22f086513cedfc66a130756145a931a90c3958cb17"},
+ {file = "uvloop-0.19.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:2df95fca285a9f5bfe730e51945ffe2fa71ccbfdde3b0da5772b4ee4f2e770d5"},
+ {file = "uvloop-0.19.0.tar.gz", hash = "sha256:0246f4fd1bf2bf702e06b0d45ee91677ee5c31242f39aab4ea6fe0c51aedd0fd"},
+]
+
+[package.extras]
+docs = ["Sphinx (>=4.1.2,<4.2.0)", "sphinx-rtd-theme (>=0.5.2,<0.6.0)", "sphinxcontrib-asyncio (>=0.3.0,<0.4.0)"]
+test = ["Cython (>=0.29.36,<0.30.0)", "aiohttp (==3.9.0b0)", "aiohttp (>=3.8.1)", "flake8 (>=5.0,<6.0)", "mypy (>=0.800)", "psutil", "pyOpenSSL (>=23.0.0,<23.1.0)", "pycodestyle (>=2.9.0,<2.10.0)"]
+
+[[package]]
+name = "virtualenv"
+version = "20.25.0"
+description = "Virtual Python Environment builder"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "virtualenv-20.25.0-py3-none-any.whl", hash = "sha256:4238949c5ffe6876362d9c0180fc6c3a824a7b12b80604eeb8085f2ed7460de3"},
+ {file = "virtualenv-20.25.0.tar.gz", hash = "sha256:bf51c0d9c7dd63ea8e44086fa1e4fb1093a31e963b86959257378aef020e1f1b"},
+]
+
+[package.dependencies]
+distlib = ">=0.3.7,<1"
+filelock = ">=3.12.2,<4"
+platformdirs = ">=3.9.1,<5"
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"]
+test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"]
+
+[[package]]
+name = "wandb"
+version = "0.16.0"
+description = "A CLI and library for interacting with the Weights & Biases API."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "wandb-0.16.0-py3-none-any.whl", hash = "sha256:e103142a5ecdb158d29441c2bf9f935ae149ed562377f7cebffd2a6f7c9de949"},
+ {file = "wandb-0.16.0.tar.gz", hash = "sha256:8d9875f1d8d75fee32dc51f6727bc277ce4f3869d7319ccf5f36ce596597402a"},
+]
+
+[package.dependencies]
+appdirs = ">=1.4.3"
+Click = ">=7.1,<8.0.0 || >8.0.0"
+docker-pycreds = ">=0.4.0"
+GitPython = ">=1.0.0,<3.1.29 || >3.1.29"
+protobuf = [
+ {version = ">=3.15.0,<4.21.0 || >4.21.0,<5", markers = "python_version == \"3.9\" and sys_platform == \"linux\""},
+ {version = ">=3.19.0,<4.21.0 || >4.21.0,<5", markers = "sys_platform != \"linux\""},
+]
+psutil = ">=5.0.0"
+PyYAML = "*"
+requests = ">=2.0.0,<3"
+sentry-sdk = ">=1.0.0"
+setproctitle = "*"
+setuptools = "*"
+typing-extensions = {version = "*", markers = "python_version < \"3.10\""}
+
+[package.extras]
+async = ["httpx (>=0.23.0)"]
+aws = ["boto3"]
+azure = ["azure-identity", "azure-storage-blob"]
+gcp = ["google-cloud-storage"]
+kubeflow = ["google-cloud-storage", "kubernetes", "minio", "sh"]
+launch = ["PyYAML (>=6.0.0)", "awscli", "azure-containerregistry", "azure-identity", "azure-storage-blob", "boto3", "botocore", "chardet", "google-auth", "google-cloud-aiplatform", "google-cloud-artifact-registry", "google-cloud-compute", "google-cloud-storage", "iso8601", "kubernetes", "kubernetes-asyncio", "nbconvert", "nbformat", "optuna", "typing-extensions"]
+media = ["bokeh", "moviepy", "numpy", "pillow", "plotly", "rdkit-pypi", "soundfile"]
+models = ["cloudpickle"]
+nexus = ["wandb-core (>=0.17.0b1)"]
+perf = ["orjson"]
+sweeps = ["sweeps (>=0.2.0)"]
+
+[[package]]
+name = "watchfiles"
+version = "0.21.0"
+description = "Simple, modern and high performance file watching and code reload in python."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "watchfiles-0.21.0-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:27b4035013f1ea49c6c0b42d983133b136637a527e48c132d368eb19bf1ac6aa"},
+ {file = "watchfiles-0.21.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c81818595eff6e92535ff32825f31c116f867f64ff8cdf6562cd1d6b2e1e8f3e"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6c107ea3cf2bd07199d66f156e3ea756d1b84dfd43b542b2d870b77868c98c03"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d9ac347653ebd95839a7c607608703b20bc07e577e870d824fa4801bc1cb124"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5eb86c6acb498208e7663ca22dbe68ca2cf42ab5bf1c776670a50919a56e64ab"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f564bf68404144ea6b87a78a3f910cc8de216c6b12a4cf0b27718bf4ec38d303"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d0f32ebfaa9c6011f8454994f86108c2eb9c79b8b7de00b36d558cadcedaa3d"},
+ {file = "watchfiles-0.21.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6d45d9b699ecbac6c7bd8e0a2609767491540403610962968d258fd6405c17c"},
+ {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:aff06b2cac3ef4616e26ba17a9c250c1fe9dd8a5d907d0193f84c499b1b6e6a9"},
+ {file = "watchfiles-0.21.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d9792dff410f266051025ecfaa927078b94cc7478954b06796a9756ccc7e14a9"},
+ {file = "watchfiles-0.21.0-cp310-none-win32.whl", hash = "sha256:214cee7f9e09150d4fb42e24919a1e74d8c9b8a9306ed1474ecaddcd5479c293"},
+ {file = "watchfiles-0.21.0-cp310-none-win_amd64.whl", hash = "sha256:1ad7247d79f9f55bb25ab1778fd47f32d70cf36053941f07de0b7c4e96b5d235"},
+ {file = "watchfiles-0.21.0-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:668c265d90de8ae914f860d3eeb164534ba2e836811f91fecc7050416ee70aa7"},
+ {file = "watchfiles-0.21.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3a23092a992e61c3a6a70f350a56db7197242f3490da9c87b500f389b2d01eef"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:e7941bbcfdded9c26b0bf720cb7e6fd803d95a55d2c14b4bd1f6a2772230c586"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:11cd0c3100e2233e9c53106265da31d574355c288e15259c0d40a4405cbae317"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d78f30cbe8b2ce770160d3c08cff01b2ae9306fe66ce899b73f0409dc1846c1b"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6674b00b9756b0af620aa2a3346b01f8e2a3dc729d25617e1b89cf6af4a54eb1"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:fd7ac678b92b29ba630d8c842d8ad6c555abda1b9ef044d6cc092dacbfc9719d"},
+ {file = "watchfiles-0.21.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9c873345680c1b87f1e09e0eaf8cf6c891b9851d8b4d3645e7efe2ec20a20cc7"},
+ {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:49f56e6ecc2503e7dbe233fa328b2be1a7797d31548e7a193237dcdf1ad0eee0"},
+ {file = "watchfiles-0.21.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:02d91cbac553a3ad141db016e3350b03184deaafeba09b9d6439826ee594b365"},
+ {file = "watchfiles-0.21.0-cp311-none-win32.whl", hash = "sha256:ebe684d7d26239e23d102a2bad2a358dedf18e462e8808778703427d1f584400"},
+ {file = "watchfiles-0.21.0-cp311-none-win_amd64.whl", hash = "sha256:4566006aa44cb0d21b8ab53baf4b9c667a0ed23efe4aaad8c227bfba0bf15cbe"},
+ {file = "watchfiles-0.21.0-cp311-none-win_arm64.whl", hash = "sha256:c550a56bf209a3d987d5a975cdf2063b3389a5d16caf29db4bdddeae49f22078"},
+ {file = "watchfiles-0.21.0-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:51ddac60b96a42c15d24fbdc7a4bfcd02b5a29c047b7f8bf63d3f6f5a860949a"},
+ {file = "watchfiles-0.21.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:511f0b034120cd1989932bf1e9081aa9fb00f1f949fbd2d9cab6264916ae89b1"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:cfb92d49dbb95ec7a07511bc9efb0faff8fe24ef3805662b8d6808ba8409a71a"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3f92944efc564867bbf841c823c8b71bb0be75e06b8ce45c084b46411475a915"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:642d66b75eda909fd1112d35c53816d59789a4b38c141a96d62f50a3ef9b3360"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d23bcd6c8eaa6324fe109d8cac01b41fe9a54b8c498af9ce464c1aeeb99903d6"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18d5b4da8cf3e41895b34e8c37d13c9ed294954907929aacd95153508d5d89d7"},
+ {file = "watchfiles-0.21.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1b8d1eae0f65441963d805f766c7e9cd092f91e0c600c820c764a4ff71a0764c"},
+ {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:1fd9a5205139f3c6bb60d11f6072e0552f0a20b712c85f43d42342d162be1235"},
+ {file = "watchfiles-0.21.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a1e3014a625bcf107fbf38eece0e47fa0190e52e45dc6eee5a8265ddc6dc5ea7"},
+ {file = "watchfiles-0.21.0-cp312-none-win32.whl", hash = "sha256:9d09869f2c5a6f2d9df50ce3064b3391d3ecb6dced708ad64467b9e4f2c9bef3"},
+ {file = "watchfiles-0.21.0-cp312-none-win_amd64.whl", hash = "sha256:18722b50783b5e30a18a8a5db3006bab146d2b705c92eb9a94f78c72beb94094"},
+ {file = "watchfiles-0.21.0-cp312-none-win_arm64.whl", hash = "sha256:a3b9bec9579a15fb3ca2d9878deae789df72f2b0fdaf90ad49ee389cad5edab6"},
+ {file = "watchfiles-0.21.0-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:4ea10a29aa5de67de02256a28d1bf53d21322295cb00bd2d57fcd19b850ebd99"},
+ {file = "watchfiles-0.21.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:40bca549fdc929b470dd1dbfcb47b3295cb46a6d2c90e50588b0a1b3bd98f429"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:9b37a7ba223b2f26122c148bb8d09a9ff312afca998c48c725ff5a0a632145f7"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec8c8900dc5c83650a63dd48c4d1d245343f904c4b64b48798c67a3767d7e165"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8ad3fe0a3567c2f0f629d800409cd528cb6251da12e81a1f765e5c5345fd0137"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9d353c4cfda586db2a176ce42c88f2fc31ec25e50212650c89fdd0f560ee507b"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:83a696da8922314ff2aec02987eefb03784f473281d740bf9170181829133765"},
+ {file = "watchfiles-0.21.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5a03651352fc20975ee2a707cd2d74a386cd303cc688f407296064ad1e6d1562"},
+ {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:3ad692bc7792be8c32918c699638b660c0de078a6cbe464c46e1340dadb94c19"},
+ {file = "watchfiles-0.21.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06247538e8253975bdb328e7683f8515ff5ff041f43be6c40bff62d989b7d0b0"},
+ {file = "watchfiles-0.21.0-cp38-none-win32.whl", hash = "sha256:9a0aa47f94ea9a0b39dd30850b0adf2e1cd32a8b4f9c7aa443d852aacf9ca214"},
+ {file = "watchfiles-0.21.0-cp38-none-win_amd64.whl", hash = "sha256:8d5f400326840934e3507701f9f7269247f7c026d1b6cfd49477d2be0933cfca"},
+ {file = "watchfiles-0.21.0-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:7f762a1a85a12cc3484f77eee7be87b10f8c50b0b787bb02f4e357403cad0c0e"},
+ {file = "watchfiles-0.21.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6e9be3ef84e2bb9710f3f777accce25556f4a71e15d2b73223788d528fcc2052"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:4c48a10d17571d1275701e14a601e36959ffada3add8cdbc9e5061a6e3579a5d"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c889025f59884423428c261f212e04d438de865beda0b1e1babab85ef4c0f01"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66fac0c238ab9a2e72d026b5fb91cb902c146202bbd29a9a1a44e8db7b710b6f"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b4a21f71885aa2744719459951819e7bf5a906a6448a6b2bbce8e9cc9f2c8128"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1c9198c989f47898b2c22201756f73249de3748e0fc9de44adaf54a8b259cc0c"},
+ {file = "watchfiles-0.21.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d8f57c4461cd24fda22493109c45b3980863c58a25b8bec885ca8bea6b8d4b28"},
+ {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:853853cbf7bf9408b404754b92512ebe3e3a83587503d766d23e6bf83d092ee6"},
+ {file = "watchfiles-0.21.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:d5b1dc0e708fad9f92c296ab2f948af403bf201db8fb2eb4c8179db143732e49"},
+ {file = "watchfiles-0.21.0-cp39-none-win32.whl", hash = "sha256:59137c0c6826bd56c710d1d2bda81553b5e6b7c84d5a676747d80caf0409ad94"},
+ {file = "watchfiles-0.21.0-cp39-none-win_amd64.whl", hash = "sha256:6cb8fdc044909e2078c248986f2fc76f911f72b51ea4a4fbbf472e01d14faa58"},
+ {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:ab03a90b305d2588e8352168e8c5a1520b721d2d367f31e9332c4235b30b8994"},
+ {file = "watchfiles-0.21.0-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:927c589500f9f41e370b0125c12ac9e7d3a2fd166b89e9ee2828b3dda20bfe6f"},
+ {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1bd467213195e76f838caf2c28cd65e58302d0254e636e7c0fca81efa4a2e62c"},
+ {file = "watchfiles-0.21.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:02b73130687bc3f6bb79d8a170959042eb56eb3a42df3671c79b428cd73f17cc"},
+ {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:08dca260e85ffae975448e344834d765983237ad6dc308231aa16e7933db763e"},
+ {file = "watchfiles-0.21.0-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:3ccceb50c611c433145502735e0370877cced72a6c70fd2410238bcbc7fe51d8"},
+ {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:57d430f5fb63fea141ab71ca9c064e80de3a20b427ca2febcbfcef70ff0ce895"},
+ {file = "watchfiles-0.21.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0dd5fad9b9c0dd89904bbdea978ce89a2b692a7ee8a0ce19b940e538c88a809c"},
+ {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:be6dd5d52b73018b21adc1c5d28ac0c68184a64769052dfeb0c5d9998e7f56a2"},
+ {file = "watchfiles-0.21.0-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:b3cab0e06143768499384a8a5efb9c4dc53e19382952859e4802f294214f36ec"},
+ {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c6ed10c2497e5fedadf61e465b3ca12a19f96004c15dcffe4bd442ebadc2d85"},
+ {file = "watchfiles-0.21.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43babacef21c519bc6631c5fce2a61eccdfc011b4bcb9047255e9620732c8097"},
+ {file = "watchfiles-0.21.0.tar.gz", hash = "sha256:c76c635fabf542bb78524905718c39f736a98e5ab25b23ec6d4abede1a85a6a3"},
+]
+
+[package.dependencies]
+anyio = ">=3.0.0"
+
+[[package]]
+name = "websockets"
+version = "12.0"
+description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "websockets-12.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d554236b2a2006e0ce16315c16eaa0d628dab009c33b63ea03f41c6107958374"},
+ {file = "websockets-12.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2d225bb6886591b1746b17c0573e29804619c8f755b5598d875bb4235ea639be"},
+ {file = "websockets-12.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:eb809e816916a3b210bed3c82fb88eaf16e8afcf9c115ebb2bacede1797d2547"},
+ {file = "websockets-12.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c588f6abc13f78a67044c6b1273a99e1cf31038ad51815b3b016ce699f0d75c2"},
+ {file = "websockets-12.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5aa9348186d79a5f232115ed3fa9020eab66d6c3437d72f9d2c8ac0c6858c558"},
+ {file = "websockets-12.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6350b14a40c95ddd53e775dbdbbbc59b124a5c8ecd6fbb09c2e52029f7a9f480"},
+ {file = "websockets-12.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:70ec754cc2a769bcd218ed8d7209055667b30860ffecb8633a834dde27d6307c"},
+ {file = "websockets-12.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:6e96f5ed1b83a8ddb07909b45bd94833b0710f738115751cdaa9da1fb0cb66e8"},
+ {file = "websockets-12.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:4d87be612cbef86f994178d5186add3d94e9f31cc3cb499a0482b866ec477603"},
+ {file = "websockets-12.0-cp310-cp310-win32.whl", hash = "sha256:befe90632d66caaf72e8b2ed4d7f02b348913813c8b0a32fae1cc5fe3730902f"},
+ {file = "websockets-12.0-cp310-cp310-win_amd64.whl", hash = "sha256:363f57ca8bc8576195d0540c648aa58ac18cf85b76ad5202b9f976918f4219cf"},
+ {file = "websockets-12.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:5d873c7de42dea355d73f170be0f23788cf3fa9f7bed718fd2830eefedce01b4"},
+ {file = "websockets-12.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3f61726cae9f65b872502ff3c1496abc93ffbe31b278455c418492016e2afc8f"},
+ {file = "websockets-12.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ed2fcf7a07334c77fc8a230755c2209223a7cc44fc27597729b8ef5425aa61a3"},
+ {file = "websockets-12.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8e332c210b14b57904869ca9f9bf4ca32f5427a03eeb625da9b616c85a3a506c"},
+ {file = "websockets-12.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5693ef74233122f8ebab026817b1b37fe25c411ecfca084b29bc7d6efc548f45"},
+ {file = "websockets-12.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e9e7db18b4539a29cc5ad8c8b252738a30e2b13f033c2d6e9d0549b45841c04"},
+ {file = "websockets-12.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:6e2df67b8014767d0f785baa98393725739287684b9f8d8a1001eb2839031447"},
+ {file = "websockets-12.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:bea88d71630c5900690fcb03161ab18f8f244805c59e2e0dc4ffadae0a7ee0ca"},
+ {file = "websockets-12.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dff6cdf35e31d1315790149fee351f9e52978130cef6c87c4b6c9b3baf78bc53"},
+ {file = "websockets-12.0-cp311-cp311-win32.whl", hash = "sha256:3e3aa8c468af01d70332a382350ee95f6986db479ce7af14d5e81ec52aa2b402"},
+ {file = "websockets-12.0-cp311-cp311-win_amd64.whl", hash = "sha256:25eb766c8ad27da0f79420b2af4b85d29914ba0edf69f547cc4f06ca6f1d403b"},
+ {file = "websockets-12.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0e6e2711d5a8e6e482cacb927a49a3d432345dfe7dea8ace7b5790df5932e4df"},
+ {file = "websockets-12.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:dbcf72a37f0b3316e993e13ecf32f10c0e1259c28ffd0a85cee26e8549595fbc"},
+ {file = "websockets-12.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:12743ab88ab2af1d17dd4acb4645677cb7063ef4db93abffbf164218a5d54c6b"},
+ {file = "websockets-12.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7b645f491f3c48d3f8a00d1fce07445fab7347fec54a3e65f0725d730d5b99cb"},
+ {file = "websockets-12.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9893d1aa45a7f8b3bc4510f6ccf8db8c3b62120917af15e3de247f0780294b92"},
+ {file = "websockets-12.0-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f38a7b376117ef7aff996e737583172bdf535932c9ca021746573bce40165ed"},
+ {file = "websockets-12.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:f764ba54e33daf20e167915edc443b6f88956f37fb606449b4a5b10ba42235a5"},
+ {file = "websockets-12.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:1e4b3f8ea6a9cfa8be8484c9221ec0257508e3a1ec43c36acdefb2a9c3b00aa2"},
+ {file = "websockets-12.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:9fdf06fd06c32205a07e47328ab49c40fc1407cdec801d698a7c41167ea45113"},
+ {file = "websockets-12.0-cp312-cp312-win32.whl", hash = "sha256:baa386875b70cbd81798fa9f71be689c1bf484f65fd6fb08d051a0ee4e79924d"},
+ {file = "websockets-12.0-cp312-cp312-win_amd64.whl", hash = "sha256:ae0a5da8f35a5be197f328d4727dbcfafa53d1824fac3d96cdd3a642fe09394f"},
+ {file = "websockets-12.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5f6ffe2c6598f7f7207eef9a1228b6f5c818f9f4d53ee920aacd35cec8110438"},
+ {file = "websockets-12.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9edf3fc590cc2ec20dc9d7a45108b5bbaf21c0d89f9fd3fd1685e223771dc0b2"},
+ {file = "websockets-12.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:8572132c7be52632201a35f5e08348137f658e5ffd21f51f94572ca6c05ea81d"},
+ {file = "websockets-12.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:604428d1b87edbf02b233e2c207d7d528460fa978f9e391bd8aaf9c8311de137"},
+ {file = "websockets-12.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1a9d160fd080c6285e202327aba140fc9a0d910b09e423afff4ae5cbbf1c7205"},
+ {file = "websockets-12.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:87b4aafed34653e465eb77b7c93ef058516cb5acf3eb21e42f33928616172def"},
+ {file = "websockets-12.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b2ee7288b85959797970114deae81ab41b731f19ebcd3bd499ae9ca0e3f1d2c8"},
+ {file = "websockets-12.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:7fa3d25e81bfe6a89718e9791128398a50dec6d57faf23770787ff441d851967"},
+ {file = "websockets-12.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a571f035a47212288e3b3519944f6bf4ac7bc7553243e41eac50dd48552b6df7"},
+ {file = "websockets-12.0-cp38-cp38-win32.whl", hash = "sha256:3c6cc1360c10c17463aadd29dd3af332d4a1adaa8796f6b0e9f9df1fdb0bad62"},
+ {file = "websockets-12.0-cp38-cp38-win_amd64.whl", hash = "sha256:1bf386089178ea69d720f8db6199a0504a406209a0fc23e603b27b300fdd6892"},
+ {file = "websockets-12.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:ab3d732ad50a4fbd04a4490ef08acd0517b6ae6b77eb967251f4c263011a990d"},
+ {file = "websockets-12.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a1d9697f3337a89691e3bd8dc56dea45a6f6d975f92e7d5f773bc715c15dde28"},
+ {file = "websockets-12.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1df2fbd2c8a98d38a66f5238484405b8d1d16f929bb7a33ed73e4801222a6f53"},
+ {file = "websockets-12.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:23509452b3bc38e3a057382c2e941d5ac2e01e251acce7adc74011d7d8de434c"},
+ {file = "websockets-12.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e5fc14ec6ea568200ea4ef46545073da81900a2b67b3e666f04adf53ad452ec"},
+ {file = "websockets-12.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46e71dbbd12850224243f5d2aeec90f0aaa0f2dde5aeeb8fc8df21e04d99eff9"},
+ {file = "websockets-12.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b81f90dcc6c85a9b7f29873beb56c94c85d6f0dac2ea8b60d995bd18bf3e2aae"},
+ {file = "websockets-12.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:a02413bc474feda2849c59ed2dfb2cddb4cd3d2f03a2fedec51d6e959d9b608b"},
+ {file = "websockets-12.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bbe6013f9f791944ed31ca08b077e26249309639313fff132bfbf3ba105673b9"},
+ {file = "websockets-12.0-cp39-cp39-win32.whl", hash = "sha256:cbe83a6bbdf207ff0541de01e11904827540aa069293696dd528a6640bd6a5f6"},
+ {file = "websockets-12.0-cp39-cp39-win_amd64.whl", hash = "sha256:fc4e7fa5414512b481a2483775a8e8be7803a35b30ca805afa4998a84f9fd9e8"},
+ {file = "websockets-12.0-pp310-pypy310_pp73-macosx_10_9_x86_64.whl", hash = "sha256:248d8e2446e13c1d4326e0a6a4e9629cb13a11195051a73acf414812700badbd"},
+ {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f44069528d45a933997a6fef143030d8ca8042f0dfaad753e2906398290e2870"},
+ {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c4e37d36f0d19f0a4413d3e18c0d03d0c268ada2061868c1e6f5ab1a6d575077"},
+ {file = "websockets-12.0-pp310-pypy310_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d829f975fc2e527a3ef2f9c8f25e553eb7bc779c6665e8e1d52aa22800bb38b"},
+ {file = "websockets-12.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:2c71bd45a777433dd9113847af751aae36e448bc6b8c361a566cb043eda6ec30"},
+ {file = "websockets-12.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0bee75f400895aef54157b36ed6d3b308fcab62e5260703add87f44cee9c82a6"},
+ {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:423fc1ed29f7512fceb727e2d2aecb952c46aa34895e9ed96071821309951123"},
+ {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27a5e9964ef509016759f2ef3f2c1e13f403725a5e6a1775555994966a66e931"},
+ {file = "websockets-12.0-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c3181df4583c4d3994d31fb235dc681d2aaad744fbdbf94c4802485ececdecf2"},
+ {file = "websockets-12.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:b067cb952ce8bf40115f6c19f478dc71c5e719b7fbaa511359795dfd9d1a6468"},
+ {file = "websockets-12.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:00700340c6c7ab788f176d118775202aadea7602c5cc6be6ae127761c16d6b0b"},
+ {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e469d01137942849cff40517c97a30a93ae79917752b34029f0ec72df6b46399"},
+ {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffefa1374cd508d633646d51a8e9277763a9b78ae71324183693959cf94635a7"},
+ {file = "websockets-12.0-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba0cab91b3956dfa9f512147860783a1829a8d905ee218a9837c18f683239611"},
+ {file = "websockets-12.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:2cb388a5bfb56df4d9a406783b7f9dbefb888c09b71629351cc6b036e9259370"},
+ {file = "websockets-12.0-py3-none-any.whl", hash = "sha256:dc284bbc8d7c78a6c69e0c7325ab46ee5e40bb4d50e494d8131a07ef47500e9e"},
+ {file = "websockets-12.0.tar.gz", hash = "sha256:81df9cbcbb6c260de1e007e58c011bfebe2dafc8435107b0537f393dd38c8b1b"},
+]
+
+[[package]]
+name = "wemake-python-styleguide"
+version = "0.18.0"
+description = "The strictest and most opinionated python linter ever"
+optional = false
+python-versions = ">=3.8.1,<4.0"
+files = [
+ {file = "wemake_python_styleguide-0.18.0-py3-none-any.whl", hash = "sha256:2219be145185edcd5e01f4ce49e3dea11acc34f2c377face0c175bb6ea6ac988"},
+ {file = "wemake_python_styleguide-0.18.0.tar.gz", hash = "sha256:69139858cf5b2a9ba09dac136e2873a4685515768f68fdef2684ebefd7b1dafd"},
+]
+
+[package.dependencies]
+astor = ">=0.8,<0.9"
+attrs = "*"
+darglint = ">=1.2,<2.0"
+flake8 = ">5"
+flake8-bandit = ">=4.1,<5.0"
+flake8-broken-line = ">=1.0,<2.0"
+flake8-bugbear = ">=23.5,<24.0"
+flake8-commas = ">=2.0,<3.0"
+flake8-comprehensions = ">=3.1,<4.0"
+flake8-debugger = ">=4.0,<5.0"
+flake8-docstrings = ">=1.3,<2.0"
+flake8-eradicate = ">=1.5,<2.0"
+flake8-isort = ">=6.0,<7.0"
+flake8-quotes = ">=3.0,<4.0"
+flake8-rst-docstrings = ">=0.3,<0.4"
+flake8-string-format = ">=0.3,<0.4"
+pep8-naming = ">=0.13,<0.14"
+pygments = ">=2.4,<3.0"
+setuptools = "*"
+typing_extensions = ">=4.0,<5.0"
+
+[[package]]
+name = "werkzeug"
+version = "3.0.1"
+description = "The comprehensive WSGI web application library."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "werkzeug-3.0.1-py3-none-any.whl", hash = "sha256:90a285dc0e42ad56b34e696398b8122ee4c681833fb35b8334a095d82c56da10"},
+ {file = "werkzeug-3.0.1.tar.gz", hash = "sha256:507e811ecea72b18a404947aded4b3390e1db8f826b494d76550ef45bb3b1dcc"},
+]
+
+[package.dependencies]
+MarkupSafe = ">=2.1.1"
+
+[package.extras]
+watchdog = ["watchdog (>=2.3)"]
+
+[[package]]
+name = "wheel"
+version = "0.42.0"
+description = "A built-package format for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "wheel-0.42.0-py3-none-any.whl", hash = "sha256:177f9c9b0d45c47873b619f5b650346d632cdc35fb5e4d25058e09c9e581433d"},
+ {file = "wheel-0.42.0.tar.gz", hash = "sha256:c45be39f7882c9d34243236f2d63cbd58039e360f85d0913425fbd7ceea617a8"},
+]
+
+[package.extras]
+test = ["pytest (>=6.0.0)", "setuptools (>=65)"]
+
+[[package]]
+name = "win32-setctime"
+version = "1.1.0"
+description = "A small Python utility to set file creation time on Windows"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"},
+ {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"},
+]
+
+[package.extras]
+dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"]
+
+[[package]]
+name = "wirerope"
+version = "0.4.7"
+description = "'Turn functions and methods into fully controllable objects'"
+optional = false
+python-versions = "*"
+files = [
+ {file = "wirerope-0.4.7.tar.gz", hash = "sha256:f3961039218276283c5037da0fa164619def0327595f10892d562a61a8603990"},
+]
+
+[package.dependencies]
+six = ">=1.11.0"
+
+[package.extras]
+doc = ["sphinx"]
+test = ["pytest (>=4.6.7)", "pytest-cov (>=2.6.1)"]
+
+[[package]]
+name = "zipp"
+version = "3.17.0"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.17.0-py3-none-any.whl", hash = "sha256:0e923e726174922dce09c53c59ad483ff7bbb8e572e00c7f7c46b88556409f31"},
+ {file = "zipp-3.17.0.tar.gz", hash = "sha256:84e64a1c28cf7e91ed2078bb8cc8c259cb19b76942096c8d7b84947690cabaf0"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (<7.2.5)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[[package]]
+name = "zope-event"
+version = "5.0"
+description = "Very basic event publishing system"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "zope.event-5.0-py3-none-any.whl", hash = "sha256:2832e95014f4db26c47a13fdaef84cef2f4df37e66b59d8f1f4a8f319a632c26"},
+ {file = "zope.event-5.0.tar.gz", hash = "sha256:bac440d8d9891b4068e2b5a2c5e2c9765a9df762944bda6955f96bb9b91e67cd"},
+]
+
+[package.dependencies]
+setuptools = "*"
+
+[package.extras]
+docs = ["Sphinx"]
+test = ["zope.testrunner"]
+
+[[package]]
+name = "zope-interface"
+version = "6.1"
+description = "Interfaces for Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "zope.interface-6.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:43b576c34ef0c1f5a4981163b551a8781896f2a37f71b8655fd20b5af0386abb"},
+ {file = "zope.interface-6.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:67be3ca75012c6e9b109860820a8b6c9a84bfb036fbd1076246b98e56951ca92"},
+ {file = "zope.interface-6.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b9bc671626281f6045ad61d93a60f52fd5e8209b1610972cf0ef1bbe6d808e3"},
+ {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bbe81def9cf3e46f16ce01d9bfd8bea595e06505e51b7baf45115c77352675fd"},
+ {file = "zope.interface-6.1-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6dc998f6de015723196a904045e5a2217f3590b62ea31990672e31fbc5370b41"},
+ {file = "zope.interface-6.1-cp310-cp310-win_amd64.whl", hash = "sha256:239a4a08525c080ff833560171d23b249f7f4d17fcbf9316ef4159f44997616f"},
+ {file = "zope.interface-6.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:9ffdaa5290422ac0f1688cb8adb1b94ca56cee3ad11f29f2ae301df8aecba7d1"},
+ {file = "zope.interface-6.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:34c15ca9248f2e095ef2e93af2d633358c5f048c49fbfddf5fdfc47d5e263736"},
+ {file = "zope.interface-6.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b012d023b4fb59183909b45d7f97fb493ef7a46d2838a5e716e3155081894605"},
+ {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97806e9ca3651588c1baaebb8d0c5ee3db95430b612db354c199b57378312ee8"},
+ {file = "zope.interface-6.1-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fddbab55a2473f1d3b8833ec6b7ac31e8211b0aa608df5ab09ce07f3727326de"},
+ {file = "zope.interface-6.1-cp311-cp311-win_amd64.whl", hash = "sha256:a0da79117952a9a41253696ed3e8b560a425197d4e41634a23b1507efe3273f1"},
+ {file = "zope.interface-6.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8bb9c990ca9027b4214fa543fd4025818dc95f8b7abce79d61dc8a2112b561a"},
+ {file = "zope.interface-6.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b51b64432eed4c0744241e9ce5c70dcfecac866dff720e746d0a9c82f371dfa7"},
+ {file = "zope.interface-6.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aa6fd016e9644406d0a61313e50348c706e911dca29736a3266fc9e28ec4ca6d"},
+ {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c8cf55261e15590065039696607f6c9c1aeda700ceee40c70478552d323b3ff"},
+ {file = "zope.interface-6.1-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e30506bcb03de8983f78884807e4fd95d8db6e65b69257eea05d13d519b83ac0"},
+ {file = "zope.interface-6.1-cp312-cp312-win_amd64.whl", hash = "sha256:e33e86fd65f369f10608b08729c8f1c92ec7e0e485964670b4d2633a4812d36b"},
+ {file = "zope.interface-6.1-cp37-cp37m-macosx_11_0_x86_64.whl", hash = "sha256:2f8d89721834524a813f37fa174bac074ec3d179858e4ad1b7efd4401f8ac45d"},
+ {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:13b7d0f2a67eb83c385880489dbb80145e9d344427b4262c49fbf2581677c11c"},
+ {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ef43ee91c193f827e49599e824385ec7c7f3cd152d74cb1dfe02cb135f264d83"},
+ {file = "zope.interface-6.1-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e441e8b7d587af0414d25e8d05e27040d78581388eed4c54c30c0c91aad3a379"},
+ {file = "zope.interface-6.1-cp37-cp37m-win_amd64.whl", hash = "sha256:f89b28772fc2562ed9ad871c865f5320ef761a7fcc188a935e21fe8b31a38ca9"},
+ {file = "zope.interface-6.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:70d2cef1bf529bff41559be2de9d44d47b002f65e17f43c73ddefc92f32bf00f"},
+ {file = "zope.interface-6.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ad54ed57bdfa3254d23ae04a4b1ce405954969c1b0550cc2d1d2990e8b439de1"},
+ {file = "zope.interface-6.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ef467d86d3cfde8b39ea1b35090208b0447caaabd38405420830f7fd85fbdd56"},
+ {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6af47f10cfc54c2ba2d825220f180cc1e2d4914d783d6fc0cd93d43d7bc1c78b"},
+ {file = "zope.interface-6.1-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c9559138690e1bd4ea6cd0954d22d1e9251e8025ce9ede5d0af0ceae4a401e43"},
+ {file = "zope.interface-6.1-cp38-cp38-win_amd64.whl", hash = "sha256:964a7af27379ff4357dad1256d9f215047e70e93009e532d36dcb8909036033d"},
+ {file = "zope.interface-6.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:387545206c56b0315fbadb0431d5129c797f92dc59e276b3ce82db07ac1c6179"},
+ {file = "zope.interface-6.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:57d0a8ce40ce440f96a2c77824ee94bf0d0925e6089df7366c2272ccefcb7941"},
+ {file = "zope.interface-6.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ebc4d34e7620c4f0da7bf162c81978fce0ea820e4fa1e8fc40ee763839805f3"},
+ {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5a804abc126b33824a44a7aa94f06cd211a18bbf31898ba04bd0924fbe9d282d"},
+ {file = "zope.interface-6.1-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f294a15f7723fc0d3b40701ca9b446133ec713eafc1cc6afa7b3d98666ee1ac"},
+ {file = "zope.interface-6.1-cp39-cp39-win_amd64.whl", hash = "sha256:a41f87bb93b8048fe866fa9e3d0c51e27fe55149035dcf5f43da4b56732c0a40"},
+ {file = "zope.interface-6.1.tar.gz", hash = "sha256:2fdc7ccbd6eb6b7df5353012fbed6c3c5d04ceaca0038f75e601060e95345309"},
+]
+
+[package.dependencies]
+setuptools = "*"
+
+[package.extras]
+docs = ["Sphinx", "repoze.sphinx.autointerface", "sphinx-rtd-theme"]
+test = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
+testing = ["coverage (>=5.0.3)", "zope.event", "zope.testing"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = ">=3.9,<3.10"
+content-hash = "1ff7f48a7d44da0e62d5c30199eba227b0fdaf3b67d5d1972dce88869a459034"
diff --git a/pyproject.toml b/pyproject.toml
new file mode 100644
index 0000000..7e9e05e
--- /dev/null
+++ b/pyproject.toml
@@ -0,0 +1,89 @@
+[tool.poetry]
+name = "simbot-offline-inference"
+version = "4.33.0"
+description = ""
+authors = ["Amit Parekh <7276308+amitkparekh@users.noreply.github.com>"]
+readme = "README.md"
+packages = [
+ { include = "simbot_offline_inference", from = "src" },
+ { include = "arena_wrapper", from = "src" },
+ { include = "arena_missions", from = "src" },
+]
+
+[tool.poe.tasks]
+
+[tool.poe.tasks.format]
+help = "Format using the pre-commit hooks"
+cmd = "pre-commit run --all-files"
+
+[tool.poe.tasks.typecheck]
+help = "Check types with mypy"
+cmd = "mypy ."
+
+[tool.poe.tasks.lint]
+help = "Lint with flake8"
+cmd = "flake8 ."
+
+[tool.poe.tasks.test]
+help = "Run the fast Python tests"
+cmd = "pytest --cov=src -m 'not slow'"
+
+[tool.poe.tasks.test-everything]
+help = "Run all the tests and get the coverage"
+cmd = "pytest -v --durations=40 --color=yes --junitxml=pytest.xml --cov=src -m 'not slow' -n 2"
+
+[tool.poetry.dependencies]
+python = ">=3.9,<3.10"
+flask = ">=2.2.2,<3"
+unityparser = "2.2.1"
+opencv-python-headless = ">=4.8.1.68"
+scipy = ">=1.10.0"
+jinja2 = ">=3.1.2"
+flask-cors = ">=3.0.10"
+gevent = ">=23.9"
+shortuuid = ">=1.0.11"
+typing-extensions = ">=4.8.0"
+deepmerge = ">=1.1.0"
+wandb = ">=0.16.0"
+torchmetrics = ">=0.11.4"
+
+[tool.poetry.group.emma.dependencies]
+emma-experience-hub = { git = "https://github.com/emma-heriot-watt/experience-hub.git" }
+
+[tool.poetry.group.lint.dependencies]
+black = ">=23.3.0"
+wemake-python-styleguide = ">=0.17.0"
+mypy = ">=1.2.0"
+pre-commit = ">=3.2.2"
+poethepoet = ">=0.18.1"
+isort = ">=5.12.0"
+types-pyyaml = ">=6.0.12.9"
+
+
+[tool.poetry.group.test.dependencies]
+pytest = ">=7.2.2"
+pytest-cases = ">=3.6.14"
+pytest-cov = ">=4.0.0"
+pytest-xdist = ">=3.2.1"
+deepdiff = ">=6.3.0"
+
+[tool.black]
+line-length = 99
+
+[tool.isort]
+profile = "black"
+combine_as_imports = true
+lines_after_imports = 2
+line_length = 99
+extra_standard_library = ["typing_extensions"]
+known_first_party = ["emma_*", "arena_*", "simbot_*"]
+
+[tool.pyright]
+include = ["src"]
+exclude = ["src/arena_wrapper"]
+# typeCheckingMode = "strict"
+reportImportCycles = "error"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
diff --git a/scripts/compare_t1_data.py b/scripts/compare_t1_data.py
new file mode 100644
index 0000000..1436543
--- /dev/null
+++ b/scripts/compare_t1_data.py
@@ -0,0 +1,102 @@
+import itertools
+import json
+from pathlib import Path
+
+from pydantic import BaseModel
+from rich.pretty import pprint as rich_print
+from rich.progress import track
+
+
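+# Total number of games in the evaluation set (used as the default for `games_available`)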
+TOTAL_COUNT = 1149
+
+
+class SplitMetrics(BaseModel):
+ """Model for each metrics.json that got saved."""
+
+ games_available: int = TOTAL_COUNT
+ games_played: int
+ games_completed: int
+ subgoals_completed: int
+ total_subgoals: int
+ mission_groups: set[str]
+ games_played_per_mission_group: dict[str, int]
+ games_completed_per_mission_group: dict[str, int]
+ success_rate: float
+ subgoal_completion_rate: float
+ success_rate_per_mission: dict[str, float]
+
+
+def average_actions(mission_dir: Path) -> None:
+ """Get the average number of actions taken per mission."""
+ num_actions = []
+ files_to_check = list(mission_dir.iterdir())
+
+ for mission_file in track(files_to_check):
+ if mission_file.is_dir():
+ continue
+
+ with open(mission_file) as open_file:
+ parsed_file = json.load(open_file)
+ actions = parsed_file["predicted_actions"]
+ num_actions.append(len(actions))
+
+ rich_print(sum(num_actions) / len(num_actions))
+
+
+def compare_data(data_dir: Path) -> None:
+ """Run the numbers."""
+ # Load all the data
+ loaded_data: list[SplitMetrics] = []
+ for data_file in data_dir.iterdir():
+ if data_file.is_dir():
+ continue
+ loaded_data.append(SplitMetrics.parse_file(data_file))
+
+ games_played = sum(data.games_played for data in loaded_data)
+ games_completed = sum(data.games_completed for data in loaded_data)
+ subgoals_completed = sum(data.subgoals_completed for data in loaded_data)
+ total_subgoals = sum(data.total_subgoals for data in loaded_data)
+ mission_groups = set(
+ itertools.chain.from_iterable([data.mission_groups for data in loaded_data])
+ )
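+ # Aggregate the per-mission-group counts across all splits, counting 0 for groups a split never played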
+ games_played_per_mission_group = {
+ mission_group: sum(
+ data.games_played_per_mission_group[mission_group]
+ if mission_group in data.games_played_per_mission_group
+ else 0
+ for data in loaded_data
+ )
+ for mission_group in mission_groups
+ }
+ games_completed_per_mission_group = {
+ mission_group: sum(
+ data.games_completed_per_mission_group[mission_group]
+ if mission_group in data.games_completed_per_mission_group
+ else 0
+ for data in loaded_data
+ )
+ for mission_group in mission_groups
+ }
+ merged_data = SplitMetrics(
+ games_played=games_played,
+ games_completed=games_completed,
+ subgoals_completed=subgoals_completed,
+ total_subgoals=total_subgoals,
+ mission_groups=mission_groups,
+ games_played_per_mission_group=games_played_per_mission_group,
+ games_completed_per_mission_group=games_completed_per_mission_group,
+ success_rate=games_completed / games_played,
+ subgoal_completion_rate=subgoals_completed / total_subgoals,
+ success_rate_per_mission={
+ mission_group: games_completed_per_mission_group[mission_group]
+ / games_played_per_mission_group[mission_group]
+ for mission_group in mission_groups
+ },
+ )
+
+ rich_print(merged_data)
+
+
+if __name__ == "__main__":
+ compare_data(Path("storage/metrics/"))
+ average_actions(Path("storage/metrics/missions/"))
diff --git a/scripts/fetch-arena-data.sh b/scripts/fetch-arena-data.sh
new file mode 100644
index 0000000..4c0dd24
--- /dev/null
+++ b/scripts/fetch-arena-data.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+mkdir -p storage/data/trajectory-data
+
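+# --no-sign-request downloads from the public bucket without needing AWS credentials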
+aws s3 cp s3://alexa-arena-resources/DATA_LICENSE ./storage/data/DATA_LICENSE --no-sign-request
+aws s3 cp s3://alexa-arena-resources/data/trajectory-data/valid.json ./storage/data/trajectory-data/valid.json --no-sign-request
diff --git a/scripts/fetch-arena.sh b/scripts/fetch-arena.sh
new file mode 100644
index 0000000..3b26134
--- /dev/null
+++ b/scripts/fetch-arena.sh
@@ -0,0 +1,7 @@
+#!/bin/bash
+
+mkdir -p storage/arena
+
+wget -P ./storage/ "https://alexa-arena-executable.s3.us-west-1.amazonaws.com/Arena.zip"
+unzip ./storage/Arena.zip -d ./storage/arena
+rm -f ./storage/Arena.zip
diff --git a/scripts/prepare-user-area.sh b/scripts/prepare-user-area.sh
new file mode 100644
index 0000000..63552c4
--- /dev/null
+++ b/scripts/prepare-user-area.sh
@@ -0,0 +1,73 @@
+#!/bin/bash
+set -e
+set -o pipefail
+
+# This script prepares the user area for running offline inference
+
+# Make sure this script is run as sudo
+if [ "$EUID" -ne 0 ]; then
+ echo "Please run as sudo"
+ exit 1
+fi
+
+# Ensure python version is 3.9, and fail otherwise
+if [[ $(python3 --version) != *"3.9"* ]]; then
+ echo "Please use python 3.9."
+ exit 1
+fi
+
+# Ensure poetry is installed
+if ! command -v poetry &>/dev/null; then
+ echo "Poetry could not be found"
+ exit 1
+fi
+
+# Install poetry deps without the experience hub
+echo "[SimBot] Installing poetry dependencies"
+poetry install --without emma
+
+# Clone the experience hub into the storage dir
+echo "[SimBot] Installing experience hub as editable"
+git clone https://github.com/emma-heriot-watt/experience-hub storage/experience-hub
+pip install -e storage/experience-hub
+
+# Fetch the arena
+echo "[SimBot] Fetching the arena"
+sh ./scripts/fetch-arena.sh
+
+# Set the permissions for the arena
+echo "[SimBot] Set permissions for the arena"
+sudo chmod -R 755 storage/arena/Linux
+chmod 777 storage/arena/Linux/Arena.x86_64
+
+# Install the arena dependencies by copying the files they want us to copy
+echo "[SimBot] Installing arena dependencies"
+# Warn the user in bright red
+echo -e "\e[1;31m"
+echo "WARNING!!!!!!!"
+echo "THIS WILL CHANGE FILES ON YOUR MACHINE!!!!!"
+echo "EITHER RUN THIS INSIDE A CONTAINER OR BACKUP THAT FOLDER BEFORE YOU RUN THIS."
+# Reset the colour
+echo -e "\e[0m"
+# Ask the user if they want to continue
+read -p "Do you want to continue? (y/n) " -n 1 -r
+
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+ echo
+ echo "Aborting."
+ exit 1
+fi
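+# Copy the Arena's bundled shared libraries into the system library path and refresh the linker cache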
+sudo cp -r storage/arena/Dependencies/* /usr/lib/
+sudo ldconfig
+
+# Download the arena data
+echo "[SimBot] Downloading arena mission data"
+sh ./scripts/fetch-arena-data.sh
+
+# Setup symlinks for Docker and storage/models
+echo "[SimBot] Setting up symlinks"
+ln -s ./storage/experience-hub/docker/ ./
+ln -s ./experience-hub/storage/models/ ./storage/
+
+# Done
+echo "[SimBot] Done!"
diff --git a/src/arena_missions/__init__.py b/src/arena_missions/__init__.py
new file mode 100644
index 0000000..a29f170
--- /dev/null
+++ b/src/arena_missions/__init__.py
@@ -0,0 +1,4 @@
+from arena_missions.load_challenges import load_challenges
+
+
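+# Load all challenge definitions as soon as the package is imported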
+load_challenges()
diff --git a/src/arena_missions/builders/__init__.py b/src/arena_missions/builders/__init__.py
new file mode 100644
index 0000000..9c6aa0c
--- /dev/null
+++ b/src/arena_missions/builders/__init__.py
@@ -0,0 +1,7 @@
+from arena_missions.builders.challenge_builder import (
+ ChallengeBuilder,
+ ChallengeBuilderFunction,
+ ChallengeBuilderOutput,
+)
+from arena_missions.builders.mission_builder import MissionBuilder
+from arena_missions.builders.required_objects_builder import RequiredObjectBuilder
diff --git a/src/arena_missions/builders/challenge_builder.py b/src/arena_missions/builders/challenge_builder.py
new file mode 100644
index 0000000..9ddfb18
--- /dev/null
+++ b/src/arena_missions/builders/challenge_builder.py
@@ -0,0 +1,138 @@
+from collections.abc import Iterator
+from copy import deepcopy
+from itertools import groupby
+from typing import Any, Callable, Optional, Union
+
+from deepmerge import always_merger
+from pydantic import BaseModel, Field
+
+from arena_missions.constants.arena import OfficeLayout, OfficeRoom
+from arena_missions.structures import HighLevelKey, RequiredObject, StateCondition, TaskGoal
+
+
+class ChallengeBuilderOutput(BaseModel):
+ """Output of a challenge builder function."""
+
+ start_room: OfficeRoom
+ required_objects: dict[str, RequiredObject]
+ plan: list[str]
+
+ task_goals: list[TaskGoal]
+ state_conditions: list[StateCondition] = Field(default_factory=list)
+
+ preparation_plan: list[str] = Field(default_factory=list)
+
+ # If you want to override the office layout, set this.
+ office_layout: Optional[OfficeLayout] = None
+
+ # Whether or not to include all the default objects like open doors, etc.
+ # If you don't care, just ignore it.
+ include_all_default_objects: Optional[bool] = True
+
+ randomise_start_position: bool = True
+
+ @property
+ def required_objects_list(self) -> list[RequiredObject]:
+ """Return a list of lists of required objects."""
+ return list(self.required_objects.values())
+
+
+ChallengeBuilderFunction = Callable[[], ChallengeBuilderOutput]
+
+
+class ChallengeBuilder:
+ """Registrable-style class that registers challenge builders to easily generate them."""
+
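+ # Class-level registry shared across the process; populated by the register decorators below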
+ _registry: list[tuple[HighLevelKey, ChallengeBuilderFunction]] = []
+
+ def __iter__(self) -> Iterator[tuple[HighLevelKey, ChallengeBuilderFunction]]:
+ """Iterate over the registry."""
+ yield from self._registry
+
+ @classmethod
+ def register(
+ cls, high_level_key: Union[str, HighLevelKey]
+ ) -> Callable[[ChallengeBuilderFunction], ChallengeBuilderFunction]:
+ """Register a challenge builder."""
+ # mypy errors if we don't reassign the parsed high-level key to a new variable.
+ # Either that is a bug, or it knows something we don't.
+ parsed_high_level_key = (
+ HighLevelKey.from_string(high_level_key)
+ if isinstance(high_level_key, str)
+ else high_level_key
+ )
+
+ def decorator(func: ChallengeBuilderFunction) -> ChallengeBuilderFunction:
+ # Registry count before registering
+ registry_count = len(ChallengeBuilder._registry) # noqa: WPS437
+
+ # Register the challenge builder
+ ChallengeBuilder._registry.append((parsed_high_level_key, func)) # noqa: WPS437
+
+ # Get the count after removing duplicates
+ registry_count_after_duplicates_removed = len(
+ set(ChallengeBuilder._registry) # noqa: WPS437
+ )
+
+ # If the count is the same, then we didn't add a new challenge builder
+ if registry_count == registry_count_after_duplicates_removed:
+ raise ValueError(
+ f"Challenge builder already registered for: ({parsed_high_level_key}, {func})."
+ )
+
+ return func
+
+ return decorator
+
+ @classmethod
+ def register_with_modifiers(
+ cls,
+ high_level_key: Union[str, HighLevelKey],
+ modified_kwargs: dict[str, Any],
+ ) -> Callable[[ChallengeBuilderFunction], ChallengeBuilderFunction]:
+ """Register a challenge builder with modifiers."""
+
+ def decorator(func: ChallengeBuilderFunction) -> ChallengeBuilderFunction:
+ # Register the modified challenge builder
+ ChallengeBuilder.register(high_level_key)(
+ ChallengeBuilder.modify_challenge_builder_function_output(func, modified_kwargs)
+ )
+
+ return func
+
+ return decorator
+
+ @classmethod
+ def count_available_functions_per_key(cls) -> dict[HighLevelKey, int]:
+ """List all keys and how many functions connect with them."""
+ key_counts: dict[HighLevelKey, int] = {}
+
+ # Sort the registry by the high-level key
+ sorted_registry = sorted(cls._registry, key=lambda x: x[0].key)
+
+ for k, g in groupby(sorted_registry, key=lambda x: x[0]):
+ key_counts[k] = len(list(g))
+
+ return key_counts
+
+ @classmethod
+ def list_available(cls) -> list[HighLevelKey]:
+ """List all available high-level keys."""
+ return list({key for key, _ in cls._registry})
+
+ @staticmethod
+ def modify_challenge_builder_function_output( # noqa: WPS602
+ function: ChallengeBuilderFunction, modified_kwargs: dict[str, Any]
+ ) -> ChallengeBuilderFunction:
+ """Modify the output of a challenge builder function."""
+
+ def wrapper() -> ChallengeBuilderOutput:
+ # Call the original function
+ output = function().dict(by_alias=True)
+ output = deepcopy(output)
+ # Modify the output
+ always_merger.merge(output, modified_kwargs)
+ # Return the modified output
+ return ChallengeBuilderOutput.parse_obj(output)
+
+ return wrapper
diff --git a/src/arena_missions/builders/mission_builder.py b/src/arena_missions/builders/mission_builder.py
new file mode 100644
index 0000000..546b132
--- /dev/null
+++ b/src/arena_missions/builders/mission_builder.py
@@ -0,0 +1,93 @@
+import random
+from collections.abc import Iterator
+from typing import Optional, get_args
+
+from arena_missions.builders.challenge_builder import (
+ ChallengeBuilder,
+ ChallengeBuilderFunction,
+ ChallengeBuilderOutput,
+)
+from arena_missions.builders.required_objects_builder import RequiredObjectBuilder
+from arena_missions.constants.arena import OfficeLayout
+from arena_missions.structures import CDF, CDFScene, HighLevelKey, Mission, RequiredObject
+
+
+class MissionBuilder:
+ """Build missions for the Arena."""
+
+ def __init__(
+ self,
+ challenge_builder: ChallengeBuilder,
+ required_object_builder: RequiredObjectBuilder,
+ unity_scene_rng_seed: Optional[int] = None,
+ ) -> None:
+ self.challenge_builder = challenge_builder
+ self.required_object_builder = required_object_builder
+
+ self._unity_scene_rng_seed = unity_scene_rng_seed
+
+ @property
+ def cdf_floor_plan(self) -> str:
+ """Convert the Unity Scene RNG seed to a string for the `floor_plan`."""
+ return str(self._unity_scene_rng_seed) if self._unity_scene_rng_seed is not None else "-1"
+
+ def generate_all_missions(self) -> Iterator[Mission]:
+ """Generate all missions."""
+ yield from (
+ self.generate_mission(high_level_key, challenge_builder_function)
+ for high_level_key, challenge_builder_function in self.challenge_builder
+ )
+
+ def generate_mission(
+ self, high_level_key: HighLevelKey, challenge_builder_function: ChallengeBuilderFunction
+ ) -> Mission:
+ """Generate a mission."""
+ builder_output = challenge_builder_function()
+ cdf = self.generate_cdf(builder_output)
+ return Mission(
+ high_level_key=high_level_key,
+ plan=builder_output.plan,
+ cdf=cdf,
+ preparation_plan=builder_output.preparation_plan,
+ randomise_start_position=builder_output.randomise_start_position,
+ )
+
+ def generate_cdf(self, challenge_builder_output: ChallengeBuilderOutput) -> CDF:
+ """Generate a challenge."""
+ required_objects = [
+ *challenge_builder_output.required_objects_list,
+ *self.generate_default_arena_objects_if_required(
+ challenge_builder_output.include_all_default_objects
+ ),
+ ]
+
+ cdf_scene = CDFScene(
+ roomLocation=[challenge_builder_output.start_room],
+ floor_plan=self.cdf_floor_plan,
+ required_objects=required_objects,
+ layoutOverride=self.generate_office_layout_if_required(
+ challenge_builder_output.office_layout
+ ),
+ )
+ return CDF(
+ scene=cdf_scene,
+ task_goals=challenge_builder_output.task_goals,
+ stateconditions=challenge_builder_output.state_conditions,
+ )
+
+ def generate_default_arena_objects_if_required(
+ self, include_all_default_objects: Optional[bool]
+ ) -> list[RequiredObject]:
+ """Generate default arena objects."""
+ if include_all_default_objects is None:
+ include_all_default_objects = random.choice([True, False])
+
+ return (
+ self.required_object_builder.default_objects() if include_all_default_objects else []
+ )
+
+ def generate_office_layout_if_required(
+ self, office_layout: Optional[OfficeLayout]
+ ) -> OfficeLayout:
+ """Generate office layout."""
+ return office_layout if office_layout else random.choice(get_args(OfficeLayout))
diff --git a/src/arena_missions/builders/required_objects_builder.py b/src/arena_missions/builders/required_objects_builder.py
new file mode 100644
index 0000000..66430c3
--- /dev/null
+++ b/src/arena_missions/builders/required_objects_builder.py
@@ -0,0 +1,290 @@
+from typing import Literal, Optional
+
+from arena_missions.constants.arena import OfficeRoom
+from arena_missions.structures import ObjectInstanceId, RequiredObject, RequiredObjectState
+
+
+class RequiredObjectBuilder:
+ """Simplify object building within the arena."""
+
+ num_doors: int = 7
+ num_light_switches: int = 8
+ num_broken_cords: int = 3
+ num_gray_fuse_boxes: int = 2
+ num_red_fuse_boxes: int = 1
+ num_computer_monitors: int = 5
+ max_num_lab1_desks: int = 5
+ max_num_lab2_desks: int = 3
+
+ def default_objects(self) -> list[RequiredObject]:
+ """Generate all default objects for the arena."""
+ return [
+ *self.doors(),
+ *self.light_switches(),
+ *self.broken_cords(),
+ *self.fuse_boxes(),
+ *self.computer_monitors(),
+ ]
+
+ def color_changer(self) -> RequiredObject:
+ """Generate the color changer for the arena."""
+ return RequiredObject.from_string("ColorChangerStation_1")
+
+ def doors(self, *, is_open: bool = True) -> list[RequiredObject]:
+ """Generate all 7 doors for the arena."""
+ return [
+ RequiredObject(
+ name=ObjectInstanceId.parse(f"Door_01_{door_num}"),
+ state=[RequiredObjectState.from_parts("isOpen", "true" if is_open else "false")],
+ )
+ for door_num in range(1, self.num_doors + 1)
+ ]
+
+ def light_switches(self) -> list[RequiredObject]:
+ """Generate all 8 light switches for the arena."""
+ return [
+ RequiredObject(
+ name=ObjectInstanceId.parse(f"LightSwitch_01_{switch_num}"),
+ )
+ for switch_num in range(1, self.num_light_switches + 1)
+ ]
+
+ def broken_cords(self, *, is_on: bool = False) -> list[RequiredObject]:
+ """Generate all 3 broken cords for the arena."""
+ return [
+ RequiredObject(
+ name=ObjectInstanceId.parse(f"Broken_Cord_01_{cord_num}"),
+ state=[
+ RequiredObjectState.from_parts("isToggledOn", "true" if is_on else "false")
+ ],
+ )
+ for cord_num in range(1, self.num_broken_cords + 1)
+ ]
+
+ def fuse_boxes(self) -> list[RequiredObject]:
+ """Generate all fuse boxes for the arena."""
+ gray_fuse_boxes = [
+ RequiredObject(name=ObjectInstanceId.parse(f"FuseBox_01_{fuse_box_num}"))
+ for fuse_box_num in range(1, self.num_gray_fuse_boxes + 1)
+ ]
+ red_fuse_boxes = [
+ RequiredObject(name=ObjectInstanceId.parse(f"FuseBox_02_{fuse_box_num}"))
+ for fuse_box_num in range(1, self.num_red_fuse_boxes + 1)
+ ]
+ return [*gray_fuse_boxes, *red_fuse_boxes]
+
+ def computer_monitors(self) -> list[RequiredObject]:
+ """Generate all computer monitors for the arena."""
+ return [
+ RequiredObject(name=ObjectInstanceId.parse(f"Computer_Monitor_01_{monitor_num}"))
+ for monitor_num in range(1, self.num_computer_monitors + 1)
+ ]
+
+ def freeze_ray(self) -> RequiredObject:
+ """Generate the freeze ray for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("FreezeRay_1"))
+
+ def emotion_tester(self) -> RequiredObject:
+ """Generate the emotion tester for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("TAMPrototypeHead_01_1"))
+
+ def portal_generator(self) -> RequiredObject:
+ """Generate the portal generator for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("PortalGenerator_10000"))
+
+ def laser(self) -> RequiredObject:
+ """Generate the laser for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("Laser_1"))
+
+ def gravity_pad(self) -> RequiredObject:
+ """Generate the gravity pad for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("GravityPad_1"))
+
+ def fridge(self, *, room: OfficeRoom = "BreakRoom", is_open: bool = False) -> RequiredObject:
+ """Generate the fridge for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("FridgeLower_02_1"),
+ state=[
+ RequiredObjectState.from_parts("isOpen", "true" if is_open else "false"),
+ RequiredObjectState.from_parts("removeInitialContainedItems", "true"),
+ ],
+ roomLocation=[room],
+ )
+
+ def freezer(self, *, room: OfficeRoom = "BreakRoom", is_open: bool = False) -> RequiredObject:
+ """Generate the freezer for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("FridgeUpper_02_1"),
+ state=[
+ RequiredObjectState.from_parts("isOpen", "true" if is_open else "false"),
+ RequiredObjectState.from_parts("removeInitialContainedItems", "true"),
+ ],
+ roomLocation=[room],
+ )
+
+ def time_machine(self, *, room: OfficeRoom = "BreakRoom") -> RequiredObject:
+ """Generate the time machine for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("YesterdayMachine_01_1"), roomLocation=[room]
+ )
+
+ def carrot_maker(self, *, room: OfficeRoom = "Lab2") -> RequiredObject:
+ """Generate the carrot maker for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("EAC_Machine_1"), roomLocation=[room])
+
+ def microwave(self, *, room: OfficeRoom = "BreakRoom") -> RequiredObject:
+ """Generate the microwave for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("Microwave_01_1"),
+ state=[
+ RequiredObjectState.from_parts("removeInitialContainedItems", "true"),
+ RequiredObjectState.from_parts("isEmpty", "true"),
+ ],
+ roomLocation=[room],
+ )
+
+ def robotic_arm(self, *, is_arm_lifted: bool = True) -> RequiredObject:
+ """Generate the robotic arm for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("RoboticArm_01_1"),
+ state=[
+ RequiredObjectState.from_parts("isToggledOn", "true" if is_arm_lifted else "false")
+ ],
+ )
+
+ def fork_lift(self, *, is_fork_lifted: bool = True) -> RequiredObject:
+ """Generate the fork lift for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("ForkLift_1"),
+ state=[
+ RequiredObjectState.from_parts(
+ "isToggledOn", "true" if is_fork_lifted else "false"
+ )
+ ],
+ )
+
+ def coffee_pot(
+ self, *, fill_with: Optional[Literal["Coffee", "Water"]] = None
+ ) -> RequiredObject:
+ """Generate the coffee pot for the arena."""
+ coffee_pot = RequiredObject(
+ name=ObjectInstanceId.parse("CoffeePot_01_1"), roomLocation=["BreakRoom"]
+ )
+
+ if fill_with == "Coffee":
+ coffee_pot.update_state("isFilled", "Coffee")
+ coffee_pot.update_state("isHot", "true")
+
+ if fill_with == "Water":
+ coffee_pot.update_state("isFilled", "Water")
+
+ return coffee_pot
+
+ def coffee_unmaker(self) -> RequiredObject:
+ """Generate the coffee unmaker for the arena."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("CoffeeUnMaker_01_1"), roomLocation=["BreakRoom"]
+ )
+
+ def breakroom_table(self) -> RequiredObject:
+ """Create the round table in the breakroom."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("TableRound_02_1"),
+ state=[RequiredObjectState.from_parts("removeInitialContainedItems", "true")],
+ roomLocation=["BreakRoom"],
+ )
+
+ def breakroom_countertop(self) -> RequiredObject:
+ """Create the countertop in the breakroom."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("KitchenCounterTop_02_1"),
+ state=[RequiredObjectState.from_parts("removeInitialContainedItems", "true")],
+ roomLocation=["BreakRoom"],
+ )
+
+ def printer(self) -> RequiredObject:
+ """Generate the printer for the arena."""
+ return RequiredObject(name=ObjectInstanceId.parse("Printer_3D_1"))
+
+ def main_office_desks(self) -> list[RequiredObject]:
+ """Returns office desks in main office."""
+ desk_names = [
+ "AP_Prop_Desk_Blue",
+ "AP_Prop_Desk_Green",
+ "AP_Prop_Desk_Red",
+ ]
+
+ desk_objects = []
+
+ for desk_name in desk_names:
+ desk_objects.append(
+ RequiredObject(
+ name=ObjectInstanceId.parse(f"{desk_name}_1"),
+ state=[RequiredObjectState.from_parts("removeInitialContainedItems", "true")],
+ roomLocation=["MainOffice"],
+ )
+ )
+
+ return desk_objects
+
+ def reception_desk(self) -> RequiredObject:
+ """Returns the reception desk in the reception."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("ReceptionDesk_1"),
+ state=[RequiredObjectState.from_parts("removeInitialContainedItems", "true")],
+ roomLocation=["Reception"],
+ )
+
+ def manager_desk(self) -> RequiredObject:
+ """Returns the manager desk in the small office."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("ManagerDesk_1"),
+ state=[RequiredObjectState.from_parts("removeInitialContainedItems", "true")],
+ roomLocation=["SmallOffice"],
+ )
+
+ def warehouse_cabinet(self) -> RequiredObject:
+ """Returns the warehouse cabinet."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("KitchenCabinet_02_1"), roomLocation=["Warehouse"]
+ )
+
+ def warehouse_metal_table(self) -> RequiredObject:
+ """Returns the warehouse metal table."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("Table_Metal_01_1"), roomLocation=["Warehouse"]
+ )
+
+ def warehouse_wooden_table(self) -> RequiredObject:
+ """Returns the warehouse wooden table."""
+ return RequiredObject(
+ name=ObjectInstanceId.parse("SM_Prop_Table_02_1"), roomLocation=["Warehouse"]
+ )
+
+ def lab1_desks(self) -> list[RequiredObject]:
+ """Returns desks in the Lab1."""
+ desk_format = "Desk_01_{instance_count}"
+
+ desks = []
+
+ for desk_idx in range(1, self.max_num_lab1_desks + 1):
+ desk_id = ObjectInstanceId.parse(desk_format.format(instance_count=desk_idx))
+ desk = RequiredObject(name=desk_id)
+ desk.update_room("Lab1")
+ desks.append(desk)
+
+ return desks
+
+ def lab2_desks(self) -> list[RequiredObject]:
+ """Returns desks in the Lab2."""
+ desk_format = "Desk_01_{instance_count}"
+
+ desks = []
+
+ for desk_idx in range(1, self.max_num_lab2_desks + 1):
+ desk_id = ObjectInstanceId.parse(desk_format.format(instance_count=desk_idx))
+ desk = RequiredObject(name=desk_id)
+ desk.update_room("Lab2")
+ desks.append(desk)
+
+ return desks
diff --git a/src/arena_missions/challenges/__init__.py b/src/arena_missions/challenges/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/arena_missions/challenges/ambiguous_pickup.py b/src/arena_missions/challenges/ambiguous_pickup.py
new file mode 100644
index 0000000..c490965
--- /dev/null
+++ b/src/arena_missions/challenges/ambiguous_pickup.py
@@ -0,0 +1,152 @@
+import itertools
+from collections.abc import Iterator
+from typing import Any, Optional, get_args
+
+from arena_missions.builders import (
+ ChallengeBuilder,
+ ChallengeBuilderFunction,
+ ChallengeBuilderOutput,
+ RequiredObjectBuilder,
+)
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsPickedUpExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_ambiguous_pickup_challenge( # noqa: WPS231
+ target_object_idx: int, available_objects: list[RequiredObject], receptacle: RequiredObject
+) -> None:
+ """Generate challenges to pick up objects from other ambiguous objects."""
+ target_object = available_objects[target_object_idx]
+
+ conditions = [
+ # [PREP] Ensure all the objects are proper
+ StateCondition(
+ stateName="AllObjectsAreProper",
+ context=receptacle.object_instance_id,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ *[
+ ContainsExpression(
+ target=receptacle.object_instance_id,
+ contains=curr_object.object_instance_id,
+ )
+ for curr_object in available_objects
+ ]
+ )
+ ),
+ ),
+ # Ensure we pick up the target object
+ StateCondition(
+ stateName="PickedUpTargetObject",
+ context=target_object.object_instance_id,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.object_instance_id, value=True),
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission_func_with_color(
+ target_color: Optional[ColorChangerObjectColor],
+ ) -> ChallengeBuilderFunction:
+ plan = (
+ f"pick up the {target_color} {target_object.readable_name}"
+ if target_color
+ else f"pick up the white {target_object.readable_name}"
+ )
+
+ def _create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ if not receptacle.room:
+ raise ValueError(f"Receptacle {receptacle.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=receptacle.room,
+ required_objects={
+ receptacle.name: receptacle,
+ **{curr_object.name: curr_object for curr_object in available_objects},
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[plan],
+ )
+
+ return _create_mission
+
+ object_colors = [None, *get_args(ColorChangerObjectColor)]
+ object_color_permutations: Iterator[tuple[Any, ...]] = itertools.permutations(object_colors)
+
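+ # Each permutation assigns a distinct color (or None, i.e. the default white) to every candidate
+ # object, and a separate challenge builder is registered for each assignment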
+ for color_permutation in object_color_permutations:
+ colored_target_object_kwargs: dict[str, Any] = {"required_objects": {}}
+
+ for curr_object, color in zip(available_objects, color_permutation):
+ if color is not None:
+ colored_target_object_kwargs["required_objects"].update(
+ {curr_object.name: {"colors": [color]}}
+ )
+ target_color: Optional[ColorChangerObjectColor] = color_permutation[target_object_idx]
+
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=target_object.object_id,
+ target_object_color=target_color,
+ target_object_is_ambiguous=True,
+ from_receptacle=receptacle.object_id,
+ from_receptacle_is_container=False,
+ )
+
+ # Register the challenge builder with the modifications
+ create_mission: ChallengeBuilderFunction = create_mission_func_with_color(target_color)
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_ambiguous_pickup_challenges(max_num_distractors: int = 2) -> None:
+ """Register challenges to pick up a target object among distractors objects."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ receptacles = [
+ required_objects_builder.breakroom_table(),
+ required_objects_builder.breakroom_countertop(),
+ ]
+
+ # include only the objects that allow color changes
+ target_object_iterator = [
+ "Apple_{instance_count}",
+ "Cake_02_{instance_count}",
+ "Carrot_01_{instance_count}",
+ "Donut_01_{instance_count}",
+ "Pear_01_{instance_count}",
+ "CoffeeMug_Yellow_{instance_count}",
+ "CoffeeMug_Boss_{instance_count}",
+ "Bowl_01_{instance_count}",
+ "FoodPlate_01_{instance_count}",
+ "DeskFan_New_01_{instance_count}",
+ ]
+
+ for target_object_template in target_object_iterator:
+ for receptacle in receptacles:
+ available_objects: list[RequiredObject] = []
+ for idx in range(1, max_num_distractors + 1):
+ curr_object = RequiredObject(
+ name=ObjectInstanceId.parse(
+ target_object_template.format(instance_count=idx + 1)
+ )
+ )
+ curr_object.update_receptacle(receptacle.name)
+ available_objects.append(curr_object)
+
+ for target_object_idx, _ in enumerate(available_objects):
+ create_ambiguous_pickup_challenge(target_object_idx, available_objects, receptacle)
diff --git a/src/arena_missions/challenges/breaking_things.py b/src/arena_missions/challenges/breaking_things.py
new file mode 100644
index 0000000..28c7a01
--- /dev/null
+++ b/src/arena_missions/challenges/breaking_things.py
@@ -0,0 +1,300 @@
+import random
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsBrokenExpression,
+ IsPickedUpExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_break_object_challenges(
+ object_instance_id: ObjectInstanceId,
+ receptacle: RequiredObject,
+ breakroom_table: RequiredObject,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challenges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Turn the fork lift on
+ fork_lift = required_object_builder.fork_lift()
+ # Turn the robotic arm on
+ robotic_arm = required_object_builder.robotic_arm()
+
+ # Make the target object unique
+ target_object = RequiredObject(name=object_instance_id)
+ target_object.add_state("Unique", "true")
+
+    # Put the target object on the receptacle
+ target_object.update_receptacle(receptacle.name)
+
+ # Ensure the hammer is on the table
+ hammer = RequiredObject(name=ObjectInstanceId.parse("Hammer_1"))
+ hammer.add_state("Unique", "true")
+ hammer.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+        # Pick up the hammer while the target object is still on the receptacle
+ StateCondition(
+ stateName="HammerPickedUp",
+ context=hammer.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=hammer.name, value=True),
+ ContainsExpression(target=receptacle.name, contains=target_object.name),
+ ),
+ ),
+ ),
+ # Ensure the target object is broken
+ StateCondition(
+ stateName="TargetObjectBroken",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsBrokenExpression(target=target_object.name, value=True)
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ if not receptacle.room:
+ raise ValueError(f"Receptacle {receptacle.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=receptacle.room,
+ required_objects={
+ breakroom_table.name: breakroom_table,
+ receptacle.name: receptacle,
+ target_object.name: target_object,
+ hammer.name: hammer,
+ robotic_arm.name: robotic_arm,
+ fork_lift.name: fork_lift,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ f"find the {target_object.readable_name}",
+ f"break the {target_object.readable_name} with the hammer",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"find the {hammer.readable_name}",
+ f"pick up the {hammer.readable_name}",
+ ],
+ )
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="break",
+ target_object=target_object.object_id,
+ from_receptacle=receptacle.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="break",
+ target_object=target_object.object_id,
+ target_object_color=color,
+ from_receptacle=receptacle.object_id,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_breaking_things_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to break things with the hammer."""
+ required_object_builder = RequiredObjectBuilder()
+
+ breakable_object_ids = [
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("Floppy_Virus_1"), False),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True),
+ (ObjectInstanceId.parse("Record_01_1"), False),
+ (ObjectInstanceId.parse("Trophy01_1"), False),
+ ]
+
+ breakroom_table = required_object_builder.breakroom_table()
+
+ receptacles = [
+ required_object_builder.breakroom_countertop(),
+ breakroom_table,
+ *required_object_builder.main_office_desks(),
+ required_object_builder.warehouse_cabinet(),
+ required_object_builder.warehouse_metal_table(),
+ required_object_builder.warehouse_wooden_table(),
+ required_object_builder.reception_desk(),
+ required_object_builder.manager_desk(),
+ ]
+
+ for target_object_id, with_color_variants in breakable_object_ids:
+ for receptacle in receptacles:
+ create_break_object_challenges(
+ target_object_id,
+ receptacle,
+ breakroom_table,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
+
+
+def create_break_object_on_desks_challenges(
+ object_instance_id: ObjectInstanceId,
+ desks: list[RequiredObject],
+ breakroom_table: RequiredObject,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challenges."""
+ target_desk = random.choice(desks)
+ required_object_builder = RequiredObjectBuilder()
+
+ # Turn the fork lift on
+ fork_lift = required_object_builder.fork_lift()
+ # Turn the robotic arm on
+ robotic_arm = required_object_builder.robotic_arm()
+
+ # Make the target object unique
+ target_object = RequiredObject(name=object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ target_object.update_receptacle(target_desk.name)
+
+ # Ensure the hammer is on the table
+ hammer = RequiredObject(name=ObjectInstanceId.parse("Hammer_1"))
+ hammer.add_state("Unique", "true")
+ hammer.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+        # Pick up the hammer while the target object is still on the desk
+ StateCondition(
+ stateName="HammerPickedUp",
+ context=hammer.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=hammer.name, value=True),
+ ContainsExpression(target=target_desk.name, contains=target_object.name),
+ ),
+ ),
+ ),
+ # Ensure the target object is broken
+ StateCondition(
+ stateName="TargetObjectBroken",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsBrokenExpression(target=target_object.name, value=True)
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ if not target_desk.room:
+ raise ValueError(f"Target desk {target_desk.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=target_desk.room,
+ required_objects={
+ breakroom_table.name: breakroom_table,
+ **{desk.name: desk for desk in desks},
+ target_object.name: target_object,
+ hammer.name: hammer,
+ robotic_arm.name: robotic_arm,
+ fork_lift.name: fork_lift,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ f"find the {target_object.readable_name}",
+ f"break the {target_object.readable_name} with the hammer",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"find the {hammer.readable_name}",
+ f"pick up the {hammer.readable_name}",
+ ],
+ )
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="break",
+ target_object=target_object.object_id,
+ from_receptacle=target_desk.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="break",
+ target_object=target_object.object_id,
+ target_object_color=color,
+ from_receptacle=target_desk.object_id,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_breaking_things_on_desks_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to break things with the hammer."""
+ required_object_builder = RequiredObjectBuilder()
+
+ breakable_object_ids = [
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("Floppy_Virus_1"), False),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True),
+ (ObjectInstanceId.parse("Record_01_1"), False),
+ (ObjectInstanceId.parse("Trophy01_1"), False),
+ ]
+
+ breakroom_table = required_object_builder.breakroom_table()
+
+ all_desks = [required_object_builder.lab1_desks(), required_object_builder.lab2_desks()]
+
+ for target_object_id, with_color_variants in breakable_object_ids:
+ for desks in all_desks:
+ create_break_object_on_desks_challenges(
+ target_object_id,
+ desks,
+ breakroom_table,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/clean_dirty_plate.py b/src/arena_missions/challenges/clean_dirty_plate.py
new file mode 100644
index 0000000..74d6151
--- /dev/null
+++ b/src/arena_missions/challenges/clean_dirty_plate.py
@@ -0,0 +1,189 @@
+from typing import Literal, Optional, get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor, OfficeLayout
+from arena_missions.structures import (
+ AndExpression,
+ HighLevelKey,
+ IsDirtyExpression,
+ IsPickedUpExpression,
+ IsToggledOnExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_clean_dirty_plate_challenge(
+ room: Literal["BreakRoom", "Warehouse"],
+ office_layout: Optional[OfficeLayout] = None,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Clean a dirty plate."""
+ required_object_builder = RequiredObjectBuilder()
+
+ sink = RequiredObject(
+ name=ObjectInstanceId.parse("KitchenCounterSink_01_1"), roomLocation=[room]
+ )
+
+ plate = RequiredObject(name=ObjectInstanceId.parse("FoodPlate_01_1"))
+ plate.add_state("Unique", "true")
+ plate.add_state("isDirty", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+
+ # Put the target object on the table
+ plate.update_receptacle(breakroom_table.name)
+
+ conditions = [
+ # Fill the sink before cleaning the plate
+ StateCondition(
+ stateName="FilledSinkBeforeCleaningPlate",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=sink.name, value=True),
+ IsDirtyExpression(target=plate.name, value=True),
+ )
+ ),
+ ),
+ # Clean the plate
+ StateCondition(
+ stateName="CleanedPlate",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=sink.name, value=True),
+ IsDirtyExpression(target=plate.name, value=False),
+ )
+ ),
+ ),
+ # Turn off the sink after cleaning the plate
+ StateCondition(
+ stateName="TurnedOffSinkAfterCleaningPlate",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=sink.name, value=False),
+ IsDirtyExpression(target=plate.name, value=False),
+ )
+ ),
+ ),
+ ]
+
+ def fill_sink_before_cleaning_plate() -> ChallengeBuilderOutput:
+ """Fill the sink before cleaning the plate."""
+ prep_condition = StateCondition(
+ stateName="HoldingDirtyPlate",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsDirtyExpression(target=plate.name, value=True),
+ IsPickedUpExpression(target=plate.name, value=True),
+ )
+ ),
+ )
+ mission_conditions = [prep_condition, *conditions]
+ return ChallengeBuilderOutput(
+ start_room=room,
+ office_layout=office_layout,
+ required_objects={
+ sink.name: sink,
+ plate.name: plate,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=mission_conditions,
+ task_goals=[
+ TaskGoal.from_state_condition(condition) for condition in mission_conditions
+ ],
+ plan=[
+ "find the sink",
+ "toggle the sink",
+ "clean the plate in the sink",
+ "toggle the sink",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ "pick up the plate",
+ ],
+ )
+
+ def sink_already_filled_before_cleaning() -> ChallengeBuilderOutput:
+ """Do not toggle the sink singce it's already filled."""
+ prep_condition = StateCondition(
+ stateName="HoldingDirtyPlateWithSinkOn",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsDirtyExpression(target=plate.name, value=True),
+ IsPickedUpExpression(target=plate.name, value=True),
+ IsToggledOnExpression(target=sink.name, value=True),
+ )
+ ),
+ )
+ mission_conditions = [prep_condition, *conditions]
+
+ builder_output = fill_sink_before_cleaning_plate()
+ builder_output.state_conditions = mission_conditions
+ builder_output.task_goals = [
+ TaskGoal.from_state_condition(condition) for condition in mission_conditions
+ ]
+ builder_output.plan = [
+ "find the sink",
+ "clean the plate in the sink",
+ "toggle the sink",
+ ]
+ builder_output.preparation_plan = [
+ "find the sink",
+ "toggle the sink",
+ "go to the breakroom",
+ "pick up the plate",
+ ]
+ return builder_output
+
+ high_level_key = HighLevelKey(
+ action="clean",
+ interaction_object=sink.object_id,
+ target_object=plate.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(fill_sink_before_cleaning_plate)
+ ChallengeBuilder.register(high_level_key)(sink_already_filled_before_cleaning)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ plate.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="clean",
+ interaction_object=sink.object_id,
+ target_object=plate.object_id,
+ target_object_color=color,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ fill_sink_before_cleaning_plate
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ sink_already_filled_before_cleaning
+ )
+
+
+def register_clean_dirty_plates(enable_color_variants: bool = True) -> None:
+ """Register all the the clean dirty plate challenges."""
+ for layout in get_args(OfficeLayout):
+ create_clean_dirty_plate_challenge(
+ "BreakRoom", office_layout=layout, with_color_variants=enable_color_variants
+ )
+ create_clean_dirty_plate_challenge(
+ "Warehouse", office_layout=layout, with_color_variants=enable_color_variants
+ )
diff --git a/src/arena_missions/challenges/fill_object_in_sink.py b/src/arena_missions/challenges/fill_object_in_sink.py
new file mode 100644
index 0000000..41b1e03
--- /dev/null
+++ b/src/arena_missions/challenges/fill_object_in_sink.py
@@ -0,0 +1,182 @@
+from typing import Literal, Optional, get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor, OfficeLayout
+from arena_missions.structures import (
+ AndExpression,
+ HighLevelKey,
+ IsFilledWithExpression,
+ IsPickedUpExpression,
+ IsToggledOnExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_fill_object_in_sink(
+ object_instance_id: ObjectInstanceId,
+ room: Literal["BreakRoom", "Warehouse"],
+ office_layout: Optional[OfficeLayout] = None,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Fill an object in a sink."""
+ required_object_builder = RequiredObjectBuilder()
+
+ sink = RequiredObject(
+ name=ObjectInstanceId.parse("KitchenCounterSink_01_1"), roomLocation=[room]
+ )
+
+ # Create object
+ target_object = RequiredObject(name=object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+
+ # Put the target object on the table
+ target_object.update_receptacle(breakroom_table.name)
+
+ conditions = [
+ # Fill the object with water
+ StateCondition(
+ stateName="FilledObjectWithWater",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsFilledWithExpression(target=target_object.name, fluid="Water")
+ ),
+ ),
+ # Drain the sink
+ StateCondition(
+ stateName="DrainedSink",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ IsToggledOnExpression(target=sink.name, value=False),
+ ),
+ ),
+ ]
+
+ def fill_from_off_sink() -> ChallengeBuilderOutput:
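+        """Fill the object after toggling on the sink, which starts toggled off."""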
+ prep_condition = StateCondition(
+ stateName="HoldingUnfilledObject",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True)
+ ),
+ )
+ mission_conditions = [prep_condition, *conditions]
+
+ return ChallengeBuilderOutput(
+ start_room=room,
+ office_layout=office_layout,
+ required_objects={
+ sink.name: sink,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=mission_conditions,
+ task_goals=[
+ TaskGoal.from_state_condition(condition) for condition in mission_conditions
+ ],
+ plan=[
+ "find the sink",
+ "toggle the sink",
+ f"fill the {object_instance_id.readable_name} in the sink",
+ "toggle the sink",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {object_instance_id.readable_name}",
+ ],
+ )
+
+ def fill_from_on_sink() -> ChallengeBuilderOutput:
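+        """Fill the object in a sink that was already toggled on during preparation."""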
+ prep_condition = StateCondition(
+ stateName="HoldingUnfilledObjectWithSinkOn",
+ context=sink.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsToggledOnExpression(target=sink.name, value=True),
+ )
+ ),
+ )
+ mission_conditions = [prep_condition, *conditions]
+
+ builder_output = fill_from_off_sink()
+ builder_output.state_conditions = mission_conditions
+ builder_output.task_goals = [
+ TaskGoal.from_state_condition(condition) for condition in mission_conditions
+ ]
+ builder_output.plan = [
+ "find the sink",
+ f"fill the {object_instance_id.readable_name} in the sink",
+ "toggle the sink",
+ ]
+ builder_output.preparation_plan = [
+ "find the sink",
+ "toggle the sink",
+ "go to the breakroom",
+ f"pick up the {object_instance_id.readable_name}",
+ ]
+ return builder_output
+
+ high_level_key = HighLevelKey(
+ action="fill",
+ interaction_object=sink.object_id,
+ target_object=target_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(fill_from_off_sink)
+ ChallengeBuilder.register(high_level_key)(fill_from_on_sink)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="fill",
+ interaction_object=sink.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ fill_from_off_sink
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ fill_from_on_sink
+ )
+
+
+def register_fill_objects_in_sink(enable_color_variants: bool = True) -> None:
+ """Register challenges about filling an object in sink challenges."""
+ object_iterator = [
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("CoffeePot_01_1"), False),
+ ]
+
+ for layout in get_args(OfficeLayout):
+ for object_instance_id, with_color_variants in object_iterator:
+ create_fill_object_in_sink(
+ object_instance_id,
+ room="BreakRoom",
+ office_layout=layout,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
+ create_fill_object_in_sink(
+ object_instance_id,
+ room="Warehouse",
+ office_layout=layout,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/objects_in_containers.py b/src/arena_missions/challenges/objects_in_containers.py
new file mode 100644
index 0000000..26050eb
--- /dev/null
+++ b/src/arena_missions/challenges/objects_in_containers.py
@@ -0,0 +1,462 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsFullOfItemsExpression,
+ IsOpenExpression,
+ IsPickedUpExpression,
+ NotExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_pick_up_from_container_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ container: RequiredObject,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Generate challegnes to pick up objects from containers."""
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Put it in the container
+ target_object.update_receptacle(container.name)
+
+ conditions = [
+ # [PREP] Ensure the object is in the container
+ StateCondition(
+ stateName="InContainer",
+ context=container.name,
+ expression=StateExpression.from_expression(
+ ContainsExpression(target=container.name, contains=target_object.name)
+ ),
+ ),
+ # Ensure the object is picked up from the container
+ StateCondition(
+ stateName="PickedUpFromContainer",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsOpenExpression(target=container.name, value=True),
+ )
+ ),
+ ),
+ StateCondition(
+ stateName="ClosedContainer",
+ context=container.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=container.name, value=False),
+ NotExpression(
+ expression=StateExpression.from_expression(
+ ContainsExpression(target=container.name, contains=target_object.name)
+ )
+ ),
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ if not container.room:
+ raise ValueError(f"Container {container.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=container.room,
+ required_objects={
+ container.name: container,
+ target_object.name: target_object,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"find the {container.readable_name}",
+ f"open the {container.readable_name}",
+ f"pick up the {target_object_instance_id.readable_name}",
+ f"close the {container.readable_name}",
+ ],
+ preparation_plan=[
+ f"find the {container.readable_name}",
+ f"open the {container.readable_name}",
+ f"close the {container.readable_name}",
+ ],
+ )
+
+ def create_mission_with_container_open() -> ChallengeBuilderOutput:
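+        """Create the mission, with the container opened during the preparation phase."""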
+ builder_output = create_mission()
+ builder_output.plan = [
+ f"find the {container.readable_name}",
+ f"pick up the {target_object_instance_id.readable_name}",
+ f"close the {container.readable_name}",
+ ]
+ builder_output.preparation_plan = [
+ f"find the {container.readable_name}",
+ f"open the {container.readable_name}",
+ ]
+ return builder_output
+
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=target_object_instance_id.object_id,
+ from_receptacle=container.object_id,
+ from_receptacle_is_container=True,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_container_open)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=target_object_instance_id.object_id,
+ target_object_color=color,
+ from_receptacle=container.object_id,
+ from_receptacle_is_container=True,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_container_open
+ )
+
+
+def create_place_in_container_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ container: RequiredObject,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Generate challenges to pick up objects from containers."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+
+ # Put the target object on the table
+ target_object.update_receptacle(breakroom_table.name)
+
+ conditions = [
+ # Ensure the container is not full of items at the start of the mission
+ StateCondition(
+ stateName="ContainerNotFull",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsFullOfItemsExpression(target=container.name, value=False),
+ IsPickedUpExpression(target=target_object.name, value=True),
+ )
+ ),
+ ),
+        # Place it in the container while it's open
+ StateCondition(
+ stateName="PlacedInContainer",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=container.name, value=True),
+ ContainsExpression(target=container.name, contains=target_object.name),
+ IsPickedUpExpression(target=target_object.name, value=False),
+ )
+ ),
+ ),
+ # Close the container with the object inside
+ StateCondition(
+ stateName="ClosedContainer",
+ context=container.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=container.name, value=False),
+ ContainsExpression(target=container.name, contains=target_object.name),
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ if not container.room:
+ raise ValueError(f"Container {container.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=container.room,
+ required_objects={
+ container.name: container,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"find the {container.readable_name}",
+ f"open the {container.readable_name}",
+ f"put the {target_object_instance_id.readable_name} in the {container.readable_name}",
+ f"close the {container.readable_name}",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ def create_mission_with_container_open() -> ChallengeBuilderOutput:
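+        """Create the mission, with the container starting in the open state."""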
+ builder_output = create_mission()
+        # Start with the container already open
+ builder_output.required_objects[container.name].add_state("isOpen", "true")
+ # Change the plans
+ builder_output.plan = [
+ f"find the {container.readable_name}",
+ f"put the {target_object_instance_id.readable_name} in the {container.readable_name}",
+ f"close the {container.readable_name}",
+ ]
+ return builder_output
+
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=target_object_instance_id.object_id,
+ to_receptacle=container.object_id,
+ to_receptacle_is_container=True,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_container_open)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=target_object_instance_id.object_id,
+ target_object_color=color,
+ to_receptacle=container.object_id,
+ to_receptacle_is_container=True,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_container_open
+ )
+
+
+def register_objects_with_fridge_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the fridge."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ container = required_objects_builder.fridge()
+
+    # Ensure the container is in the breakroom
+ container.update_room("BreakRoom")
+
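+    # Each entry: (object instance id, color variants for pickup challenges, color variants for place challenges)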
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True, True),
+ (ObjectInstanceId.parse("AppleSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("Banana_01_1"), False, False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False, False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False, False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("Bowl_01_1"), True, True),
+ (ObjectInstanceId.parse("Burger_04_1"), False, False),
+ (ObjectInstanceId.parse("Cake_02_1"), True, True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False, False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_Crushed_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_Open_01_1"), False, False),
+ (ObjectInstanceId.parse("Carrot_01_1"), True, True),
+ (ObjectInstanceId.parse("Cereal_Box_01_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeBeans_01_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeCup_Open_Empty_02_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True, True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True, True),
+ (ObjectInstanceId.parse("Donut_01_1"), False, True),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True, True),
+ (ObjectInstanceId.parse("Fork_01_1"), False, False),
+ (ObjectInstanceId.parse("Jar_Jam_01_1"), False, False),
+ (ObjectInstanceId.parse("Jar_PeanutButter_01_1"), False, False),
+ (ObjectInstanceId.parse("Knife_01_1"), False, False),
+ (ObjectInstanceId.parse("MilkCarton_01_1"), False, False),
+ (ObjectInstanceId.parse("PaperCup_01_1"), False, False),
+ (ObjectInstanceId.parse("PaperCup_Crushed_01_1"), False, False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False, False),
+ (ObjectInstanceId.parse("Pear_01_1"), True, True),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False, False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False, False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False, False),
+ (ObjectInstanceId.parse("Toast_01_1"), False, False),
+ (ObjectInstanceId.parse("Toast_02_1"), False, False),
+ (ObjectInstanceId.parse("Toast_03_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False, False),
+ ]
+
+ for target_object, pickup_color_variants, place_color_variants in target_object_iterator:
+ create_pick_up_from_container_challenge(
+ target_object,
+ container,
+ with_color_variants=pickup_color_variants & enable_color_variants,
+ )
+ create_place_in_container_challenge(
+ target_object,
+ container,
+ with_color_variants=place_color_variants & enable_color_variants,
+ )
+
+
+def register_objects_with_freezer_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the freezer."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ container = required_objects_builder.freezer()
+
+    # Ensure the container is in the breakroom
+ container.update_room("BreakRoom")
+
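+    # Each entry: (object instance id, color variants for pickup challenges, color variants for place challenges)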
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True, True),
+ (ObjectInstanceId.parse("AppleSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("Banana_01_1"), False, False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False, False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False, False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("Bowl_01_1"), True, True),
+ (ObjectInstanceId.parse("Burger_04_1"), False, False),
+ (ObjectInstanceId.parse("Cake_02_1"), True, True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False, False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_Crushed_01_1"), False, False),
+ (ObjectInstanceId.parse("CanSodaNew_Open_01_1"), False, False),
+ (ObjectInstanceId.parse("Carrot_01_1"), True, True),
+ (ObjectInstanceId.parse("Cereal_Box_01_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeBeans_01_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeCup_Open_Empty_02_1"), False, False),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True, True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True, True),
+ (ObjectInstanceId.parse("Donut_01_1"), False, True),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True, True),
+ (ObjectInstanceId.parse("Fork_01_1"), False, False),
+ (ObjectInstanceId.parse("Jar_Jam_01_1"), False, False),
+ (ObjectInstanceId.parse("Jar_PeanutButter_01_1"), False, False),
+ (ObjectInstanceId.parse("Knife_01_1"), False, False),
+ (ObjectInstanceId.parse("PaperCup_01_1"), False, False),
+ (ObjectInstanceId.parse("PaperCup_Crushed_01_1"), False, False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False, False),
+ (ObjectInstanceId.parse("Pear_01_1"), True, True),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False, False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False, False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False, False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False, False),
+ (ObjectInstanceId.parse("Toast_01_1"), False, False),
+ (ObjectInstanceId.parse("Toast_02_1"), False, False),
+ (ObjectInstanceId.parse("Toast_03_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False, False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False, False),
+ ]
+ for target_object, pickup_color_variants, place_color_variants in target_object_iterator:
+ create_pick_up_from_container_challenge(
+ target_object,
+ container,
+ with_color_variants=pickup_color_variants & enable_color_variants,
+ )
+ create_place_in_container_challenge(
+ target_object,
+ container,
+ with_color_variants=place_color_variants & enable_color_variants,
+ )
+
+
+def register_warehouse_cabinet_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the warehouse cabinet."""
+ container = RequiredObject(
+ name=ObjectInstanceId.parse("KitchenCabinet_02_1"), roomLocation=["Warehouse"]
+ )
+
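+    # Each entry: (object instance id, whether to register color variants)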
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True),
+ (ObjectInstanceId.parse("AppleSlice_01_1"), False),
+ (ObjectInstanceId.parse("Banana_01_1"), False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False),
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("Burger_04_1"), False),
+ (ObjectInstanceId.parse("Cake_02_1"), True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False),
+ (ObjectInstanceId.parse("Carrot_01_1"), True),
+ (ObjectInstanceId.parse("CanSodaNew_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_Crushed_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_Open_01_1"), False),
+ (ObjectInstanceId.parse("CoffeeBeans_01_1"), False),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("Donut_01_1"), True),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True),
+ (ObjectInstanceId.parse("Fork_01_1"), False),
+ (ObjectInstanceId.parse("Jar_Jam_01_1"), False),
+ (ObjectInstanceId.parse("Jar_PeanutButter_01_1"), False),
+ (ObjectInstanceId.parse("Knife_01_1"), False),
+ (ObjectInstanceId.parse("PaperCup_01_1"), False),
+ (ObjectInstanceId.parse("PaperCup_Crushed_01_1"), False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False),
+ (ObjectInstanceId.parse("Toast_01_1"), False),
+ (ObjectInstanceId.parse("Toast_02_1"), False),
+ (ObjectInstanceId.parse("Toast_03_1"), False),
+ (ObjectInstanceId.parse("Toast_04_1"), False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False),
+ ]
+
+ for target_object, with_color_variants in target_object_iterator:
+ create_pick_up_from_container_challenge(
+ target_object,
+ container,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
+ create_place_in_container_challenge(
+ target_object,
+ container,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/operate_carrot_maker.py b/src/arena_missions/challenges/operate_carrot_maker.py
new file mode 100644
index 0000000..79c914e
--- /dev/null
+++ b/src/arena_missions/challenges/operate_carrot_maker.py
@@ -0,0 +1,162 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ ContainsExpression,
+ HighLevelKey,
+ IsPickedUpExpression,
+ ObjectGoalState,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_operate_carrot_maker_challenges(
+ target_object: RequiredObject,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challeneges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Make the target object unique
+ target_object.add_state("Unique", "true")
+
+ # Create the carrot maker
+ carrot_maker = required_object_builder.carrot_maker()
+ carrot_maker.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+ # [PREP] The target object is picked up
+ StateCondition(
+ stateName="OriginalPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ ),
+ ),
+        # The target object is placed on the carrot maker
+ StateCondition(
+ stateName="MachineContainsTarget",
+ context=carrot_maker.name,
+ expression=StateExpression.from_expression(
+ ContainsExpression(target=carrot_maker.name, contains=target_object.name)
+ ),
+ ),
+ ]
+
+ goals = [
+ *[TaskGoal.from_state_condition(condition) for condition in conditions],
+ # Ensure the machine is used on the target
+ TaskGoal.from_object_goal_states(
+ [ObjectGoalState.from_parts(carrot_maker.name, "isToggledOn", "true")]
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="Lab2",
+ required_objects={
+ carrot_maker.name: carrot_maker,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ "find the carrot maker",
+ f"put the {target_object.readable_name} on the carrot maker",
+ "toggle the carrot maker",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object.readable_name}",
+ "go to the quantum lab",
+ ],
+ )
+
+ # Register versions of the challenges
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=carrot_maker.object_id,
+ target_object=target_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=carrot_maker.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_carrot_maker_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges with the carrot maker."""
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True),
+ (ObjectInstanceId.parse("Banana_01_1"), False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False),
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("Burger_04_1"), False),
+ (ObjectInstanceId.parse("Cake_02_1"), True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_Crushed_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_Open_01_1"), False),
+ (ObjectInstanceId.parse("CoffeeBeans_01_1"), False),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), False),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("Donut_01_1"), True),
+ (ObjectInstanceId.parse("FoodPlate_01_1"), True),
+ (ObjectInstanceId.parse("Fork_01_1"), False),
+ (ObjectInstanceId.parse("Jar_Jam_01_1"), False),
+ (ObjectInstanceId.parse("Jar_PeanutButter_01_1"), False),
+ (ObjectInstanceId.parse("Knife_01_1"), False),
+ (ObjectInstanceId.parse("PaperCup_01_1"), False),
+ (ObjectInstanceId.parse("PaperCup_Crushed_01_1"), False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False),
+ (ObjectInstanceId.parse("Toast_01_1"), False),
+ (ObjectInstanceId.parse("Toast_02_1"), False),
+ (ObjectInstanceId.parse("Toast_03_1"), False),
+ (ObjectInstanceId.parse("Toast_04_1"), False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False),
+ ]
+
+ for target_object, with_color_variants in target_object_iterator:
+ create_operate_carrot_maker_challenges(
+ target_object=RequiredObject(
+ name=target_object,
+ ),
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/operate_microwave.py b/src/arena_missions/challenges/operate_microwave.py
new file mode 100644
index 0000000..f279941
--- /dev/null
+++ b/src/arena_missions/challenges/operate_microwave.py
@@ -0,0 +1,326 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsBrokenExpression,
+ IsHotExpression,
+ IsOpenExpression,
+ IsPickedUpExpression,
+ IsToggledOnExpression,
+ ObjectId,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_heat_with_microwave_challenges(
+ target_object: RequiredObject,
+ converted_object: ObjectId,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challeneges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Make the target object unique
+ target_object.add_state("Unique", "true")
+
+ # Create the microwave
+ microwave = required_object_builder.microwave()
+ microwave.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+ # Pick up an object that is not hot
+ StateCondition(
+ stateName="OriginalPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsHotExpression(target=target_object.name, value=False),
+ )
+ ),
+ ),
+ # Ensure the machine is used on the target
+ StateCondition(
+ stateName="MachineUsedOnTarget",
+ context=microwave.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=microwave.name, value=True),
+ ContainsExpression(target=microwave.name, contains=target_object.name),
+ )
+ ),
+ ),
+ # Pick up the object
+ StateCondition(
+ stateName="ConvertedPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=target_object.name, value=False),
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsHotExpression(target=target_object.name, value=True),
+ )
+ ),
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ required_objects={
+ microwave.name: microwave,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=conditions,
+ task_goals=[TaskGoal.from_state_condition(condition) for condition in conditions],
+ plan=[
+ "find the microwave",
+ "open the microwave",
+ f"put the {target_object.readable_name} in the microwave",
+ "close the microwave",
+ "turn on the microwave",
+ "open the microwave",
+ f"pick up the {converted_object.readable_name} from the microwave",
+ "close the microwave",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ def create_mission_with_door_open() -> ChallengeBuilderOutput:
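+        """Create the mission, with the microwave door opened during the preparation phase."""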
+ builder_output = create_mission()
+ # Change the plans
+ builder_output.plan = [
+ "find the microwave",
+ f"put the {target_object.readable_name} in the microwave",
+ "close the microwave",
+ "turn on the microwave",
+ "open the microwave",
+ f"pick up the {converted_object.readable_name} from the microwave",
+ "close the microwave",
+ ]
+ builder_output.preparation_plan = [
+ "find the microwave",
+ "open the microwave",
+ f"pick up the {target_object.readable_name}",
+ ]
+ return builder_output
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=microwave.object_id,
+ target_object=target_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_door_open)
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=microwave.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ converted_object=converted_object,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_door_open
+ )
+
+
+def create_break_with_microwave_challenges(
+ target_object: RequiredObject,
+ converted_object: ObjectId,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challeneges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Make the target object unique
+ target_object.add_state("Unique", "true")
+
+ # Create the microwave
+ microwave = required_object_builder.microwave()
+ microwave.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+ # Pick up an object that is not hot
+ StateCondition(
+ stateName="OriginalPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsBrokenExpression(target=target_object.name, value=False),
+ )
+ ),
+ ),
+ # Ensure the machine is used on the target
+ StateCondition(
+ stateName="MachineUsedOnTarget",
+ context=microwave.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=microwave.name, value=True),
+ ContainsExpression(target=microwave.name, contains=target_object.name),
+ )
+ ),
+ ),
+ # Pick up the object
+ StateCondition(
+ stateName="ConvertedPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=target_object.name, value=False),
+ IsPickedUpExpression(target=target_object.name, value=True),
+ IsBrokenExpression(target=target_object.name, value=True),
+ )
+ ),
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ required_objects={
+ microwave.name: microwave,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=conditions,
+ task_goals=[TaskGoal.from_state_condition(condition) for condition in conditions],
+ plan=[
+ "go to the microwave",
+ "open the microwave",
+ f"put the {target_object.readable_name} in the microwave",
+ "close the microwave",
+ "turn on the microwave",
+ "open the microwave",
+ f"pick up the {converted_object.readable_name} from the microwave",
+ "close the microwave",
+ ],
+ preparation_plan=[
+ "go to the breakroom table",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ def create_mission_with_door_open() -> ChallengeBuilderOutput:
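+        """Create the mission, with the microwave door opened during the preparation phase."""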
+ builder_output = create_mission()
+ builder_output.plan = [
+ "go to the microwave",
+ f"put the {target_object.readable_name} in the microwave",
+ "close the microwave",
+ "turn on the microwave",
+ "open the microwave",
+ f"pick up the {converted_object.readable_name} from the microwave",
+ "close the microwave",
+ ]
+ builder_output.preparation_plan = [
+ "find the microwave",
+ "open the microwave",
+ f"pick up the {target_object.readable_name}",
+ ]
+ return builder_output
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=microwave.object_id,
+ target_object=target_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_door_open)
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=microwave.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ converted_object=converted_object,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_door_open
+ )
+
+
+def register_heat_things(enable_color_variants: bool = True) -> None:
+ """Register challenges to heat different things."""
+ heatable_target_object_iterator = [
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("Cake_02_1"), True),
+ (ObjectInstanceId.parse("CanSodaNew_01_1"), False),
+ (ObjectInstanceId.parse("CanSodaNew_Open_01_1"), False),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ (ObjectInstanceId.parse("CoffeePot_01_1"), False),
+ ]
+
+ for heatable_target_object, heatable_with_color_variants in heatable_target_object_iterator:
+ create_heat_with_microwave_challenges(
+ target_object=RequiredObject(
+ name=heatable_target_object,
+ ),
+ converted_object=heatable_target_object.object_id,
+ with_color_variants=heatable_with_color_variants & enable_color_variants,
+ )
+
+ breakable_target_object_iterator = [
+ (ObjectInstanceId.parse("Floppy_AntiVirus_1"), False),
+ (ObjectInstanceId.parse("Floppy_Virus_1"), False),
+ ]
+ for breakable_target_object, breakable_with_color_variants in breakable_target_object_iterator:
+ create_break_with_microwave_challenges(
+ target_object=RequiredObject(
+ name=breakable_target_object,
+ ),
+ converted_object=breakable_target_object.object_id,
+ with_color_variants=enable_color_variants & breakable_with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/operate_printer.py b/src/arena_missions/challenges/operate_printer.py
new file mode 100644
index 0000000..e7e0859
--- /dev/null
+++ b/src/arena_missions/challenges/operate_printer.py
@@ -0,0 +1,133 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import OfficeLayout
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsPickedUpExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_operate_printer_challenges(
+ printer_cartridge: RequiredObject,
+ printer_spawned_object: ObjectInstanceId,
+ office_layout: OfficeLayout,
+) -> None:
+ """Register challenges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Make the target object unique
+ printer_cartridge.add_state("Unique", "true")
+
+ # Create the time machine
+ printer = required_object_builder.printer()
+ printer.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ printer_cartridge.update_receptacle(breakroom_table.name)
+
+    # Ensure the robotic arm is out of the way
+ robotic_arm = required_object_builder.robotic_arm()
+
+ # Success conditions
+ conditions = [
+ # Pick up the target object
+ StateCondition(
+ stateName="TargetPickedUp",
+ context=printer_cartridge.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=printer_cartridge.name, value=True),
+ )
+ ),
+ ),
+ # Ensure the machine is used on the target
+ StateCondition(
+ stateName="PrinterUsed",
+ context=printer.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ ContainsExpression(
+ target=printer.name, contains=printer_spawned_object.with_asterisk
+ ),
+ ContainsExpression(
+ target=printer.name, contains=printer_cartridge.name.with_asterisk
+ ),
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="Lab1",
+ office_layout=office_layout,
+ required_objects={
+ printer.name: printer,
+ printer_cartridge.name: printer_cartridge,
+ breakroom_table.name: breakroom_table,
+ robotic_arm.name: robotic_arm,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ "find the printer",
+ f"put the {printer_cartridge.readable_name} in the printer",
+ "turn on the printer",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {printer_cartridge.readable_name}",
+ "go to the robotics lab",
+ ],
+ )
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=printer.object_id,
+ target_object=printer_cartridge.object_id,
+ converted_object=printer_spawned_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+
+def register_print_things() -> None:
+ """Register challenges to print things using the 3D printer."""
+ object_instance_ids = [
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Figure_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_ActionFigure_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Hammer_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_Hammer_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Lever_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_FuseBox_01_Lever_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Mug_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_CoffeeMug_Yellow_1"),
+ ),
+ ]
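+    # get_args pulls the literal values out of the OfficeLayout type, so each
+    # cartridge/spawned-object pair is registered once per office layout.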
+ for office_layout in get_args(OfficeLayout):
+ for object_instance_id, converted_object_id in object_instance_ids:
+ create_operate_printer_challenges(
+ printer_cartridge=RequiredObject(name=object_instance_id),
+ printer_spawned_object=converted_object_id,
+ office_layout=office_layout,
+ )
diff --git a/src/arena_missions/challenges/operate_time_machine.py b/src/arena_missions/challenges/operate_time_machine.py
new file mode 100644
index 0000000..62bdd8a
--- /dev/null
+++ b/src/arena_missions/challenges/operate_time_machine.py
@@ -0,0 +1,371 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ Expression,
+ HighLevelKey,
+ IsBrokenExpression,
+ IsFullOfItemsExpression,
+ IsOpenExpression,
+ IsPickedUpExpression,
+ IsToggledOnExpression,
+ NotExpression,
+ ObjectGoalState,
+ ObjectInstanceId,
+ RequiredObject,
+ RequiredObjectState,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_operate_time_machine_challenges(
+ target_object: RequiredObject,
+ converted_object: ObjectInstanceId,
+ additional_conditions_for_converted_object: list[Expression],
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Register challeneges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Make the target object unique
+ target_object.add_state("Unique", "true")
+
+ # Create the time machine
+ time_machine = required_object_builder.time_machine()
+ time_machine.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+ # [PREP] The target object is picked up
+ StateCondition(
+ stateName="OriginalPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ ),
+ ),
+ # Ensure the machine is used on the target
+ StateCondition(
+ stateName="MachineUsedOnTarget",
+ context=time_machine.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=time_machine.name, value=True),
+ ContainsExpression(target=time_machine.name, contains=target_object.name),
+ )
+ ),
+ ),
+        # Pick up the converted object
+ StateCondition(
+ stateName="ConvertedPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=time_machine.name, value=False),
+ IsPickedUpExpression(target=converted_object, value=True),
+ *additional_conditions_for_converted_object,
+ )
+ ),
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ required_objects={
+ time_machine.name: time_machine,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ },
+ state_conditions=conditions,
+ task_goals=[TaskGoal.from_state_condition(condition) for condition in conditions],
+ plan=[
+ "go to the time machine",
+ "open the time machine",
+ f"put the {target_object.readable_name} in the time machine",
+ "close the time machine",
+ "turn on the time machine",
+ "open the time machine",
+ f"pick up the {converted_object.readable_name} from the time machine",
+ "close the time machine",
+ ],
+ preparation_plan=["go to the breakroom", f"pick up the {target_object.name}"],
+ )
+
+ def create_mission_with_door_open() -> ChallengeBuilderOutput:
+ builder_output = create_mission()
+ # Open the time machine
+ builder_output.required_objects[time_machine.name].add_state("isOpen", "true")
+ # Change the plans
+ builder_output.plan = [
+ "go to the time machine",
+ f"put the {target_object.readable_name} in the time machine",
+ "close the time machine",
+ "turn on the time machine",
+ "open the time machine",
+ f"pick up the {converted_object.readable_name} from the time machine",
+ "close the time machine",
+ ]
+ return builder_output
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=time_machine.object_id,
+ target_object=target_object.object_id,
+ converted_object=converted_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_door_open)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=time_machine.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ converted_object=converted_object.object_id,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_door_open
+ )
+
+
+def create_operate_time_machine_with_carrot(
+ converted_object: ObjectInstanceId,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Create challenges that convert carrots to objects."""
+ required_object_builder = RequiredObjectBuilder()
+
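+    # The carrot's yesterdayState is set to the converted object's id, so reverting
+    # the carrot in the time machine should yield that object.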
+ carrot_object = RequiredObject(
+ name=ObjectInstanceId.parse("Carrot_01_1"),
+ yesterdayState=converted_object.object_id,
+ )
+ # Make the target object unique
+ carrot_object.add_state("Unique", "true")
+
+ # Blacklist the converted object
+ output_object = RequiredObject(name=converted_object)
+ output_object.add_state("Blacklist", "true")
+
+ # Create the time machine
+ time_machine = required_object_builder.time_machine()
+ time_machine.add_state("Unique", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ carrot_object.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions: list[StateCondition] = [
+ # Pick up the carrot
+ StateCondition(
+ stateName="CarrotPickedUp",
+ context=carrot_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=carrot_object.name, value=True)
+ ),
+ ),
+ # Ensure the machine is used on the target
+ StateCondition(
+ stateName="MachineUsedOnTarget",
+ context=time_machine.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsToggledOnExpression(target=time_machine.name, value=True),
+ ContainsExpression(target=time_machine.name, contains=carrot_object.name),
+ )
+ ),
+ ),
+ # Close the time machine after picking up the object
+ StateCondition(
+ stateName="TimeMachineClosed",
+ context=time_machine.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=time_machine.name, value=False),
+ NotExpression(
+ expression=StateExpression.from_expression(
+ IsFullOfItemsExpression(target=time_machine.name, value=True)
+ )
+ ),
+ )
+ ),
+ ),
+ ]
+ goals = [
+ *[TaskGoal.from_state_condition(condition) for condition in conditions],
+ # Separately, create the task goal that makes sure the output object is picked up after conversion
+ TaskGoal.from_object_goal_states(
+ [ObjectGoalState.from_parts(output_object.name.with_asterisk, "isPickedUp", "true")]
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ required_objects={
+ time_machine.name: time_machine,
+ carrot_object.name: carrot_object,
+ breakroom_table.name: breakroom_table,
+ output_object.name: output_object,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ "go to the time machine",
+ "open the time machine",
+ f"put the {carrot_object.readable_name} in the time machine",
+ "close the time machine",
+ "turn on the time machine",
+ "open the time machine",
+ f"pick up the {converted_object.readable_name} from the time machine",
+ "close the time machine",
+ ],
+ preparation_plan=["go to the breakroom", f"pick up the {carrot_object.readable_name}"],
+ )
+
+ def create_mission_with_door_open() -> ChallengeBuilderOutput:
+ builder_output = create_mission()
+ # Open the time machine
+ builder_output.required_objects[time_machine.name].add_state("isOpen", "true")
+ # Change the plans
+ builder_output.plan = [
+ "go to the time machine",
+ f"put the {carrot_object.readable_name} in the time machine",
+ "close the time machine",
+ "turn on the time machine",
+ "open the time machine",
+ f"pick up the {converted_object.readable_name} from the time machine",
+ "close the time machine",
+ ]
+ return builder_output
+
+ # Register versions of the challenges without color variants
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=time_machine.object_id,
+ target_object=carrot_object.object_id,
+ converted_object=converted_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+ ChallengeBuilder.register(high_level_key)(create_mission_with_door_open)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ carrot_object.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=time_machine.object_id,
+ target_object=carrot_object.object_id,
+ target_object_color=color,
+ converted_object=converted_object.object_id,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission_with_door_open
+ )
+
+
+def register_repair_broken_things(enable_color_variants: bool = True) -> None:
+ """Register challenges to repair broken things."""
+ object_instance_ids = [
+ ObjectInstanceId.parse("Bowl_01_1"),
+ ObjectInstanceId.parse("FoodPlate_01_1"),
+ ]
+
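+    # The same instance id is reused for the target and the converted object: the
+    # time machine repairs the broken object in place, and the additional
+    # IsBrokenExpression(value=False) condition checks that the repair happened.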
+ for object_instance_id in object_instance_ids:
+ create_operate_time_machine_challenges(
+ target_object=RequiredObject(
+ name=object_instance_id,
+ state=[RequiredObjectState.from_parts("isBroken", "true")],
+ ),
+ converted_object=object_instance_id,
+ additional_conditions_for_converted_object=[
+ IsBrokenExpression(target=object_instance_id, value=False)
+ ],
+ with_color_variants=enable_color_variants,
+ )
+
+
+def register_repair_carrots(enable_color_variants: bool = True) -> None:
+ """Register challenges to repair carrots."""
+ converted_object_instance_ids = [
+ ObjectInstanceId.parse("Apple_1"),
+ ObjectInstanceId.parse("Banana_01_1"),
+ ObjectInstanceId.parse("BananaBunch_01_1"),
+ ObjectInstanceId.parse("BreadLoaf_1"),
+ ObjectInstanceId.parse("BreadSlice_01_1"),
+ ObjectInstanceId.parse("Bowl_01_1"),
+ ObjectInstanceId.parse("Burger_04_1"),
+ ObjectInstanceId.parse("Cake_02_1"),
+ ObjectInstanceId.parse("CakeSlice_02_1"),
+ ObjectInstanceId.parse("CandyBar_01_1"),
+ ObjectInstanceId.parse("CanSodaNew_01_1"),
+ ObjectInstanceId.parse("CanSodaNew_Crushed_01_1"),
+ ObjectInstanceId.parse("CanSodaNew_Open_01_1"),
+ ObjectInstanceId.parse("CoffeeBeans_01_1"),
+ ObjectInstanceId.parse("CoffeeMug_Boss_1"),
+ ObjectInstanceId.parse("CoffeeMug_Yellow_1"),
+ ObjectInstanceId.parse("Donut_01_1"),
+ ObjectInstanceId.parse("FoodPlate_01_1"),
+ ObjectInstanceId.parse("Fork_01_1"),
+ ObjectInstanceId.parse("Jar_Jam_01_1"),
+ ObjectInstanceId.parse("Jar_PeanutButter_01_1"),
+ ObjectInstanceId.parse("Knife_01_1"),
+ ObjectInstanceId.parse("PaperCup_01_1"),
+ ObjectInstanceId.parse("PaperCup_Crushed_01_1"),
+ ObjectInstanceId.parse("PBJ_Sandwich_1"),
+ ObjectInstanceId.parse("PieFruitSlice_01_1"),
+ ObjectInstanceId.parse("PieFruit_01_1"),
+ ObjectInstanceId.parse("SandwichHalf_01_1"),
+ ObjectInstanceId.parse("Spoon_01_1"),
+ ObjectInstanceId.parse("Toast_01_1"),
+ ObjectInstanceId.parse("Toast_02_1"),
+ ObjectInstanceId.parse("Toast_03_1"),
+ ObjectInstanceId.parse("Toast_04_1"),
+ ObjectInstanceId.parse("Toast_04_Jam_1"),
+ ObjectInstanceId.parse("Toast_04_PBJ_1"),
+ ]
+
+ for converted_object in converted_object_instance_ids:
+ create_operate_time_machine_with_carrot(
+ converted_object, with_color_variants=enable_color_variants
+ )
diff --git a/src/arena_missions/challenges/pickup_from_printer.py b/src/arena_missions/challenges/pickup_from_printer.py
new file mode 100644
index 0000000..6be4e30
--- /dev/null
+++ b/src/arena_missions/challenges/pickup_from_printer.py
@@ -0,0 +1,133 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import OfficeLayout
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ ObjectGoalState,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_pickup_from_printer_challenges(
+ printer_cartridge: RequiredObject,
+ printer_spawned_object_id: ObjectInstanceId,
+ office_layout: OfficeLayout,
+) -> None:
+ """Register challenges."""
+ required_object_builder = RequiredObjectBuilder()
+
+ # Create the printer
+ printer = required_object_builder.printer()
+
+ # Make sure the cartridge is unique
+ printer_cartridge.add_state("Unique", "true")
+
+    # Ensure the robotic arm is out of the way
+ robotic_arm = required_object_builder.robotic_arm()
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+ printer_cartridge.update_receptacle(breakroom_table.name)
+
+ # Success conditions
+ conditions = [
+        # [PREP] Ensure the printer has been used on the cartridge
+ StateCondition(
+ stateName="PrinterUsed",
+ context=printer.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ ContainsExpression(
+ target=printer.name, contains=printer_spawned_object_id.with_asterisk
+ ),
+ ContainsExpression(
+ target=printer.name, contains=printer_cartridge.name.with_asterisk
+ ),
+ )
+ ),
+ ),
+ ]
+
+ goals = [
+ *[TaskGoal.from_state_condition(condition) for condition in conditions],
+ # Pick up the target object
+ TaskGoal.from_object_goal_states(
+ [
+ ObjectGoalState.from_parts(
+ printer_spawned_object_id.with_asterisk, "isPickedUp", "true"
+ )
+ ]
+ ),
+ ]
+
+ # Create mission
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="Lab1",
+ office_layout=office_layout,
+ required_objects={
+ breakroom_table.name: breakroom_table,
+ printer.name: printer,
+ printer_cartridge.name: printer_cartridge,
+ robotic_arm.name: robotic_arm,
+ },
+ state_conditions=conditions,
+ task_goals=goals,
+ plan=[
+ f"pick up the {printer_spawned_object_id.readable_name} from the printer",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {printer_cartridge.readable_name}",
+ "go to the robotics lab",
+ "go to the printer",
+ f"put the {printer_cartridge.readable_name} in the printer",
+ "turn on the printer",
+ ],
+ )
+
+    # Register the challenge
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=printer_spawned_object_id.object_id,
+ from_receptacle=printer.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+
+def register_pickup_from_printer_challenges() -> None:
+ """Register challenges to print things using the 3D printer."""
+ object_instance_ids = [
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Figure_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_ActionFigure_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Hammer_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_Hammer_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Lever_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_FuseBox_01_Lever_1"),
+ ),
+ (
+ ObjectInstanceId.parse("Printer_Cartridge_Mug_1"),
+ ObjectInstanceId.parse("Printer_3D_1_Spawned_CoffeeMug_Yellow_1"),
+ ),
+ ]
+
+ for office_layout in get_args(OfficeLayout):
+ for printer_cartridge_id, converted_object_id in object_instance_ids:
+ create_pickup_from_printer_challenges(
+ printer_cartridge=RequiredObject(name=printer_cartridge_id),
+ printer_spawned_object_id=converted_object_id,
+ office_layout=office_layout,
+ )
diff --git a/src/arena_missions/challenges/pickup_stack.py b/src/arena_missions/challenges/pickup_stack.py
new file mode 100644
index 0000000..eb07eb8
--- /dev/null
+++ b/src/arena_missions/challenges/pickup_stack.py
@@ -0,0 +1,186 @@
+from typing import Any, Optional, get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor, ObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsPickedUpExpression,
+ ObjectId,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def get_color_from_id(object_id: ObjectId) -> Optional[ObjectColor]:
+ """Extracts the color from the object id."""
+ if "green" in object_id.lower():
+ return "Green"
+
+ if "blue" in object_id.lower():
+ return "Blue"
+
+ if "red" in object_id.lower():
+ return "Red"
+
+ return None
+
+
+def create_plate_stack_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ receptacle: RequiredObject,
+ *,
+ with_stacked_object_color_variants: bool = False,
+) -> None:
+ """Generate challenes to pick up objects from containers."""
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Create the plate
+ plate = RequiredObject(name=ObjectInstanceId.parse("FoodPlate_01_1"))
+ plate.add_state("Unique", "true")
+ plate.add_state("isDirty", "false")
+
+    # Put the plate on the receptacle and stack the target object on the plate
+ plate.update_receptacle(receptacle.name)
+ target_object.update_receptacle(plate.name)
+
+ conditions = [
+ # [PREP] Ensure the plate is in the receptacle
+ StateCondition(
+ stateName="InReceptacle",
+ context=plate.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ ContainsExpression(target=receptacle.name, contains=plate.name),
+ ContainsExpression(target=plate.name, contains=target_object.name),
+ ),
+ ),
+ ),
+ # Ensure we pick up the plate
+ StateCondition(
+ stateName="PickedUpPlate",
+ context=plate.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=plate.name, value=True),
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ if not receptacle.room:
+ raise ValueError(f"Receptacle {receptacle.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=receptacle.room,
+ required_objects={
+ receptacle.name: receptacle,
+ target_object.name: target_object,
+ plate.name: plate,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"go to the {receptacle.readable_name}",
+ "pick up the plate",
+ ],
+ )
+
+ plate_colors = [None, *get_args(ColorChangerObjectColor)]
+
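+    # None registers the default (uncolored) plate; the remaining entries register
+    # one challenge variant per plate color.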
+ for plate_color in plate_colors:
+ colored_target_object_kwargs: dict[str, Any] = {"required_objects": {}}
+
+ if plate_color is not None:
+ colored_target_object_kwargs["required_objects"].update(
+ {plate.name: {"colors": [plate_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=plate.object_id,
+ target_object_color=plate_color,
+ stacked_object=target_object.object_id,
+ from_receptacle=receptacle.object_id,
+ from_receptacle_color=get_color_from_id(receptacle.object_id),
+ from_receptacle_is_container=False,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+ if with_stacked_object_color_variants:
+ for target_color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs["required_objects"].update(
+ {target_object.name: {"colors": [target_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="pickup",
+ target_object=plate.object_id,
+ target_object_color=plate_color,
+ stacked_object=target_object.object_id,
+ stacked_object_color=target_color,
+ from_receptacle=receptacle.object_id,
+ from_receptacle_color=get_color_from_id(receptacle.object_id),
+ from_receptacle_is_container=False,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(
+ high_level_key, colored_target_object_kwargs
+ )(create_mission)
+
+
+def register_pickup_plate_stack_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the fridge."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ receptacles = [
+ required_objects_builder.breakroom_table(),
+ required_objects_builder.breakroom_countertop(),
+ *required_objects_builder.main_office_desks(),
+ ]
+
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True),
+ (ObjectInstanceId.parse("AppleSlice_01_1"), False),
+ (ObjectInstanceId.parse("Banana_01_1"), False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False),
+ (ObjectInstanceId.parse("Burger_04_1"), False),
+ (ObjectInstanceId.parse("Cake_02_1"), True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False),
+ (ObjectInstanceId.parse("Carrot_01_1"), True),
+ (ObjectInstanceId.parse("Donut_01_1"), True),
+ (ObjectInstanceId.parse("Fork_01_1"), False),
+ (ObjectInstanceId.parse("Knife_01_1"), False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False),
+ (ObjectInstanceId.parse("Pear_01_1"), True),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False),
+ (ObjectInstanceId.parse("Toast_01_1"), False),
+ (ObjectInstanceId.parse("Toast_02_1"), False),
+ (ObjectInstanceId.parse("Toast_03_1"), False),
+ (ObjectInstanceId.parse("Toast_04_1"), False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False),
+ ]
+
+ for target_object, with_color_variants in target_object_iterator:
+ for receptacle in receptacles:
+ create_plate_stack_challenge(
+ target_object,
+ receptacle,
+ with_stacked_object_color_variants=enable_color_variants & with_color_variants,
+ )
diff --git a/src/arena_missions/challenges/place_stack.py b/src/arena_missions/challenges/place_stack.py
new file mode 100644
index 0000000..ad3cfbd
--- /dev/null
+++ b/src/arena_missions/challenges/place_stack.py
@@ -0,0 +1,307 @@
+from typing import Any, get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ AndExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsOpenExpression,
+ IsPickedUpExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_place_plate_stack_container_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ container: RequiredObject,
+ *,
+ with_stacked_object_color_variants: bool = False,
+) -> None:
+ """Generate challenges to pick up objects from containers."""
+ required_object_builder = RequiredObjectBuilder()
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Create the plate
+ plate = RequiredObject(name=ObjectInstanceId.parse("FoodPlate_01_1"))
+ plate.add_state("Unique", "true")
+ plate.add_state("isDirty", "false")
+ plate.add_state("isEmpty", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+
+ # Put the target object on the table
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Put plate in the container
+ plate.update_receptacle(container.name)
+
+ conditions = [
+ # [PREP] Ensure the item is picked up
+ StateCondition(
+ stateName="ObjectPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ ),
+ ),
+        # Place the object on the plate, which is in the container, while the container is open
+ StateCondition(
+ stateName="PlacedOnPlateInContainer",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsOpenExpression(target=container.name, value=True),
+ ContainsExpression(target=container.name, contains=plate.name),
+ ContainsExpression(target=plate.name, contains=target_object.name),
+ IsPickedUpExpression(target=target_object.name, value=False),
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ if not container.room:
+ raise ValueError(f"Receptacle {container.name} must have a room set")
+
+ return ChallengeBuilderOutput(
+ start_room=container.room,
+ required_objects={
+ container.name: container,
+ target_object.name: target_object,
+ plate.name: plate,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"go to the {container.readable_name}",
+ f"open the {container.readable_name}",
+ f"put the {target_object_instance_id.readable_name} on the plate",
+ f"close the {container.readable_name}",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ plate_colors = [None, *get_args(ColorChangerObjectColor)]
+
+ for plate_color in plate_colors:
+ colored_target_object_kwargs: dict[str, Any] = {"required_objects": {}}
+
+ if plate_color is not None:
+ colored_target_object_kwargs["required_objects"].update(
+ {plate.name: {"colors": [plate_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=plate.object_id,
+ target_object_color=plate_color,
+ stacked_object=target_object.object_id,
+ to_receptacle=container.object_id,
+ to_receptacle_is_container=True,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+ if with_stacked_object_color_variants:
+ for target_color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs["required_objects"].update(
+ {target_object.name: {"colors": [target_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=plate.object_id,
+ target_object_color=plate_color,
+ stacked_object=target_object.object_id,
+ stacked_object_color=target_color,
+ to_receptacle=container.object_id,
+ to_receptacle_is_container=True,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(
+ high_level_key, colored_target_object_kwargs
+ )(create_mission)
+
+
+def create_place_plate_on_gravity_pad_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ gravity_pad: RequiredObject,
+ *,
+ with_color_variants: bool = False,
+) -> None:
+ """Generate challenges to pick up objects from containers."""
+ required_object_builder = RequiredObjectBuilder()
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+ # Create the plate
+ plate = RequiredObject(name=ObjectInstanceId.parse("FoodPlate_01_1"))
+ plate.add_state("Unique", "true")
+ plate.add_state("isDirty", "false")
+ plate.add_state("isEmpty", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_object_builder.breakroom_table()
+
+ # Put the target object on the breakroom table
+ target_object.update_receptacle(breakroom_table.name)
+
+ # Put plate in the gravity pad
+ plate.update_receptacle(gravity_pad.name)
+
+ conditions = [
+ # [PREP] Ensure the item is picked up
+ StateCondition(
+ stateName="ObjectPickedUp",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsPickedUpExpression(target=target_object.name, value=True),
+ ContainsExpression(target=gravity_pad.name, contains=plate.name),
+ )
+ ),
+ ),
+        # Place the object on the plate, which is sitting on the gravity pad
+ StateCondition(
+ stateName="PlacedOnPlateInContainer",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ ContainsExpression(target=gravity_pad.name, contains=plate.name),
+ ContainsExpression(target=plate.name, contains=target_object.name),
+ IsPickedUpExpression(target=target_object.name, value=False),
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ return ChallengeBuilderOutput(
+ start_room="Lab2",
+ required_objects={
+ gravity_pad.name: gravity_pad,
+ target_object.name: target_object,
+ breakroom_table.name: breakroom_table,
+ plate.name: plate,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"go to the {gravity_pad.readable_name}",
+ f"put the {target_object.readable_name} on the plate",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ plate_colors = [None, *get_args(ColorChangerObjectColor)]
+
+ for plate_color in plate_colors:
+ colored_target_object_kwargs: dict[str, Any] = {"required_objects": {}}
+
+ if plate_color is not None:
+ colored_target_object_kwargs["required_objects"].update(
+ {plate.name: {"colors": [plate_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=target_object.object_id,
+ to_receptacle=plate.object_id,
+ to_receptacle_color=plate_color,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+ if with_color_variants:
+ for target_color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs["required_objects"].update(
+ {target_object.name: {"colors": [target_color]}}
+ )
+ high_level_key = HighLevelKey(
+ action="place",
+ target_object=target_object.object_id,
+ target_object_color=target_color,
+ to_receptacle=plate.object_id,
+ to_receptacle_color=plate_color,
+ )
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(
+ high_level_key, colored_target_object_kwargs
+ )(create_mission)
+
+
+def register_place_plate_stack_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the fridge/freezer."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ containers = [required_objects_builder.fridge(), required_objects_builder.freezer()]
+
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Apple_1"), True),
+ (ObjectInstanceId.parse("AppleSlice_01_1"), False),
+ (ObjectInstanceId.parse("Banana_01_1"), False),
+ (ObjectInstanceId.parse("BananaBunch_01_1"), False),
+ (ObjectInstanceId.parse("BreadLoaf_1"), False),
+ (ObjectInstanceId.parse("BreadSlice_01_1"), False),
+ (ObjectInstanceId.parse("Burger_04_1"), False),
+ (ObjectInstanceId.parse("Cake_02_1"), True),
+ (ObjectInstanceId.parse("CakeSlice_02_1"), False),
+ (ObjectInstanceId.parse("CandyBar_01_1"), False),
+ (ObjectInstanceId.parse("Carrot_01_1"), True),
+ (ObjectInstanceId.parse("Donut_01_1"), True),
+ (ObjectInstanceId.parse("Fork_01_1"), False),
+ (ObjectInstanceId.parse("Knife_01_1"), False),
+ (ObjectInstanceId.parse("PBJ_Sandwich_1"), False),
+ (ObjectInstanceId.parse("Pear_01_1"), True),
+ (ObjectInstanceId.parse("PieFruitSlice_01_1"), False),
+ (ObjectInstanceId.parse("PieFruit_01_1"), False),
+ (ObjectInstanceId.parse("SandwichHalf_01_1"), False),
+ (ObjectInstanceId.parse("Spoon_01_1"), False),
+ (ObjectInstanceId.parse("Toast_01_1"), False),
+ (ObjectInstanceId.parse("Toast_02_1"), False),
+ (ObjectInstanceId.parse("Toast_03_1"), False),
+ (ObjectInstanceId.parse("Toast_04_1"), False),
+ (ObjectInstanceId.parse("Toast_04_Jam_1"), False),
+ (ObjectInstanceId.parse("Toast_04_PBJ_1"), False),
+ ]
+
+ for target_object, with_color_variants in target_object_iterator:
+ for container in containers:
+ create_place_plate_stack_container_challenge(
+ target_object,
+ container,
+ with_stacked_object_color_variants=enable_color_variants & with_color_variants,
+ )
+
+
+def register_place_bowl_stack_from_gravity_pad(enable_color_variants: bool = True) -> None:
+ """Register challenges to pick up and place objects in the fridge/freezer."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ create_place_plate_on_gravity_pad_challenge(
+ ObjectInstanceId.parse("Bowl_01_1"),
+ required_objects_builder.gravity_pad(),
+ with_color_variants=enable_color_variants,
+ )
diff --git a/src/arena_missions/challenges/using_coffee_unmaker.py b/src/arena_missions/challenges/using_coffee_unmaker.py
new file mode 100644
index 0000000..b156191
--- /dev/null
+++ b/src/arena_missions/challenges/using_coffee_unmaker.py
@@ -0,0 +1,224 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor, OfficeLayout
+from arena_missions.structures import (
+ AndExpression,
+ HighLevelKey,
+ IsFilledWithExpression,
+ IsPickedUpExpression,
+ IsToggledOnExpression,
+ NotExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def convert_coffee_from_pot_to_beans(*, office_layout: OfficeLayout) -> ChallengeBuilderOutput:
+ """Convert coffee back to beans with the coffee unmaker."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ # Remove existing beans from the scene
+ coffee_beans = RequiredObject(name=ObjectInstanceId.parse("CoffeeBeans_01_1"))
+ coffee_beans.update_state("Blacklist", "true")
+
+ # Coffee unmaker
+ coffee_unmaker = RequiredObject(name=ObjectInstanceId.parse("CoffeeUnMaker_01_1"))
+
+ # Create the coffee pot
+ coffee_pot = required_objects_builder.coffee_pot(fill_with="Coffee")
+
+ conditions = [
+ # Pick up the coffee pot
+ StateCondition(
+ stateName="HoldingCoffeePot",
+ context=coffee_pot.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=coffee_pot.name, value=True)
+ ),
+ ),
+        # Fill the coffee unmaker with coffee, which should come from the coffee pot
+ StateCondition(
+ stateName="CoffeeUnMakerFilledWithCoffee",
+ context=coffee_unmaker.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsFilledWithExpression(target=coffee_unmaker.name, fluid="Coffee"),
+ NotExpression(
+ expression=StateExpression.from_expression(
+ IsFilledWithExpression(target=coffee_pot.name, fluid="Coffee")
+ )
+ ),
+ )
+ ),
+ ),
+ # Turn on the coffee unmaker
+ StateCondition(
+ stateName="CoffeeUnMakerOn",
+ context=coffee_unmaker.name,
+ expression=StateExpression.from_expression(
+ IsToggledOnExpression(target=coffee_unmaker.name, value=True)
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ office_layout=office_layout,
+ required_objects={
+ coffee_pot.name: coffee_pot,
+ coffee_unmaker.name: coffee_unmaker,
+ coffee_beans.name: coffee_beans,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ "find the coffee unmaker",
+ "pour the coffee into the coffee unmaker",
+ "toggle the coffee unmaker",
+ ],
+ preparation_plan=[
+ f"find the {coffee_pot.readable_name}",
+ f"pick up the {coffee_pot.readable_name}",
+ ],
+ )
+
+
+def convert_coffee_from_target_object_to_beans(
+ *,
+ target_object_instance_id: ObjectInstanceId,
+ office_layout: OfficeLayout,
+ with_color_variants: bool = True,
+) -> None:
+ """Convert coffee back to beans with the coffee unmaker."""
+ required_objects_builder = RequiredObjectBuilder()
+
+ # Remove existing beans from the scene
+ coffee_beans = RequiredObject(name=ObjectInstanceId.parse("CoffeeBeans_01_1"))
+ coffee_beans.update_state("Blacklist", "true")
+
+ # Coffee unmaker
+ coffee_unmaker = RequiredObject(name=ObjectInstanceId.parse("CoffeeUnMaker_01_1"))
+
+ # Create the target object
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.update_state("isFilled", "Coffee")
+ target_object.update_state("isHot", "true")
+
+ # Create the breakroom table
+ breakroom_table = required_objects_builder.breakroom_table()
+
+ # Put the target object on the breakroom table
+ target_object.update_receptacle(breakroom_table.name)
+
+ conditions = [
+        # Pick up the target object
+ StateCondition(
+ stateName="HoldingMug",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True)
+ ),
+ ),
+        # Fill the coffee unmaker with coffee, which should come from the target object
+ StateCondition(
+ stateName="CoffeeUnMakerFilledWithCoffee",
+ context=coffee_unmaker.name,
+ expression=StateExpression.from_expression(
+ AndExpression.from_expressions(
+ IsFilledWithExpression(target=coffee_unmaker.name, fluid="Coffee"),
+ NotExpression(
+ expression=StateExpression.from_expression(
+ IsFilledWithExpression(target=target_object.name, fluid="Coffee")
+ )
+ ),
+ )
+ ),
+ ),
+ # Turn on the coffee unmaker
+ StateCondition(
+ stateName="CoffeeUnMakerOn",
+ context=coffee_unmaker.name,
+ expression=StateExpression.from_expression(
+ IsToggledOnExpression(target=coffee_unmaker.name, value=True)
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ return ChallengeBuilderOutput(
+ start_room="BreakRoom",
+ office_layout=office_layout,
+ required_objects={
+ breakroom_table.name: breakroom_table,
+ target_object.name: target_object,
+ coffee_unmaker.name: coffee_unmaker,
+ coffee_beans.name: coffee_beans,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ "find the coffee unmaker",
+ "pour the coffee into the coffee unmaker",
+ "toggle the coffee unmaker",
+ ],
+ preparation_plan=[
+ f"find the {target_object.readable_name}",
+ f"pick up the {target_object.readable_name}",
+ ],
+ )
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=coffee_unmaker.object_id,
+ target_object=target_object.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for color in get_args(ColorChangerObjectColor):
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=coffee_unmaker.object_id,
+ target_object=target_object.object_id,
+ target_object_color=color,
+ )
+
+ # Register the challenge builder with the modifications
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_coffee_unmaker_challenges(enable_color_variants: bool = True) -> None:
+ """Register challenges with the coffee unmaker."""
+ target_object_iterator = [
+ (ObjectInstanceId.parse("Bowl_01_1"), True),
+ (ObjectInstanceId.parse("CoffeePot_01_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Boss_1"), True),
+ (ObjectInstanceId.parse("CoffeeMug_Yellow_1"), True),
+ ]
+ for layout in get_args(OfficeLayout):
+ for target_object, with_color_variants in target_object_iterator:
+ convert_coffee_from_target_object_to_beans(
+ target_object_instance_id=target_object,
+ office_layout=layout,
+ with_color_variants=enable_color_variants & with_color_variants,
+ )
+
+ convert_coffee_from_pot_to_beans(office_layout=layout)
diff --git a/src/arena_missions/challenges/using_color_changer.py b/src/arena_missions/challenges/using_color_changer.py
new file mode 100644
index 0000000..e00da4b
--- /dev/null
+++ b/src/arena_missions/challenges/using_color_changer.py
@@ -0,0 +1,139 @@
+from typing import get_args
+
+from arena_missions.builders import ChallengeBuilder, ChallengeBuilderOutput, RequiredObjectBuilder
+from arena_missions.constants.arena import ColorChangerObjectColor
+from arena_missions.structures import (
+ ColorMetaDataChangeExpression,
+ ContainsExpression,
+ HighLevelKey,
+ IsPickedUpExpression,
+ ObjectInstanceId,
+ RequiredObject,
+ StateCondition,
+ StateExpression,
+ TaskGoal,
+)
+
+
+def create_change_object_color_challenge(
+ target_object_instance_id: ObjectInstanceId,
+ converted_object_color: ColorChangerObjectColor,
+ with_color_variants: bool = False,
+) -> None:
+ """Generate challenes to transform an object's color using the color changer."""
+ # Create the target object
+ required_objects_builder = RequiredObjectBuilder()
+
+ receptacle = required_objects_builder.breakroom_table()
+ color_changer = required_objects_builder.color_changer()
+
+ target_object = RequiredObject(name=target_object_instance_id)
+ target_object.add_state("Unique", "true")
+
+    # Put the target object on the breakroom table
+ target_object.update_receptacle(receptacle.name)
+
+ conditions = [
+ # Ensure the object is picked up from the receptacle
+ StateCondition(
+ stateName="PickedUpFromReceptacle",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ IsPickedUpExpression(target=target_object.name, value=True)
+ ),
+ ),
+ StateCondition(
+ stateName="OnColorChanger",
+ context=color_changer.name,
+ expression=StateExpression.from_expression(
+ ContainsExpression(target=color_changer.name, contains=target_object.name)
+ ),
+ ),
+ StateCondition(
+ stateName="ChangedColor",
+ context=target_object.name,
+ expression=StateExpression.from_expression(
+ ColorMetaDataChangeExpression(
+ target=target_object.name, colorvalue=converted_object_color
+ )
+ ),
+ ),
+ ]
+
+ goals = [TaskGoal.from_state_condition(condition) for condition in conditions]
+
+ def create_mission() -> ChallengeBuilderOutput:
+ """Create the mission."""
+ return ChallengeBuilderOutput(
+ start_room="Lab2",
+ required_objects={
+ receptacle.name: receptacle,
+ target_object.name: target_object,
+ color_changer.name: color_changer,
+ },
+ task_goals=goals,
+ state_conditions=conditions,
+ plan=[
+ f"go to the {color_changer.readable_name}",
+ f"place the {target_object.readable_name} in the {color_changer.readable_name}",
+ f"press the {converted_object_color} button",
+ ],
+ preparation_plan=[
+ "go to the breakroom",
+ f"pick up the {target_object_instance_id.readable_name}",
+ ],
+ )
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ target_object=target_object_instance_id.object_id,
+ converted_object_color=converted_object_color,
+ interaction_object=color_changer.object_id,
+ )
+
+ ChallengeBuilder.register(high_level_key)(create_mission)
+
+ # Register versions of the challenges with color variants
+ if with_color_variants:
+ for start_color in get_args(ColorChangerObjectColor):
+ # Do not try to turn an object into the same color
+ if converted_object_color == start_color:
+ continue
+
+ colored_target_object_kwargs = {
+ "required_objects": {
+ target_object.name: {"colors": [start_color]},
+ }
+ }
+
+ high_level_key = HighLevelKey(
+ action="interact",
+ interaction_object=color_changer.object_id,
+ target_object=target_object.object_id,
+ target_object_color=start_color,
+ converted_object_color=converted_object_color,
+ )
+
+ ChallengeBuilder.register_with_modifiers(high_level_key, colored_target_object_kwargs)(
+ create_mission
+ )
+
+
+def register_color_changer_challenges(enable_start_color_variants: bool = True) -> None:
+ """Register challenges to change object color using the color changer."""
+ target_object_iterator = [
+ ObjectInstanceId.parse("Apple_1"),
+ ObjectInstanceId.parse("Bowl_01_1"),
+ ObjectInstanceId.parse("Carrot_01_1"),
+ ObjectInstanceId.parse("CoffeeMug_Yellow_1"),
+ ObjectInstanceId.parse("DeskFan_New_01_1"),
+ ObjectInstanceId.parse("FoodPlate_01_1"),
+ ]
+
+ color_changer_colors = get_args(ColorChangerObjectColor)
+
+ for target_object in target_object_iterator:
+ for object_color in color_changer_colors:
+ create_change_object_color_challenge(
+ target_object, object_color, enable_start_color_variants
+ )
diff --git a/src/arena_missions/constants/__init__.py b/src/arena_missions/constants/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/arena_missions/constants/arena.py b/src/arena_missions/constants/arena.py
new file mode 100644
index 0000000..f293763
--- /dev/null
+++ b/src/arena_missions/constants/arena.py
@@ -0,0 +1,467 @@
+from functools import lru_cache
+from pathlib import Path
+from typing import Literal
+
+import orjson
+
+
+OfficeLayout = Literal[
+ "OfficeLayout1",
+ "OfficeLayout1B",
+ "OfficeLayout1C",
+ "OfficeLayout3",
+ "OfficeLayout1_mirror",
+ "OfficeLayout1B_mirror",
+ "OfficeLayout1C_mirror",
+ "OfficeLayout3_mirror",
+]
+
+OfficeRoom = Literal[
+ "BreakRoom",
+ "MainOffice",
+ "SmallOffice",
+ "Lab1",
+ "Lab2",
+ "Hallway",
+ "Reception",
+ "Warehouse",
+]
+
+ObjectIds = Literal[
+ "ActionFigure",
+ "AP_Bld_Ceiling_Aircon_01",
+ "AP_Bld_Wall_Glass_Large_Door_01",
+ "AP_Item_Tape_01",
+ "AP_Item_Tool_Board",
+ "AP_Prop_Barrel_Open_01",
+ "AP_Prop_Barrel_Water_01",
+ "AP_Prop_Bin_Rubbish_01",
+ "AP_Prop_Bucket_02",
+ "AP_Prop_Cabinets_01",
+ "AP_Prop_CardboardBox_Open_05",
+ "AP_Prop_CardboardBox_Stack_02",
+ "AP_Prop_Cellotape_01",
+ "AP_Prop_CorkBoard_02",
+ "AP_Prop_Couch_02",
+ "AP_Prop_Couch_06",
+ "AP_Prop_Desk_Blue",
+ "AP_Prop_Desk_Green_model",
+ "AP_Prop_Desk_Green",
+ "AP_Prop_Desk_Red_model",
+ "AP_Prop_Desk_Red",
+ "AP_Prop_Desk_Yellow",
+ "AP_Prop_Fire_Extinguisher_01",
+ "AP_Prop_Folder_PVC_02",
+ "AP_Prop_Generator_Large_02",
+ "AP_Prop_Lab_Clamp_02_Arm_01",
+ "AP_Prop_Lab_MachinePanel_01",
+ "AP_Prop_Lab_MachinePanel_02",
+ "AP_Prop_Lab_Tank_01",
+ "AP_Prop_Lab_Tank_02",
+ "AP_Prop_Minigolf_Ball_01",
+ "AP_Prop_Minigolf_Club_01",
+ "AP_Prop_Note_05",
+ "AP_Prop_PaperTray_01_Full_01",
+ "AP_Prop_Pen_01",
+ "AP_Prop_Pen_03",
+ "AP_Prop_Pen_06",
+ "AP_Prop_Photocopier_01",
+ "AP_Prop_Plant_01",
+ "AP_Prop_Plant_09",
+ "AP_Prop_Print_Tube_01",
+ "AP_Prop_Safety_Barrier_02",
+ "AP_Prop_Shelf_06",
+ "AP_Prop_Shelf_Wall_04",
+ "AP_Prop_Shelf_Wall_FreezeRay",
+ "AP_Prop_Shelf_Wall_Laser",
+ "AP_Prop_Sign_OutofOrder_01",
+ "AP_Prop_Target_Circle_01",
+ "AP_Prop_Whiteboard_Devices_03",
+ "AP_Prop_Whiteboard_Devices_04",
+ "AP_Prop_Whiteboard_Devices_05",
+ "AP_Prop_Whiteboard_Devices_06",
+ "AP_Prop_Whiteboard_Devices_07",
+ "AP_Prop_Whiteboard_Devices_08",
+ "AP_Prop_Whiteboard_Devices_09",
+ "AP_Prop_Whiteboard_Devices_10",
+ "AP_Prop_Whiteboard_Devices_11",
+ "AP_Prop_Whiteboard_Devices_12",
+ "AP_Prop_Whiteboard_Devices_13",
+ "AP_Prop_Whiteboard_Devices_14",
+ "AP_Prop_Whiteboard_Devices_15",
+ "AP_Tool_Buffer_01_Battery",
+ "Apple",
+ "AppleSlice_01",
+ "Banana_01",
+ "BananaBunch_01",
+ "Bookshelf_Wooden_01",
+ "Bowl_01",
+ "BreadLoaf",
+ "BreadSlice_01",
+ "Broken_Cord_01",
+ "Burger_04",
+ "CableFrayed_01",
+ "Cake_02",
+ "CakeSlice_02",
+ "CandyBar_01",
+ "CandyJar_01",
+ "CanSoda_01",
+ "CanSodaNew_01",
+ "CanSodaNew_Crushed_01",
+ "CanSodaNew_Open_01",
+ "Carrot_01",
+ "Cereal_Box_01",
+ "CoffeeBeans_01",
+ "CoffeeCup_Lid_01",
+ "CoffeeCup_Open_Empty_01",
+ "CoffeeCup_Open_Empty_02",
+ "CoffeeMaker_01",
+ "CoffeeMug_Boss",
+ "CoffeeMug_Yellow",
+ "CoffeePot_01",
+ "CoffeeUnMaker_01",
+ "ColorChanger_Button_Blue",
+ "ColorChanger_Button_Green",
+ "ColorChanger_Button_Red",
+ "ColorChangerStation",
+ "Computer_Monitor_01",
+ "Computer_Monitor_Broken",
+ "Computer_Monitor_New",
+ "CounterBase_03",
+ "Cutting_Board",
+ "Dart",
+ "DartBoard",
+ "Deembiggenator_Crates",
+ "Desk_01",
+ "DeskFan_Broken_01",
+ "DeskFan_New_01",
+ "Donut_01",
+ "Door_01",
+ "EAC_Machine",
+ "EAC_Machine_Spawned_Carrot_01",
+ "Embiggenator",
+ "EmptyPaperTray",
+ "FireAlarm_01",
+ "FireExtinguisher_01",
+ "Floppy_AntiVirus_Broken",
+ "Floppy_AntiVirus",
+ "Floppy_Virus_Broken",
+ "Floppy_Virus",
+ "FoodPlate_01",
+ "Fork_01",
+ "Fork_Lift",
+ "ForkLift",
+ "FreezeRay",
+ "FridgeLower_02",
+ "FridgeUpper_02",
+ "FulllPaperTray_01",
+ "FuseBox_01_Lever",
+ "FuseBox_01",
+ "FuseBox_02",
+ "GravityPad",
+ "Hammer",
+ "Handsaw",
+ "Jar_Jam_01",
+ "Jar_PeanutButter_01",
+ "Keyboard",
+ "KitchenCabinet_01_Trapped",
+ "KitchenCabinet_01",
+ "KitchenCabinet_02",
+ "KitchenCounter01",
+ "KitchenCounterBase_02",
+ "KitchenCounterBase_03",
+ "KitchenCounterDrawer_02",
+ "KitchenCounterDrawer_03",
+ "KitchenCounterSink_01",
+ "KitchenCounterTop_02",
+ "KitchenStool_01",
+ "Knife_01",
+ "Lab_Terminal",
+ "Laser_CircuitBoard",
+ "Laser_ControlPanel",
+ "Laser_Tip_Broken",
+ "Laser_Tip",
+ "Laser",
+ "LaserBase_toy",
+ "LightSwitch_01",
+ "Manager_Chair",
+ "ManagerDesk",
+ "Microwave_01",
+ "MilkCarton_01",
+ "MissionItemHolder",
+ "Office_Chair",
+ "PackingBox",
+ "PaperCup_01",
+ "PaperCup_Crushed_01",
+ "PBJ_Sandwich",
+ "Pear_01",
+ "PieFruit_01",
+ "PieFruitSlice_01",
+ "PinBoard_01",
+ "PinBoard_02",
+ "PortalGenerator",
+ "PowerOutlet_01",
+ "Printer_3D",
+ "Printer_Cartridge_Figure",
+ "Printer_Cartridge_Hammer",
+ "Printer_Cartridge_Lever",
+ "Printer_Cartridge_Mug",
+ "Printer_Cartridge",
+ "Printer_3D_1_Spawned_ActionFigure",
+ "Printer_3D_1_Spawned_CoffeeMug_Yellow",
+ "Printer_3D_1_Spawned_FuseBox_01_Lever",
+ "Printer_3D_1_Spawned_Hammer",
+ "Radio_01_Broken",
+ "Radio_01",
+ "ReceptionDesk",
+ "Record_01",
+ "RoboticArm_01",
+ "SafetyBarrier_02",
+ "SandwichHalf_01",
+ "Screwdriver",
+ "Security_Button",
+ "Shelf_01",
+ "Shelves_Tall_01",
+ "sign_diamond_carrot",
+ "sign_diamond_fire",
+ "sign_diamond_freeze",
+ "sign_diamond_gravity",
+ "sign_diamond_laser",
+ "sign_diamond_quantum",
+ "sign_diamond_shrink",
+ "sign_office_layout_1",
+ "sign_short_breakroom_1",
+ "sign_short_breakroom_2",
+ "sign_short_caution_carrot",
+ "sign_short_caution_electrical",
+ "sign_short_caution_gravity_1",
+ "sign_short_caution_gravity_2",
+ "sign_short_caution_quantum_1",
+ "sign_short_caution_quantum_2",
+ "sign_short_caution_restricted_1",
+ "sign_short_caution_shrink",
+ "sign_short_office_1",
+ "sign_short_poster_delwan_1",
+ "sign_short_poster_delwan_2",
+ "sign_short_poster_delwan_3",
+ "sign_short_poster_delwan_4",
+ "sign_short_poster_tam",
+ "sign_short_quantum_1",
+ "sign_short_quantum_2",
+ "sign_short_robotics_1",
+ "sign_short_robotics_2",
+ "sign_short_warehouse_1",
+ "sign_square_breakroom",
+ "sign_tall_caution_carrot",
+ "sign_tall_caution_electrical",
+ "sign_tall_caution_freeze",
+ "sign_tall_caution_laser",
+ "sign_tall_caution_robotics",
+ "sign_tall_caution_shrink",
+ "sign_tall_poster_tam_1",
+ "sign_tall_poster_tam_2",
+ "SK_Veh_Pickup_01_ToolBox",
+ "SM_Bld_Door_02",
+ "SM_Bld_Wall_Metal_Slide_02",
+ "SM_Bld_Wall_Window_Blinds_Open_04",
+ "SM_Item_Clipboard_01",
+ "SM_Prop_AirVent_01",
+ "SM_Prop_AirVent_Wall_01",
+ "SM_Prop_Book_Group_01",
+ "SM_Prop_Book_Group_02",
+ "SM_Prop_Book_Group_03",
+ "SM_Prop_Book_Group_04",
+ "SM_Prop_Book_Group_05",
+ "SM_Prop_Book_Group_06",
+ "SM_Prop_Book_Group_07",
+ "SM_Prop_Book_Group_08",
+ "SM_Prop_Book_Magazine_01",
+ "SM_Prop_Book_Phone_Open_01",
+ "SM_Prop_Buttons_02",
+ "SM_Prop_Buttons_05",
+ "SM_Prop_Calender_01",
+ "SM_Prop_Cart_01",
+ "SM_Prop_Certificate_01",
+ "SM_Prop_Crate_Stack_01",
+ "SM_Prop_Drink_Dispenser_01",
+ "SM_Prop_FlatPackCardboardBoxes_03",
+ "SM_Prop_FlatPackCardboardBoxes_04",
+ "SM_Prop_Folder_Holder_01",
+ "SM_Prop_Folder_Holder_02",
+ "SM_Prop_Folder_Holder_03",
+ "SM_Prop_Folder_Holder_04",
+ "SM_Prop_Folder_Manila_01",
+ "SM_Prop_Folder_Manila_02",
+ "SM_Prop_Folder_Manila_03",
+ "SM_Prop_Folder_Manila_04",
+ "SM_Prop_Folder_PVC_01",
+ "SM_Prop_Folder_PVC_02",
+ "SM_Prop_FolderTray_01",
+ "SM_Prop_FolderTray_02",
+ "SM_Prop_FolderTray_03",
+ "SM_Prop_FolderTray_04",
+ "SM_Prop_Lighting_Cable_Bulb_01",
+ "SM_Prop_NetCable_03",
+ "SM_Prop_NotePad_01",
+ "SM_Prop_Oxygen_Tank Water",
+ "SM_Prop_Oxygen_Tank_Large",
+ "SM_Prop_Oxygen_Tank",
+ "SM_Prop_PalletStack_02",
+ "SM_Prop_Paper_04",
+ "SM_Prop_Paper_05",
+ "SM_Prop_Paper_06",
+ "SM_Prop_Paper_Pile_01",
+ "SM_Prop_Paper_Pile_03",
+ "SM_Prop_Papers_01",
+ "SM_Prop_PaperTray_01_Full_01",
+ "SM_Prop_Plastic_Pipe_Spool_01",
+ "SM_Prop_PowerBoxes_01",
+ "SM_Prop_Powercable_01",
+ "SM_Prop_Powercable_02",
+ "SM_Prop_Powercable_03",
+ "SM_Prop_Scales_01",
+ "SM_Prop_Server_Cabinet_01",
+ "SM_Prop_Server_Node_01",
+ "SM_Prop_Table_02",
+ "SM_Prop_ToolBox_01",
+ "SM_Prop_Warehouse_Boxes_Stacked_03",
+ "SM_Prop_Warehouse_Boxes_Stacked_04",
+ "SM_Prop_Warehouse_Light_04",
+ "SM_Prop_Warehouse_Platform_Trolley_01",
+ "SM_Prop_Wirespool_01",
+ "SM_Prop_Wirespool_Small_01",
+ "SM_Sign_Exit_02",
+ "SM_Tool_Buffer_01_Battery",
+ "SM_Tool_Drill_Chuck_01",
+ "SM_Tool_Handsaw_01",
+ "Spoon_01",
+ "StickyNote",
+ "Table_Metal_01",
+ "TableRound_02",
+ "TableRoundSmall_02",
+ "TAMPrototypeHead_01",
+ "TeslaCoil_Small",
+ "TeslaCoil",
+ "Toast_01",
+ "Toast_02",
+ "Toast_03",
+ "Toast_04_Jam",
+ "Toast_04_PBJ",
+ "Toast_04",
+ "Toaster_02",
+ "ToyBed",
+ "TrashCan_01",
+ "Trophy01",
+ "Unassigned",
+ "V_Monitor_Embiggenator",
+ "V_Monitor_FreezeRay",
+ "V_Monitor_Gravity",
+ "V_Monitor_Laser",
+ "V_Monitor_Portal",
+ "VendingMachine_01_B4_Button",
+ "VendingMachine_01_E5_Button",
+ "VendingMachine_01_E7_Button",
+ "VendingMachine_01_M8_Button",
+ "VendingMachine_01",
+ "WallClock_01",
+ "Warehouse_Boxes",
+ "WarningSign_01",
+ "WaterCooler_01",
+ "WaterPuddle_01",
+ "WhiteBoard_01",
+ "Whiteboard_CoffeeUnmaker",
+ "Whiteboard_YesterdayMachine",
+ "YesterdayMachine_01",
+]
+
+
+ObjectColor = Literal[
+ "Black",
+ "Blue",
+ "Brown",
+ "Green",
+ "Gray",
+ "Red",
+ "Yellow",
+]
+
+ColorChangerObjectColor = Literal[
+ "Red",
+ "Green",
+ "Blue",
+]
+
+
+RequiredObjectStateName = Literal[
+ "isToggledOn",
+ "isOpen",
+ "isFilled",
+ "isHot",
+ "isCold",
+ "isCooked",
+ "isCut",
+ "isDirty",
+ "isBroken",
+ "isEaten",
+ "isSparking",
+ "isOverloaded",
+ # Will only work for the emotion tester
+ "isNeutral",
+ "isHappy",
+ "isSad",
+ "isAngry",
+ "isScared",
+ # isLocked only works on Doors
+ "isLocked",
+ # Only works on receptacles
+ "isEmpty",
+ # Empties receptacles on challenge start
+ "removeInitialContainedItems",
+    # Ensures the object is infected; I don't know why this is different from the goal state
+ "Infected",
+ # Prevents the object from spawning normally
+ "Blacklist",
+ # Ensures it is the only object of its type
+ "Unique",
+ # Removes the object if it exists from some other definition
+ "Removed",
+    # I don't know what circuits and power do, and I don't think it matters since all circuits
+ # seem to be disabled or global
+ "circuitId",
+ "generateCircuitId",
+ "generatePower",
+]
+
+GoalStateExpressionKey = Literal[
+ "isInfected",
+ # Will only work for the emotion tester
+ "isNeutral",
+ "isHappy",
+ "isSad",
+ "isAngry",
+ "isScared",
+]
+
+
+BooleanStr = Literal["true", "false"]
+SpawnRelation = Literal["in"]
+FluidType = Literal["Water", "Milk", "Coffee", "None"]
+
+
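+# Cached so the JSON file is only read from disk once per process.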
+@lru_cache(maxsize=1)
+def load_object_id_to_readable_name_map() -> dict[ObjectIds, str]:
+ """Load mapping of Object ID to a readable name."""
+ json_file = Path(__file__).parent.joinpath("object_id_to_readable_name.json")
+ mapping = orjson.loads(json_file.read_bytes())
+ return mapping
+
+
+@lru_cache(maxsize=1)
+def get_all_readable_names() -> list[str]:
+ """Get all the readable names."""
+ return list(set(load_object_id_to_readable_name_map().values()))
+
+
+def is_readable_name(name: str) -> bool:
+ """Check if the name is a readable name."""
+ return name in get_all_readable_names()
diff --git a/src/arena_missions/constants/object_id_to_readable_name.json b/src/arena_missions/constants/object_id_to_readable_name.json
new file mode 100644
index 0000000..9a63e89
--- /dev/null
+++ b/src/arena_missions/constants/object_id_to_readable_name.json
@@ -0,0 +1,346 @@
+{
+ "ActionFigure": "Action Figure",
+ "AP_Bld_Ceiling_Aircon_01": "Vent",
+ "AP_Bld_Wall_Glass_Large_Door_01": "Door",
+ "AP_Item_Tape_01": "Tape",
+ "AP_Item_Tool_Board": "Tool Board",
+ "AP_Prop_Barrel_Open_01": "Water Barrel",
+ "AP_Prop_Barrel_Water_01": "Water Barrel",
+ "AP_Prop_Bin_Rubbish_01": "Trash Can",
+ "AP_Prop_Bucket_02": "Trash Can",
+ "AP_Prop_Cabinets_01": "Cabinet",
+ "AP_Prop_CardboardBox_Open_05": "Boxes",
+ "AP_Prop_CardboardBox_Stack_02": "Boxes",
+ "AP_Prop_Cellotape_01": "Tape",
+ "AP_Prop_CorkBoard_02": "Board",
+ "AP_Prop_Couch_02": "Couch",
+ "AP_Prop_Couch_06": "Couch",
+ "AP_Prop_Desk_Blue": "Blue Desk",
+ "AP_Prop_Desk_Green_model": "Green Desk",
+ "AP_Prop_Desk_Green": "Green Desk",
+ "AP_Prop_Desk_Red_model": "Red Desk",
+ "AP_Prop_Desk_Red": "Red Desk",
+ "AP_Prop_Desk_Yellow": "Yellow Desk",
+ "AP_Prop_Fire_Extinguisher_01": "Fire Extinguisher",
+ "AP_Prop_Folder_PVC_02": "Folder",
+ "AP_Prop_Generator_Large_02": "Generator",
+ "AP_Prop_Lab_Clamp_02_Arm_01": "Clamp",
+ "AP_Prop_Lab_MachinePanel_01": "Machine Panel",
+ "AP_Prop_Lab_MachinePanel_02": "Machine Panel",
+ "AP_Prop_Lab_Tank_01": "Tank",
+ "AP_Prop_Lab_Tank_02": "Tank",
+ "AP_Prop_Minigolf_Ball_01": "Golf Ball",
+ "AP_Prop_Minigolf_Club_01": "Golf Club",
+ "AP_Prop_Note_05": "Sticky Note",
+ "AP_Prop_PaperTray_01_Full_01": "Tray",
+ "AP_Prop_Pen_01": "Pen",
+ "AP_Prop_Pen_03": "Pen",
+ "AP_Prop_Pen_06": "Pen",
+ "AP_Prop_Photocopier_01": "Photocopier",
+ "AP_Prop_Plant_01": "Plant",
+ "AP_Prop_Plant_09": "Plant",
+ "AP_Prop_Print_Tube_01": "Print Tube",
+ "AP_Prop_Safety_Barrier_02": "Warning Sign",
+ "AP_Prop_Shelf_06": "Shelf",
+ "AP_Prop_Shelf_Wall_04": "Freeze Ray Shelf",
+ "AP_Prop_Shelf_Wall_FreezeRay": "Freeze Ray Shelf",
+ "AP_Prop_Shelf_Wall_Laser": "Laser Shelf",
+ "AP_Prop_Sign_OutofOrder_01": "Door Sign",
+ "AP_Prop_Target_Circle_01": "Target",
+ "AP_Prop_Whiteboard_Devices_03": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_04": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_05": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_06": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_07": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_08": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_09": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_10": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_11": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_12": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_13": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_14": "Whiteboard",
+ "AP_Prop_Whiteboard_Devices_15": "Whiteboard",
+ "AP_Tool_Buffer_01_Battery": "Battery",
+ "Apple": "Apple",
+ "AppleSlice_01": "Apple",
+ "Banana_01": "Banana",
+ "BananaBunch_01": "Banana",
+ "Bookshelf_Wooden_01": "Bookshelf",
+ "Bowl_01": "Bowl",
+ "BreadLoaf": "Bread",
+ "BreadSlice_01": "Bread",
+ "Broken_Cord_01": "Cable",
+ "Burger_04": "Burger",
+ "CableFrayed_01": "Cable",
+ "Cake_02": "Cake",
+ "CakeSlice_02": "Cake",
+ "CandyBar_01": "Candy Bar",
+ "CandyJar_01": "Jar",
+ "CanSoda_01": "Can",
+ "CanSodaNew_01": "Can",
+ "CanSodaNew_Crushed_01": "Can",
+ "CanSodaNew_Open_01": "Can",
+ "Carrot_01": "Carrot",
+ "Cereal_Box_01": "Cereal Box",
+ "CoffeeBeans_01": "Coffee Beans",
+ "CoffeeCup_Lid_01": "Lid",
+ "CoffeeCup_Open_Empty_01": "Cup",
+ "CoffeeCup_Open_Empty_02": "Cup",
+ "CoffeeMaker_01": "Coffee Maker",
+ "CoffeeMug_Boss": "Mug",
+ "CoffeeMug_Yellow": "Mug",
+ "CoffeePot_01": "Coffee Pot",
+ "CoffeeUnMaker_01": "Coffee Unmaker",
+ "ColorChanger_Button_Blue": "Blue Button",
+ "ColorChanger_Button_Green": "Green Button",
+ "ColorChanger_Button_Red": "Red Button",
+ "ColorChangerStation": "Color Changer",
+ "Computer_Monitor_01": "Computer",
+ "Computer_Monitor_Broken": "Computer",
+ "Computer_Monitor_New": "Computer",
+ "CounterBase_03": "Counter",
+ "Cutting_Board": "Cutting Board",
+ "Dart": "Dart",
+ "DartBoard": "Dart Board",
+ "Deembiggenator_Crates": "Crate",
+ "Desk_01": "Desk",
+ "DeskFan_Broken_01": "Fan",
+ "DeskFan_New_01": "Fan",
+ "Donut_01": "Donut",
+ "Door_01": "Door",
+ "EAC_Machine": "Everything's A Carrot Machine",
+ "Embiggenator": "Embiggenator",
+ "EmptyPaperTray": "Tray",
+ "FireAlarm_01": "Fire Alarm",
+ "FireExtinguisher_01": "Fire Extinguisher",
+ "Floppy_AntiVirus_Broken": "Floppy Disk",
+ "Floppy_AntiVirus": "Floppy Disk",
+ "Floppy_Virus_Broken": "Floppy Disk",
+ "Floppy_Virus": "Floppy Disk",
+ "FoodPlate_01": "Plate",
+ "Fork_01": "Fork",
+ "Fork_Lift": "Forklift",
+ "ForkLift": "Forklift",
+ "FreezeRay": "Freeze Ray",
+ "FridgeLower_02": "Fridge",
+ "FridgeUpper_02": "Freezer",
+ "FulllPaperTray_01": "Tray",
+ "FuseBox_01_Lever": "Lever",
+ "FuseBox_01": "Fuse Box",
+ "FuseBox_02": "Fuse Box",
+ "GravityPad": "Gravity Pad",
+ "Hammer": "Hammer",
+ "Handsaw": "Handsaw",
+ "Jar_Jam_01": "Jar",
+ "Jar_PeanutButter_01": "Jar",
+ "Keyboard": "Keyboard",
+ "KitchenCabinet_01_Trapped": "Cabinet",
+ "KitchenCabinet_01": "Cabinet",
+ "KitchenCabinet_02": "Cabinet",
+ "KitchenCounter01": "Counter",
+ "KitchenCounterBase_02": "Counter",
+ "KitchenCounterBase_03": "Counter",
+ "KitchenCounterDrawer_02": "Drawer",
+ "KitchenCounterDrawer_03": "Drawer",
+ "KitchenCounterSink_01": "Sink",
+ "KitchenCounterTop_02": "Counter Top",
+ "KitchenStool_01": "Stool",
+ "Knife_01": "Knife",
+ "Lab_Terminal": "Computer",
+ "Laser_CircuitBoard": "Circuit Board",
+ "Laser_ControlPanel": "Control Panel",
+ "Laser_Tip_Broken": "Laser Tip",
+ "Laser_Tip": "Laser Tip",
+ "Laser": "Laser",
+ "LaserBase_toy": "Laser Toy",
+ "LightSwitch_01": "Light Switch",
+ "Manager_Chair": "Chair",
+ "ManagerDesk": "Desk",
+ "Microwave_01": "Microwave",
+ "MilkCarton_01": "Milk",
+ "MissionItemHolder": "Unassigned",
+ "Office_Chair": "Chair",
+ "PackingBox": "Boxes",
+ "PaperCup_01": "Cup",
+ "PaperCup_Crushed_01": "Cup",
+ "PBJ_Sandwich": "Sandwich",
+ "Pear_01": "Pear",
+ "PieFruit_01": "Pie",
+ "PieFruitSlice_01": "Pie",
+ "PinBoard_01": "Pin Board",
+ "PinBoard_02": "Pin Board",
+ "PortalGenerator": "Generator",
+ "PowerOutlet_01": "Outlet",
+ "Printer_3D": "Printer",
+ "Printer_Cartridge_Figure": "Printer Cartridge",
+ "Printer_Cartridge_Hammer": "Printer Cartridge",
+ "Printer_Cartridge_Lever": "Printer Cartridge",
+ "Printer_Cartridge_Mug": "Printer Cartridge",
+ "Printer_Cartridge": "Printer Cartridge",
+ "Printer_3D_1_Spawned_ActionFigure": "Action Figure",
+ "Printer_3D_1_Spawned_CoffeeMug_Yellow": "Mug",
+ "Printer_3D_1_Spawned_FuseBox_01_Lever": "Lever",
+ "Printer_3D_1_Spawned_Hammer": "Hammer",
+ "Radio_01_Broken": "Radio",
+ "Radio_01": "Radio",
+ "ReceptionDesk": "Reception Desk",
+ "Record_01": "Record",
+ "RoboticArm_01": "Robot Arm",
+ "SafetyBarrier_02": "Warning Sign",
+ "SandwichHalf_01": "Sandwich",
+ "Screwdriver": "Screwdriver",
+ "Security_Button": "Button",
+ "Shelf_01": "Shelf",
+ "Shelves_Tall_01": "Shelf",
+ "sign_diamond_carrot": "Warning Sign",
+ "sign_diamond_fire": "Warning Sign",
+ "sign_diamond_freeze": "Warning Sign",
+ "sign_diamond_gravity": "Warning Sign",
+ "sign_diamond_laser": "Warning Sign",
+ "sign_diamond_quantum": "Warning Sign",
+ "sign_diamond_shrink": "Warning Sign",
+ "sign_office_layout_1": "Map",
+ "sign_short_breakroom_1": "Door Sign",
+ "sign_short_breakroom_2": "Door Sign",
+ "sign_short_caution_carrot": "Door Sign",
+ "sign_short_caution_electrical": "Door Sign",
+ "sign_short_caution_gravity_1": "Door Sign",
+ "sign_short_caution_gravity_2": "Door Sign",
+ "sign_short_caution_quantum_1": "Door Sign",
+ "sign_short_caution_quantum_2": "Door Sign",
+ "sign_short_caution_restricted_1": "Door Sign",
+ "sign_short_caution_shrink": "Door Sign",
+ "sign_short_office_1": "Door Sign",
+ "sign_short_poster_delwan_1": "Poster",
+ "sign_short_poster_delwan_2": "Poster",
+ "sign_short_poster_delwan_3": "Poster",
+ "sign_short_poster_delwan_4": "Poster",
+ "sign_short_poster_tam": "Poster",
+ "sign_short_quantum_1": "Door Sign",
+ "sign_short_quantum_2": "Door Sign",
+ "sign_short_robotics_1": "Door Sign",
+ "sign_short_robotics_2": "Door Sign",
+ "sign_short_warehouse_1": "Door Sign",
+ "sign_square_breakroom": "Door Sign",
+ "sign_tall_caution_carrot": "Door Sign",
+ "sign_tall_caution_electrical": "Door Sign",
+ "sign_tall_caution_freeze": "Door Sign",
+ "sign_tall_caution_laser": "Door Sign",
+ "sign_tall_caution_robotics": "Door Sign",
+ "sign_tall_caution_shrink": "Door Sign",
+ "sign_tall_poster_tam_1": "Poster",
+ "sign_tall_poster_tam_2": "Poster",
+ "SK_Veh_Pickup_01_ToolBox": "Toolbox",
+ "SM_Bld_Door_02": "Door",
+ "SM_Bld_Wall_Metal_Slide_02": "Door",
+ "SM_Bld_Wall_Window_Blinds_Open_04": "Blinds",
+ "SM_Item_Clipboard_01": "Clipboard",
+ "SM_Prop_AirVent_01": "Vent",
+ "SM_Prop_AirVent_Wall_01": "Vent",
+ "SM_Prop_Book_Group_01": "Books",
+ "SM_Prop_Book_Group_02": "Books",
+ "SM_Prop_Book_Group_03": "Books",
+ "SM_Prop_Book_Group_04": "Books",
+ "SM_Prop_Book_Group_05": "Books",
+ "SM_Prop_Book_Group_06": "Books",
+ "SM_Prop_Book_Group_07": "Books",
+ "SM_Prop_Book_Group_08": "Books",
+ "SM_Prop_Book_Magazine_01": "Books",
+ "SM_Prop_Book_Phone_Open_01": "Books",
+ "SM_Prop_Buttons_02": "Button",
+ "SM_Prop_Buttons_05": "Button",
+ "SM_Prop_Calender_01": "Calendar",
+ "SM_Prop_Cart_01": "Cart",
+ "SM_Prop_Certificate_01": "Poster",
+ "SM_Prop_Crate_Stack_01": "Crate",
+ "SM_Prop_Drink_Dispenser_01": "Cooler",
+ "SM_Prop_FlatPackCardboardBoxes_03": "Boxes",
+ "SM_Prop_FlatPackCardboardBoxes_04": "Boxes",
+ "SM_Prop_Folder_Holder_01": "Folder Holder",
+ "SM_Prop_Folder_Holder_02": "Folder Holder",
+ "SM_Prop_Folder_Holder_03": "Folder Holder",
+ "SM_Prop_Folder_Holder_04": "Folder Holder",
+ "SM_Prop_Folder_Manila_01": "Folder",
+ "SM_Prop_Folder_Manila_02": "Folder",
+ "SM_Prop_Folder_Manila_03": "Folder",
+ "SM_Prop_Folder_Manila_04": "Folder",
+ "SM_Prop_Folder_PVC_01": "Folder",
+ "SM_Prop_Folder_PVC_02": "Folder",
+ "SM_Prop_FolderTray_01": "Tray",
+ "SM_Prop_FolderTray_02": "Tray",
+ "SM_Prop_FolderTray_03": "Tray",
+ "SM_Prop_FolderTray_04": "Tray",
+ "SM_Prop_Lighting_Cable_Bulb_01": "Light Bulb",
+ "SM_Prop_NetCable_03": "Cable",
+ "SM_Prop_NotePad_01": "Notepad",
+ "SM_Prop_Oxygen_Tank Water": "Unassigned",
+ "SM_Prop_Oxygen_Tank_Large": "Oxygen Tank",
+ "SM_Prop_Oxygen_Tank": "Oxygen Tank",
+ "SM_Prop_PalletStack_02": "Pallets",
+ "SM_Prop_Paper_04": "Paper",
+ "SM_Prop_Paper_05": "Paper",
+ "SM_Prop_Paper_06": "Paper",
+ "SM_Prop_Paper_Pile_01": "Paper",
+ "SM_Prop_Paper_Pile_03": "Paper",
+ "SM_Prop_Papers_01": "Paper",
+ "SM_Prop_PaperTray_01_Full_01": "Tray",
+ "SM_Prop_Plastic_Pipe_Spool_01": "Cable",
+ "SM_Prop_PowerBoxes_01": "Power Boxes",
+ "SM_Prop_Powercable_01": "Cable",
+ "SM_Prop_Powercable_02": "Cable",
+ "SM_Prop_Powercable_03": "Cable",
+ "SM_Prop_Scales_01": "Scale",
+ "SM_Prop_Server_Cabinet_01": "Cabinet",
+ "SM_Prop_Server_Node_01": "Server",
+ "SM_Prop_Table_02": "Table",
+ "SM_Prop_ToolBox_01": "Toolbox",
+ "SM_Prop_Warehouse_Boxes_Stacked_03": "Boxes",
+ "SM_Prop_Warehouse_Boxes_Stacked_04": "Boxes",
+ "SM_Prop_Warehouse_Light_04": "Light",
+ "SM_Prop_Warehouse_Platform_Trolley_01": "Trolley",
+ "SM_Prop_Wirespool_01": "Cable",
+ "SM_Prop_Wirespool_Small_01": "Cable",
+ "SM_Sign_Exit_02": "Door Sign",
+ "SM_Tool_Buffer_01_Battery": "Battery",
+ "SM_Tool_Drill_Chuck_01": "Drill Chuck",
+ "SM_Tool_Handsaw_01": "Handsaw",
+ "Spoon_01": "Spoon",
+ "Sticky Note": "Sticky Note",
+ "StickyNote": "Sticky Note",
+ "Table_Metal_01": "Table",
+ "TableRound_02": "Table",
+ "TableRoundSmall_02": "Table",
+ "TAMPrototypeHead_01": "Emotion Tester",
+ "TeslaCoil_Small": "Tesla Coil",
+ "TeslaCoil": "Tesla Coil",
+ "Toast_01": "Bread",
+ "Toast_02": "Bread",
+ "Toast_03": "Bread",
+ "Toast_04_Jam": "Bread",
+ "Toast_04_PBJ": "Bread",
+ "Toast_04": "Bread",
+ "Toaster_02": "Toaster",
+ "ToyBed": "Bed Toy",
+ "TrashCan_01": "Trash Can",
+ "Trophy01": "Trophy",
+ "Unassigned": "Unassigned",
+ "V_Monitor_Embiggenator": "Embiggenator Monitor",
+ "V_Monitor_FreezeRay": "Freeze Ray Monitor",
+ "V_Monitor_Gravity": "Gravity Monitor",
+ "V_Monitor_Laser": "Laser Monitor",
+ "V_Monitor_Portal": "Portal Generator Monitor",
+ "VendingMachine_01_B4_Button": "Button",
+ "VendingMachine_01_E5_Button": "Button",
+ "VendingMachine_01_E7_Button": "Button",
+ "VendingMachine_01_M8_Button": "Button",
+ "VendingMachine_01": "Vending Machine",
+ "WallClock_01": "Clock",
+ "Warehouse_Boxes": "Boxes",
+ "WarningSign_01": "Warning Sign",
+ "WaterCooler_01": "Cooler",
+ "WaterPuddle_01": "Puddle",
+ "WhiteBoard_01": "Whiteboard",
+ "Whiteboard_CoffeeUnmaker": "Whiteboard",
+ "Whiteboard_YesterdayMachine": "Whiteboard",
+ "YesterdayMachine_01": "Time Machine"
+}
diff --git a/src/arena_missions/load_challenges.py b/src/arena_missions/load_challenges.py
new file mode 100644
index 0000000..c351f66
--- /dev/null
+++ b/src/arena_missions/load_challenges.py
@@ -0,0 +1,52 @@
+from arena_missions.challenges.ambiguous_pickup import register_ambiguous_pickup_challenges
+from arena_missions.challenges.breaking_things import (
+ register_breaking_things_challenges,
+ register_breaking_things_on_desks_challenges,
+)
+from arena_missions.challenges.clean_dirty_plate import register_clean_dirty_plates
+from arena_missions.challenges.fill_object_in_sink import register_fill_objects_in_sink
+from arena_missions.challenges.objects_in_containers import (
+ register_objects_with_freezer_challenges,
+ register_objects_with_fridge_challenges,
+ register_warehouse_cabinet_challenges,
+)
+from arena_missions.challenges.operate_carrot_maker import register_carrot_maker_challenges
+from arena_missions.challenges.operate_microwave import register_heat_things
+from arena_missions.challenges.operate_printer import register_print_things
+from arena_missions.challenges.operate_time_machine import (
+ register_repair_broken_things,
+ register_repair_carrots,
+)
+from arena_missions.challenges.pickup_from_printer import register_pickup_from_printer_challenges
+from arena_missions.challenges.pickup_stack import register_pickup_plate_stack_challenges
+from arena_missions.challenges.place_stack import (
+ register_place_bowl_stack_from_gravity_pad,
+ register_place_plate_stack_challenges,
+)
+from arena_missions.challenges.using_coffee_unmaker import register_coffee_unmaker_challenges
+from arena_missions.challenges.using_color_changer import register_color_changer_challenges
+
+
+def load_challenges() -> None:
+ """Run all the register functions to load the challenges."""
+ register_objects_with_fridge_challenges(enable_color_variants=False)
+ register_objects_with_freezer_challenges(enable_color_variants=False)
+ register_warehouse_cabinet_challenges(enable_color_variants=False)
+ register_ambiguous_pickup_challenges()
+ register_pickup_plate_stack_challenges(enable_color_variants=False)
+ register_place_plate_stack_challenges(enable_color_variants=False)
+
+ # "Interaction" challenges / ones that are "nicher"
+ register_repair_broken_things(enable_color_variants=False)
+ register_carrot_maker_challenges(enable_color_variants=False)
+ register_fill_objects_in_sink(enable_color_variants=False)
+ register_heat_things(enable_color_variants=False)
+ register_clean_dirty_plates(enable_color_variants=False)
+ register_coffee_unmaker_challenges(enable_color_variants=False)
+ register_print_things()
+ register_color_changer_challenges(enable_start_color_variants=False)
+ register_repair_carrots(enable_color_variants=False)
+ register_pickup_from_printer_challenges()
+ register_place_bowl_stack_from_gravity_pad(enable_color_variants=True)
+ register_breaking_things_on_desks_challenges(enable_color_variants=False)
+ register_breaking_things_challenges(enable_color_variants=False)
diff --git a/src/arena_missions/structures/__init__.py b/src/arena_missions/structures/__init__.py
new file mode 100644
index 0000000..6f18fe5
--- /dev/null
+++ b/src/arena_missions/structures/__init__.py
@@ -0,0 +1,40 @@
+from arena_missions.structures.cdf import CDF, CDFPortal, CDFScene
+from arena_missions.structures.high_level_key import HighLevelKey
+from arena_missions.structures.mission import Mission, MissionTrajectory
+from arena_missions.structures.object_id import ObjectId, ObjectInstanceId
+from arena_missions.structures.required_object import RequiredObject, RequiredObjectState
+from arena_missions.structures.state_condition import (
+ AndExpression,
+ CanBeSeenExpression,
+ ColorMetaDataChangeExpression,
+ ContainsExpression,
+ DinoFedExpression,
+ Expression,
+ ExpressionType,
+ IsBrokenExpression,
+ IsColdExpression,
+ IsDirtyExpression,
+ IsEmbiggenatedExpression,
+ IsFilledWithExpression,
+ IsFullOfItemsExpression,
+ IsHotExpression,
+ IsInRangeExpression,
+ IsOpenExpression,
+ IsOverloadedExpression,
+ IsPickedUpExpression,
+ IsPoweredExpression,
+ IsReceptacleExpression,
+ IsScannedExpression,
+ IsToggledOnExpression,
+ IsUsedExpression,
+ NotExpression,
+ OrExpression,
+ StateCondition,
+ StateExpression,
+)
+from arena_missions.structures.task_goal import (
+ ObjectGoalState,
+ ObjectGoalStateExpression,
+ ObjectGoalStateRelation,
+ TaskGoal,
+)
diff --git a/src/arena_missions/structures/cdf.py b/src/arena_missions/structures/cdf.py
new file mode 100644
index 0000000..5ff2a6b
--- /dev/null
+++ b/src/arena_missions/structures/cdf.py
@@ -0,0 +1,116 @@
+from typing import Any, Literal, Optional
+
+from pydantic import BaseModel, Field, validator
+
+from arena_missions.constants.arena import OfficeLayout, OfficeRoom
+from arena_missions.structures.required_object import RequiredObject
+from arena_missions.structures.state_condition import StateCondition
+from arena_missions.structures.task_goal import TaskGoal
+
+
+CDF_GAME_INTERACTIONS: dict[str, Any] = { # noqa: WPS407
+ "camera_movements": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": [],
+ },
+ "game_messages": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": [],
+ },
+}
+
+
+class CDFPortal(BaseModel):
+ """Portal definition within the CDF."""
+
+ name: Literal["past", "future"] = Field(..., alias="PortalName")
+ status: bool = Field(default=False, alias="PortalStatus")
+
+
+class CDFScene(BaseModel):
+ """Scene within a CDF."""
+
+ room_location: list[OfficeRoom] = Field(
+ ..., alias="roomLocation", description="Start location of the robot"
+ )
+ required_objects: list[RequiredObject]
+ layout_override: OfficeLayout = Field(
+ ..., description="Override the layout", alias="layoutOverride"
+ )
+
+ floor_plan: str = Field(
+ default="0", description="Controls RNG during scene setup. Set to -1 for random."
+ )
+ scene_id: str = Field(default="01 (Make_Cereal)")
+
+ # Unused/Ignored fields
+ simbot_init: list[Any] = Field(default_factory=list)
+ sticky_notes: Optional[list[Any]] = Field(default_factory=list)
+ blacklisted_layouts: Optional[list[OfficeLayout]] = None
+ completely_random_visual: bool = Field(default=False, alias="completelyRandomVisual")
+
+ @validator("floor_plan")
+ @classmethod
+ def check_floor_plan_is_numeric(cls, floor_plan: str) -> str:
+ """Check that floor plan is a numeric string."""
+ try:
+ int(floor_plan)
+ except ValueError as err:
+ raise ValueError(f"Floor plan must be numeric string, got {floor_plan}") from err
+ return floor_plan
+
+
+class CDF(BaseModel):
+ """CDF, used to generate scenes in the Arena."""
+
+ scene: CDFScene
+
+ task_goals: list[TaskGoal] = Field(..., min_items=1)
+ state_conditions: list[StateCondition] = Field(default_factory=list, alias="stateconditions")
+
+ goal_text: str = ""
+ task_description: str = ""
+ game_id: str = Field(default="3")
+ experimental: str = Field(default="true")
+
+ # Unused/Ignored fields
+ game_interactions: dict[str, Any] = CDF_GAME_INTERACTIONS
+ past_portals: list[CDFPortal] = Field(
+ default=[CDFPortal(PortalName="past")], alias="pastPortals", max_items=1
+ )
+ future_portals: list[CDFPortal] = Field(
+ default=[CDFPortal(PortalName="future")], alias="futurePortals", max_items=1
+ )
+
+ @validator("task_goals")
+ @classmethod
+ def update_goal_ids_in_task_goals(cls, task_goals: list[TaskGoal]) -> list[TaskGoal]:
+ """Update goal IDs in task goals."""
+ for idx, task_goal in enumerate(task_goals):
+ task_goal.goal_id = idx
+ return task_goals
+
+ @validator("game_id")
+ @classmethod
+ def check_game_id_is_numeric(cls, game_id: str) -> str:
+ """Check that game ID is a numeric string."""
+ if not game_id.isdigit():
+ raise ValueError(f"Game ID must be numeric string, got {game_id}")
+ return game_id
+
+ @validator("experimental")
+ @classmethod
+ def check_experimental_is_bool(cls, experimental: str) -> str:
+ """Check that experimental is a boolean string."""
+ if experimental not in {"true", "false"}:
+ raise ValueError(f"Experimental must be boolean string, got {experimental}")
+ return experimental
+
+ @property
+ def start_room(self) -> OfficeRoom:
+ """Get the starting room."""
+ return self.scene.room_location[0]
diff --git a/src/arena_missions/structures/high_level_key.py b/src/arena_missions/structures/high_level_key.py
new file mode 100644
index 0000000..333d63c
--- /dev/null
+++ b/src/arena_missions/structures/high_level_key.py
@@ -0,0 +1,122 @@
+from typing import Literal, Optional
+from typing_extensions import Self
+
+from convert_case import kebab_case, snake_case, title_case
+from pydantic import BaseModel, validator
+
+from arena_missions.constants.arena import ObjectColor
+from arena_missions.structures.object_id import ObjectId
+
+
+InstructionAction = Literal[
+ "break",
+ "clean",
+ "close",
+ "fill",
+ "interact",
+ "open",
+ "pickup",
+ "place",
+ "pour",
+ "scan",
+ "toggle",
+]
+
+
+class HighLevelKey(BaseModel, validate_assignment=True, frozen=True, extra="forbid"):
+ """Structured form of the High-Level Key."""
+
+ action: InstructionAction
+
+ target_object: ObjectId
+ target_object_color: Optional[ObjectColor] = None
+ # An ambiguous target object is one where there are multiple objects in the scene, but the
+ # agent should be able to distinguish the correct one by color
+ target_object_is_ambiguous: bool = False
+
+ interaction_object: Optional[ObjectId] = None
+ interaction_object_color: Optional[ObjectColor] = None
+
+ converted_object: Optional[ObjectId] = None
+ converted_object_color: Optional[ObjectColor] = None
+
+ # A stacked object is one that is on top of a plate, and is used for "with" phrases.
+ # For example, "pick up the plate with the apple" would have the apple as the stacked object.
+ stacked_object: Optional[ObjectId] = None
+ stacked_object_color: Optional[ObjectColor] = None
+
+ from_receptacle: Optional[ObjectId] = None
+ from_receptacle_color: Optional[ObjectColor] = None
+ from_receptacle_is_container: bool = False
+
+ to_receptacle: Optional[ObjectId] = None
+ to_receptacle_color: Optional[ObjectColor] = None
+ to_receptacle_is_container: bool = False
+
+ def __str__(self) -> str:
+ """Return the string representation of the high-level key."""
+ return self.key
+
+ @classmethod
+ def from_string(cls, key_string: str) -> Self:
+ """Create the high-level key from the string."""
+ high_level_key_dict = {}
+
+ # Split the key by the # character
+ parts = [part for part in key_string.split("#") if part]
+
+ for part in parts:
+ # Split each part by the = character
+ split_part = part.split("=")
+
+ # Convert the key to snake_case
+ part_name = split_part[0]
+ part_name = snake_case(part_name)
+
+ # If the part_name is going to hold a boolean and the value does not exist, set it to
+ # True since it is a flag
+ if part_name.endswith("is_container") and len(split_part) == 1:
+ part_value = "true"
+ # Otherwise, set it to the 2nd part of the split
+ elif len(split_part) == 2:
+ part_value = split_part[1]
+ # Otherwise, raise an error
+ else:
+ raise ValueError(f"Each split part should contain 2 pieces, received {part}")
+
+ # Add it to the dictionary
+ high_level_key_dict[part_name] = part_value
+
+ # Parse it with pydantic
+ return cls.parse_obj(high_level_key_dict)
+
+ @validator(
+ "target_object_color",
+ "converted_object_color",
+ "from_receptacle_color",
+ "to_receptacle_color",
+ "interaction_object_color",
+ "stacked_object_color",
+ pre=True,
+ )
+ @classmethod
+ def format_color_string(cls, color: Optional[str]) -> Optional[str]:
+ """Format the color to be in title case."""
+ if color:
+ color = title_case(color)
+ return color
+
+ @property
+ def key(self) -> str:
+ """Get the high-level key as a string."""
+ parts: list[str] = []
+
+ for part_name, part_value in self.dict().items():
+ if part_value:
+ part_name = kebab_case(part_name)
+ if isinstance(part_value, bool):
+ parts.append(f"#{part_name}")
+ else:
+ parts.append(f"#{part_name}={part_value}")
+
+ return "".join(parts)
diff --git a/src/arena_missions/structures/mission.py b/src/arena_missions/structures/mission.py
new file mode 100644
index 0000000..076d2e4
--- /dev/null
+++ b/src/arena_missions/structures/mission.py
@@ -0,0 +1,93 @@
+from datetime import datetime
+from typing import Any, Optional, Union
+from uuid import uuid4
+
+import shortuuid
+from pydantic import BaseModel, Field
+
+from arena_missions.structures.cdf import CDF
+from arena_missions.structures.high_level_key import HighLevelKey
+
+
+class MissionTrajectory(BaseModel, smart_union=True):
+ """Single trajectory for a given mission."""
+
+ session_id: str
+ utterances: list[str]
+
+ # Preparation utterances which are not part of the mission.
+ # These are given first to help setup the environment as needed.
+ preparation_utterances: list[str] = Field(default_factory=list)
+
+ high_level_key: Optional[HighLevelKey] = None
+
+ # Challenge definition
+ cdf: Union[CDF, dict[str, Any]]
+
+ # Used by T1/T2 data
+ mission_id: Optional[str] = None
+ mission_group: Optional[str] = None
+
+ randomise_start_position: bool = True
+
+ @property
+ def cdf_as_dict(self) -> dict[str, Any]:
+ """Get the CDF as a dict."""
+ if isinstance(self.cdf, dict):
+ return self.cdf
+ return self.cdf.dict(by_alias=True)
+
+ def create_preparation_session_id(self, prefix: str = "T") -> str:
+ """Create a session ID for the preparation utterances."""
+ now = datetime.now()
+ date_chunk = f"{now.year:02d}{now.month:02d}{now.day:02d}"
+ return f"{prefix}.{date_chunk}/prep-{uuid4()}"
+
+
+class Mission(BaseModel):
+ """Single mission for the Arena.."""
+
+ high_level_key: HighLevelKey
+ plan: list[str]
+ cdf: CDF
+
+ # Preparation utterances which are not part of the mission.
+ # These are given first to help setup the environment as needed.
+ preparation_plan: list[str] = Field(default_factory=list)
+
+ # Used by T1/T2 data
+ mission_group: Optional[str] = None
+
+ randomise_start_position: bool = True
+
+ def convert_to_trajectory(
+ self,
+ session_id_prefix: str,
+ *,
+ include_randomness: bool = True,
+ randomise_start_position: bool = True,
+ ) -> MissionTrajectory:
+ """Convert the challenge to a list of single trajectories."""
+ return MissionTrajectory(
+ high_level_key=self.high_level_key,
+ session_id=self.create_session_id(
+ session_id_prefix, include_randomness=include_randomness
+ ),
+ utterances=self.plan,
+ preparation_utterances=self.preparation_plan,
+ cdf=self.cdf,
+ mission_group=self.mission_group,
+ randomise_start_position=randomise_start_position,
+ )
+
+ def create_session_id(self, prefix: str, *, include_randomness: bool = True) -> str:
+ """Create a session ID for the trajectory."""
+ safe_high_level_key = (
+ str(self.high_level_key).replace("=", "--").replace("#", "_").lstrip("_")
+ )
+
+ now = datetime.now()
+ date_chunk = f"{now.year:02d}{now.month:02d}{now.day:02d}"
+ randomness = f"-{shortuuid.uuid()[:5]}" if include_randomness else ""
+
+ return f"{prefix}.{date_chunk}/{safe_high_level_key}{randomness}"
diff --git a/src/arena_missions/structures/object_id.py b/src/arena_missions/structures/object_id.py
new file mode 100644
index 0000000..dec68ca
--- /dev/null
+++ b/src/arena_missions/structures/object_id.py
@@ -0,0 +1,106 @@
+from collections.abc import Generator
+from typing import Any, Callable, Literal, Union, cast, get_args
+from typing_extensions import Self
+
+from arena_missions.constants.arena import ObjectIds, load_object_id_to_readable_name_map
+
+
+def convert_object_instance_id_to_object_id(object_instance: str) -> str:
+ """Convert object instance to object id.
+
+ We need to remove everything after the last "_".
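+ For example, "CoffeeMug_Yellow_1" becomes "CoffeeMug_Yellow".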
+ """
+ return object_instance[::-1].split("_", 1)[1][::-1]
+
+
+class ObjectId(str): # noqa: WPS600
+ """An object ID in the Arena."""
+
+ @classmethod
+ def __get_validators__(cls) -> Generator[Callable[..., Self], None, None]:
+ """Return a generator of validators for this type."""
+ yield cls.validate
+
+ @classmethod
+ def validate(cls, v: Any) -> Self:
+ """Validate the object ID."""
+ if not isinstance(v, str):
+ raise TypeError("Object ID must be a string")
+
+ # Make sure the ID is one of the literals
+ if v not in get_args(ObjectIds):
+ raise ValueError("Object ID is not valid and does not exist in the Arena.")
+
+ return cls(v)
+
+ def __repr__(self) -> str:
+ """Return a string representation of the object ID."""
+ return f"ObjectId({super().__repr__()})"
+
+ @classmethod
+ def parse(cls, v: Any) -> Self:
+ """Parse the input."""
+ return cls.validate(v)
+
+ @property
+ def readable_name(self) -> str:
+ """Return the readable name of the object."""
+ return load_object_id_to_readable_name_map()[cast(ObjectIds, self)]
+
+ def as_instance(self, instance_id: Union[int, Literal["*"]]) -> "ObjectInstanceId":
+ """Return the object instance ID."""
+ return ObjectInstanceId(f"{self}_{instance_id}")
+
+
+class ObjectInstanceId(str): # noqa: WPS600
+ """An object instance ID in the Arena."""
+
+ @classmethod
+ def __get_validators__(cls) -> Generator[Callable[..., Self], None, None]:
+ """Return a generator of validators for this type."""
+ yield cls.validate
+
+ @classmethod
+ def validate(cls, v: Any) -> Self:
+ """Validate the object instance ID."""
+ if not isinstance(v, str):
+ raise TypeError("Object instance ID must be a string")
+
+ # Make sure it has an object ID in it
+ ObjectId.parse(convert_object_instance_id_to_object_id(v))
+
+ # Make sure the instance number is an integer
+ instance_number = v.split("_")[-1]
+
+ # Make sure the instance number does not have any leading 0s
+ if instance_number.startswith("0"):
+ raise ValueError("Object instance ID cannot have leading 0s")
+
+ if not (instance_number.isdigit() or instance_number == "*"):
+ raise ValueError("Object instance ID end with a digit or a '*'")
+
+ return cls(v)
+
+ def __repr__(self) -> str:
+ """Return a string representation of the object instance ID."""
+ return f"ObjectInstanceId({super().__repr__()})"
+
+ @classmethod
+ def parse(cls, v: Any) -> Self:
+ """Parse the input."""
+ return cls.validate(v)
+
+ @property
+ def object_id(self) -> ObjectId:
+ """Return the object ID of the object instance."""
+ return ObjectId(convert_object_instance_id_to_object_id(self))
+
+ @property
+ def readable_name(self) -> str:
+ """Return the readable name of the object instance."""
+ return self.object_id.readable_name
+
+ @property
+ def with_asterisk(self) -> Self:
+ """Create the object instance with an asterisk."""
+ return self.parse(f"{self.object_id}_*")
diff --git a/src/arena_missions/structures/required_object.py b/src/arena_missions/structures/required_object.py
new file mode 100644
index 0000000..d9fbc8e
--- /dev/null
+++ b/src/arena_missions/structures/required_object.py
@@ -0,0 +1,209 @@
+from typing import Any, Literal, Optional, Union, get_args
+
+from pydantic import BaseModel, Field, validator
+
+from arena_missions.constants.arena import (
+ BooleanStr,
+ FluidType,
+ ObjectColor,
+ OfficeRoom,
+ RequiredObjectStateName,
+ SpawnRelation,
+)
+from arena_missions.structures.object_id import ObjectId, ObjectInstanceId
+
+
+RequiredObjectStateValue = Union[BooleanStr, FluidType]
+
+
+class RequiredObjectState(BaseModel, validate_assignment=True):
+ """Spawn state of a required object."""
+
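+ # Holds a single-key mapping, e.g. {"isToggledOn": "true"} or {"isFilled": "Water"}.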
+ __root__: dict[RequiredObjectStateName, RequiredObjectStateValue]
+
+ def __len__(self) -> int:
+ """Get the length of the state."""
+ return len(self.__root__)
+
+ @classmethod
+ def from_parts(
+ cls, state_name: RequiredObjectStateName, state_value: RequiredObjectStateValue
+ ) -> "RequiredObjectState":
+ """Create a required object spawn state from parts."""
+ return cls.parse_obj({state_name: state_value})
+
+ @validator("__root__")
+ @classmethod
+ def ensure_only_one_key(
+ cls, root: dict[RequiredObjectStateName, RequiredObjectStateValue]
+ ) -> dict[RequiredObjectStateName, RequiredObjectStateValue]:
+ """Ensure that there is only one key in the dictionary."""
+ if len(root) != 1:
+ raise ValueError("State must only have one key.")
+ return root
+
+ @validator("__root__")
+ @classmethod
+ def validate_state_value_for_key(
+ cls, root: dict[RequiredObjectStateName, RequiredObjectStateValue]
+ ) -> dict[RequiredObjectStateName, RequiredObjectStateValue]:
+ """Validate the state value for the state key."""
+ state_key, state_value = list(root.items())[0]
+
+ if state_key == "isFilled":
+ if state_value not in get_args(FluidType):
+ raise ValueError(f"{state_value} is not a valid liquid type.")
+
+ elif state_value not in get_args(BooleanStr):
+ raise ValueError(f"{state_value} is not a valid boolean string.")
+
+ return root
+
+ @property
+ def name(self) -> RequiredObjectStateName:
+ """Get the name of the state."""
+ return list(self.__root__.keys())[0]
+
+ @property
+ def value(self) -> RequiredObjectStateValue: # noqa: WPS110
+ """Get the value of the state."""
+ return list(self.__root__.values())[0]
+
+
+class RequiredObject(BaseModel, validate_assignment=True):
+ """Object within the Arena."""
+
+ name: ObjectInstanceId
+ state: list[RequiredObjectState] = Field(default_factory=list, unique_items=True)
+ location: list[dict[ObjectInstanceId, SpawnRelation]] = Field(
+ default_factory=list, unique_items=True, max_items=1
+ )
+ room_location: list[OfficeRoom] = Field(
+ default_factory=list, max_items=1, alias="roomLocation"
+ )
+ colors: list[ObjectColor] = Field(default_factory=list, unique_items=True, max_items=1)
+
+ # This is only used with the Carrot
+ yesterday_state: Union[Literal[""], ObjectId] = Field(default="", alias="yesterdayState")
+
+ # Unknown/Unused fields
+ condition: dict[Any, Any] = Field(default_factory=dict)
+ printing_object: str = Field(default="", alias="printingObject", const=True)
+ associated_past_portals: list[Any] = Field(default_factory=list, alias="associatedPastPortals")
+ associated_future_portals: list[Any] = Field(
+ default_factory=list, alias="associatedFuturePortals"
+ )
+ current_portal: str = Field(default="", alias="currentPortal")
+ dino_food: str = Field(default="", alias="dinoFood")
+
+ @classmethod
+ def from_string(cls, object_instance_id: str) -> "RequiredObject":
+ """Instantiate a RequiredObject from the object instance ID."""
+ return cls(name=ObjectInstanceId.parse(object_instance_id))
+
+ @validator("state", "location", each_item=True)
+ @classmethod
+ def ensure_each_state_has_only_one_key(cls, state: dict[str, str]) -> dict[str, str]:
+ """Ensure that each state/location dict has only one key."""
+ if len(state) != 1:
+ raise ValueError("Each state must have only one key")
+ return state
+
+ @validator("yesterday_state")
+ @classmethod
+ def only_carrot_can_have_yesterday_state(
+ cls, yesterday_state: str, values: dict[str, Any] # noqa: WPS110
+ ) -> str:
+ """Only carrots can have yesterdayState."""
+ if yesterday_state:
+ if not values["name"].startswith("Carrot_01"):
+ raise ValueError("Only Carrot can have yesterdayState")
+
+ return yesterday_state
+
+ @property
+ def object_instance_id(self) -> ObjectInstanceId:
+ """Return the object instance ID for this object.
+
+ This is just here for convenience because 'name' isn't the most descriptive name.
+ """
+ return self.name
+
+ @property
+ def object_id(self) -> ObjectId:
+ """Return the object ID for this object."""
+ return self.name.object_id
+
+ @property
+ def readable_name(self) -> str:
+ """Return the readable name of this object."""
+ return self.name.readable_name
+
+ @property
+ def receptacle(self) -> Optional[ObjectInstanceId]:
+ """Return the receptacle this object is in."""
+ if self.location:
+ return list(self.location[0].keys())[0]
+ return None
+
+ @property
+ def room(self) -> Optional[OfficeRoom]:
+ """Return the room this object is in."""
+ if self.room_location:
+ return self.room_location[0]
+ return None
+
+ @property
+ def color(self) -> Optional[ObjectColor]:
+ """Return the color of this object."""
+ if self.colors:
+ return self.colors[0]
+ return None
+
+ def update_receptacle(self, receptacle: Optional[ObjectInstanceId]) -> None:
+ """Set the receptacle this object is in."""
+ # Clear any existing location set
+ self.location.clear()
+
+ # If there is no receptacle to add, return
+ if not receptacle:
+ return
+
+ self.location.append({receptacle: "in"})
+
+ def update_room(self, room: Optional[OfficeRoom]) -> None:
+ """Set the room this object is in."""
+ if not room:
+ self.room_location.clear()
+ return
+
+ self.room_location = [room] # noqa: WPS601
+
+ def update_color(self, color: Optional[ObjectColor]) -> None:
+ """Set the color of this object."""
+ if not color:
+ self.colors.clear()
+ return
+
+ self.colors = [color] # noqa: WPS601
+
+ def update_state(
+ self, state_name: RequiredObjectStateName, state_value: Optional[RequiredObjectStateValue]
+ ) -> None:
+ """Update the state of this object."""
+ # Remove the state from the list if it already exists
+ self.state = [state for state in self.state if state.name != state_name] # noqa: WPS601
+
+ # Add the state to the list if it is not None
+ if state_value is not None:
+ self.state.append(RequiredObjectState.parse_obj({state_name: state_value}))
+
+ def add_state(
+ self, state_name: RequiredObjectStateName, state_value: Optional[RequiredObjectStateValue]
+ ) -> None:
+ """Add state to this object."""
+ return self.update_state(state_name, state_value)
+
+ def remove_state(self, state_name: RequiredObjectStateName) -> None:
+ """Remove the state from this object."""
+ return self.update_state(state_name, None)
diff --git a/src/arena_missions/structures/state_condition.py b/src/arena_missions/structures/state_condition.py
new file mode 100644
index 0000000..630a9e4
--- /dev/null
+++ b/src/arena_missions/structures/state_condition.py
@@ -0,0 +1,348 @@
+from typing import Any, Generic, Literal, Optional, TypeVar, cast
+from typing_extensions import Self
+
+from pydantic import BaseModel, Field, root_validator
+from pydantic.generics import GenericModel
+
+from arena_missions.constants.arena import FluidType, ObjectColor
+from arena_missions.structures.object_id import ObjectInstanceId
+
+
+T = TypeVar("T")
+
+# Things that can be expressed as state conditions
+ExpressionType = Literal[
+ "CanBeSeen",
+ "isFilledWith",
+ "IsInRange",
+ "isToggledOn",
+ "isPickedUp",
+ "isPowered",
+ "isBroken",
+ "isOpen",
+ "isScanned",
+ "isUsed",
+ "isOverloaded",
+ "isEmbiggenated",
+ "isDirty",
+ "isHot",
+ "isCold",
+ "OR",
+ "AND",
+ "NOT",
+ "Contains",
+ "DinoFed",
+ "ColorMetaDataChange",
+ "IsReceptacle",
+ "isFullOfItems",
+]
+
+
+class Expression(BaseModel):
+ """Expression within a state condition.
+
+ This is the base class that all other expressions inherit from.
+ """
+
+ _key: ExpressionType
+
+ @property
+ def key(self) -> ExpressionType:
+ """Get the key for the expression."""
+ if not self._key:
+ raise ValueError("Key should exist for the expression.")
+ return self._key
+
+
+class SimpleExpression(Expression):
+ """Simple expression for a state condition.
+
+ The target is the object instance that the expression is being applied to. The message can
+ always be left blank.
+ """
+
+ target: ObjectInstanceId
+ message: str = Field(default="", const=True)
+
+
+class ValueExpression(GenericModel, Generic[T], SimpleExpression):
+ """Expression that checks if an object has a specific value."""
+
+ value: T # noqa: WPS110
+
+
+class BoolExpression(ValueExpression[bool]):
+ """Expression that checks if some property is true or false."""
+
+ value: bool # noqa: WPS110
+
+
+class IsInRangeExpression(ValueExpression[float]):
+ """Expression that checks if the target object is within the range of the current object.
+
+ This is different to `CanBeSeen` because it checks the distance between two objects, and not
+ whether or not the target object is within a certain distance of the agent.
+ """
+
+ _key = "IsInRange"
+ value: float # noqa: WPS110
+
+
+class ContainsExpression(SimpleExpression):
+ """Expression that checks if an object contains another object.
+
+ I have not seen the anchor point used, so we can ignore it, just like we do for `message`.
+ """
+
+ _key: ExpressionType = "Contains"
+
+ contains: ObjectInstanceId
+ anchor_point: str = Field(default="", const=True, alias="anchorPoint")
+
+
+class IsFilledWithExpression(SimpleExpression):
+ """Expression that checks if an object is filled with a specific liquid."""
+
+ _key: ExpressionType = "isFilledWith"
+
+ fluid: FluidType
+
+
+class CanBeSeenExpression(SimpleExpression):
+ """Expression that checks if an object is within some distance of the agent."""
+
+ _key: ExpressionType = "CanBeSeen"
+
+ distance: float
+
+
+class ColorMetaDataChangeExpression(SimpleExpression):
+ """Expression that checks if an object has a specific color.
+
+ This is useful for things like the Color Changer.
+ """
+
+ _key: ExpressionType = "ColorMetaDataChange"
+
+ color: ObjectColor = Field(..., alias="colorvalue")
+
+
+class DinoFedExpression(SimpleExpression):
+ """Check if the Portal Generator has the "dinoFed' property set to True."""
+
+ _key: ExpressionType = "DinoFed"
+
+ target: ObjectInstanceId = Field(..., regex="^PortalGenerator.*")
+
+ # Is this even a thing? I think so, but I don't see it in the source code?
+ is_fed: bool = Field(..., alias="isFed")
+
+
+class IsToggledOnExpression(BoolExpression):
+ """Expression that checks if an object is toggled on."""
+
+ _key: ExpressionType = "isToggledOn"
+
+
+class IsPickedUpExpression(BoolExpression):
+ """Checks if the target has been picked up."""
+
+ _key: ExpressionType = "isPickedUp"
+
+
+class IsPoweredExpression(BoolExpression):
+ """Check if the target has been powered."""
+
+ _key: ExpressionType = "isPowered"
+
+
+class IsBrokenExpression(BoolExpression):
+ """Check if the object is broken."""
+
+ _key: ExpressionType = "isBroken"
+
+
+class IsOpenExpression(BoolExpression):
+ """Check if the object is open."""
+
+ _key: ExpressionType = "isOpen"
+
+
+class IsScannedExpression(BoolExpression):
+ """Check if the object has been scanned."""
+
+ _key: ExpressionType = "isScanned"
+
+
+class IsUsedExpression(BoolExpression):
+ """Check if the target has been used."""
+
+ _key: ExpressionType = "isUsed"
+
+
+class IsOverloadedExpression(BoolExpression):
+ """Check if the target has been overloaded."""
+
+ _key: ExpressionType = "isOverloaded"
+
+
+class IsEmbiggenatedExpression(BoolExpression):
+ """Check if the target has been embiggenated."""
+
+ _key: ExpressionType = "isEmbiggenated"
+
+
+class IsDirtyExpression(BoolExpression):
+ """Check if the target is dirty."""
+
+ _key: ExpressionType = "isDirty"
+
+
+class IsHotExpression(BoolExpression):
+ """Check if the target is hot."""
+
+ _key: ExpressionType = "isHot"
+
+
+class IsColdExpression(BoolExpression):
+ """Check if the target is cold."""
+
+ _key: ExpressionType = "isCold"
+
+
+class IsReceptacleExpression(BoolExpression):
+ """Check if the target is a receptacle."""
+
+ _key: ExpressionType = "IsReceptacle"
+
+
+class IsFullOfItemsExpression(BoolExpression):
+ """Check if the target is full of items."""
+
+ _key: ExpressionType = "isFullOfItems"
+
+
+class StateExpression(BaseModel):
+ """State expression."""
+
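+ # In practice this holds a single expression keyed by its type, for example
+ # {"isToggledOn": IsToggledOnExpression(target="Bowl_01_1", value=True)}.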
+ __root__: dict[ExpressionType, Expression]
+
+ @classmethod
+ def from_expression(cls, expression: Expression) -> "StateExpression":
+ """Create a state expression from a type and an expression."""
+ return cls(__root__={expression.key: expression})
+
+ @root_validator(pre=True)
+ @classmethod
+ def parse_expression_correctly(cls, values: dict[str, Any]) -> dict[str, Any]: # noqa: WPS110
+ """Parse the expression correctly.
+
+ Without this, the values would just become an empty dictionary and all the information
+ would be lost, which must not happen.
+ """
+ root: Optional[dict[str, Any]] = values.get("__root__")
+
+ # Get the root
+ if not root:
+ return values
+
+ # Since the root will only have one key, we can get the first key
+ expression_type = cast(ExpressionType, list(root.keys())[0])
+
+ # Get the expression from the root
+ expression = root[expression_type]
+
+ # If the expression is already a parsed `Expression`, then we can just return the values
+ if isinstance(expression, Expression):
+ return values
+
+ # Otherwise, we need to parse the expression and update the values
+ values["__root__"][expression_type] = ExpressionTypeMapping[expression_type].parse_obj(
+ expression
+ )
+
+ return values
+
+
+class NotExpression(Expression):
+ """Expression that negates another expression."""
+
+ _key: ExpressionType = "NOT"
+
+ expression: StateExpression
+ message: str = Field(default="", const=True)
+
+
+class AggregateExpression(Expression):
+ """Expression that combines other expressions."""
+
+ expressions: list[StateExpression]
+
+ @classmethod
+ def from_expressions(cls, *expressions: Expression) -> Self:
+ """Create the aggregate expression from expressions."""
+ return cls(
+ _key=cls._key,
+ expressions=[
+ StateExpression.from_expression(expression) for expression in expressions
+ ],
+ )
+
+
+class AndExpression(AggregateExpression):
+ """Expression that other expressions using the AND operator."""
+
+ _key: ExpressionType = "AND"
+
+
+class OrExpression(AggregateExpression):
+ """Expression that other expressions using the OR operator."""
+
+ _key: ExpressionType = "OR"
+
+
+ExpressionTypeMapping: dict[ExpressionType, type[Expression]] = {
+ "isToggledOn": IsToggledOnExpression,
+ "isPickedUp": IsPickedUpExpression,
+ "isPowered": IsPoweredExpression,
+ "isBroken": IsBrokenExpression,
+ "isOpen": IsOpenExpression,
+ "isScanned": IsScannedExpression,
+ "isUsed": IsUsedExpression,
+ "isOverloaded": IsOverloadedExpression,
+ "isEmbiggenated": IsEmbiggenatedExpression,
+ "isDirty": IsDirtyExpression,
+ "isHot": IsHotExpression,
+ "isCold": IsColdExpression,
+ "IsReceptacle": IsReceptacleExpression,
+ "isFullOfItems": IsFullOfItemsExpression,
+ "isFilledWith": IsFilledWithExpression,
+ "CanBeSeen": CanBeSeenExpression,
+ "ColorMetaDataChange": ColorMetaDataChangeExpression,
+ "DinoFed": DinoFedExpression,
+ "IsInRange": IsInRangeExpression,
+ "Contains": ContainsExpression,
+ "AND": AndExpression,
+ "OR": OrExpression,
+ "NOT": NotExpression,
+}
+
+
+class StateCondition(BaseModel):
+ """State condition."""
+
+ expression: StateExpression
+ state_name: str = Field(
+ ..., description="Name of the expression", alias="stateName", regex="^[a-zA-Z]+$"
+ )
+ context: ObjectInstanceId = Field(..., description="Object Instance ID for the expression")
+
+ @property
+ def instance_id(self) -> ObjectInstanceId:
+ """Get the instance ID of the object that this condition is for."""
+ return self.context
+
+ @property
+ def state_value(self) -> Literal["true", "false"]:
+ """Get the state value of the condition."""
+ return "true"
diff --git a/src/arena_missions/structures/task_goal.py b/src/arena_missions/structures/task_goal.py
new file mode 100644
index 0000000..59d7222
--- /dev/null
+++ b/src/arena_missions/structures/task_goal.py
@@ -0,0 +1,186 @@
+from collections.abc import Generator
+from typing import Any, Callable, Literal, Union, cast, get_args
+from typing_extensions import Self
+
+from pydantic import BaseModel, Field, validator
+
+from arena_missions.constants.arena import BooleanStr, FluidType, GoalStateExpressionKey
+from arena_missions.structures.object_id import ObjectInstanceId
+from arena_missions.structures.state_condition import StateCondition
+
+
+TASK_GOAL_VISIBILITY = { # noqa: WPS407
+ "isHidden": False,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0,
+}
+
+
+ObjectGoalStateRelation = Literal[
+ # "and" is not allowed. This is because "and" does not work the way you think it does.
+ # "and",
+ "or",
+]
+
+GoalStateExpressionValue = Union[BooleanStr, ObjectInstanceId, FluidType]
+
+
+class ObjectGoalStateExpression(str): # noqa: WPS600
+ """A goal object state value."""
+
+ @classmethod
+ def __get_validators__(cls) -> Generator[Callable[..., Self], None, None]:
+ """Return a generator of validators for this type."""
+ yield cls.validate
+
+ @classmethod
+ def parse(cls, v: Any) -> Self:
+ """Parse the input."""
+ return cls.validate(v)
+
+ @classmethod
+ def validate(cls, v: Any) -> Self: # noqa: WPS231
+ """Validate the object goal state expression."""
+ if not isinstance(v, str):
+ raise TypeError("Goal object state value must be a string")
+
+ state_condition_key, state_condition_value = v.split("=")
+
+ # Make sure the state condition key is a valid state
+ # if state_condition_key not in get_args(GoalStateExpressionKey):
+ # raise ValueError(f"{state_condition_key} is not a valid state condition.")
+
+ # If the state condition key is contains, then the value should be an ObjectInstanceId
+ if state_condition_key == "Contains":
+ ObjectInstanceId(state_condition_value)
+
+ # If the state condition key is isFilled, then the value should be a LiquidType
+ elif state_condition_key == "isFilled":
+ if state_condition_value not in get_args(FluidType):
+ raise ValueError(f"{state_condition_value} must be a valid liquid type.")
+
+ # Otherwise, the value should be a boolean string
+ elif state_condition_value not in get_args(BooleanStr):
+ raise ValueError("Goal object state value must be true or false")
+
+ return cls(v)
+
+ @classmethod
+ def from_parts(
+ cls,
+ state_condition_key: Union[str, GoalStateExpressionKey],
+ state_condition_value: GoalStateExpressionValue,
+ ) -> Self:
+ """Create a goal object state value from its parts."""
+ return cls.parse(f"{state_condition_key}={state_condition_value}")
+
+ @property
+ def state_condition_key(self) -> Union[str, GoalStateExpressionKey]:
+ """Return the state condition key."""
+ return self.split("=")[0]
+
+ @property
+ def state_condition_value(self) -> GoalStateExpressionValue:
+ """Return the state condition value."""
+ state_condition_value = self.split("=")[1]
+
+ if self.state_condition_key == "Contains":
+ return ObjectInstanceId.parse(state_condition_value)
+
+ if self.state_condition_key == "isFilled":
+ return cast(FluidType, state_condition_value)
+
+ return cast(BooleanStr, state_condition_value)
+
+
+class ObjectGoalState(BaseModel):
+ """A goal object state."""
+
+ __root__: dict[ObjectInstanceId, ObjectGoalStateExpression]
+
+ def __len__(self) -> int:
+ """Return the length of the root dict."""
+ return len(self.__root__)
+
+ @classmethod
+ def from_parts(
+ cls,
+ object_instance_id: ObjectInstanceId,
+ state_condition_key: Union[str, GoalStateExpressionKey],
+ state_condition_value: GoalStateExpressionValue,
+ ) -> Self:
+ """Create a goal object state from its parts."""
+ return cls(
+ __root__={
+ object_instance_id: ObjectGoalStateExpression.from_parts(
+ state_condition_key, state_condition_value
+ )
+ }
+ )
+
+ @validator("__root__")
+ @classmethod
+ def ensure_only_one_key(
+ cls, root: dict[ObjectInstanceId, ObjectGoalStateExpression]
+ ) -> dict[ObjectInstanceId, ObjectGoalStateExpression]:
+ """Ensure that the root dict has only one key."""
+ if len(root) != 1:
+ raise ValueError("State must have only one key")
+ return root
+
+ @property
+ def object_instance_id(self) -> ObjectInstanceId:
+ """Return the object instance id."""
+ return list(self.__root__.keys())[0]
+
+ @property
+ def state_condition_key(self) -> Union[str, GoalStateExpressionKey]:
+ """Return the state condition key."""
+ return list(self.__root__.values())[0].state_condition_key
+
+ @property
+ def state_condition_value(self) -> GoalStateExpressionValue:
+ """Return the state condition value."""
+ return list(self.__root__.values())[0].state_condition_value
+
+
+class TaskGoal(BaseModel):
+ """Task goal within the Arena."""
+
+ object_states: list[ObjectGoalState] = Field(..., unique_items=True, min_items=1)
+ object_states_relation: ObjectGoalStateRelation = "or"
+
+ # This will be automatically set later
+ goal_id: int = 0
+
+ # Unused/Ignored fields
+ description: str = Field(default="")
+ preconditions: list[Any] = Field(default_factory=list)
+ visibility: dict[str, Any] = Field(default=TASK_GOAL_VISIBILITY)
+ can_reset: bool = Field(default=False, alias="canReset")
+
+ @classmethod
+ def from_object_goal_states(cls, object_states: list[ObjectGoalState]) -> "TaskGoal":
+ """Create the goal from the object states."""
+ return cls(object_states=object_states)
+
+ @classmethod
+ def from_state_condition(cls, state_condition: StateCondition) -> "TaskGoal":
+ """Create the goal from the state condition."""
+ return cls.from_object_goal_states(
+ [
+ ObjectGoalState.from_parts(
+ state_condition.instance_id,
+ state_condition.state_name,
+ state_condition.state_value,
+ )
+ ]
+ )
+
+ @validator("object_states", each_item=True)
+ @classmethod
+ def ensure_each_state_has_only_one_key(cls, state: dict[str, str]) -> dict[str, str]:
+ """Ensure that each state/location dict has only one key."""
+ if len(state) != 1:
+ raise ValueError("Each state must have only one key")
+ return state
diff --git a/src/arena_wrapper/__init__.py b/src/arena_wrapper/__init__.py
new file mode 100644
index 0000000..750359a
--- /dev/null
+++ b/src/arena_wrapper/__init__.py
@@ -0,0 +1,9 @@
+import os
+
+
+class AppConfig:
+ unity_executable_path = os.getenv("UNITY_EXE_PATH")
+ unity_log_file = os.getenv("UNITY_LOG_PATH")
+ host_pipe_file = os.getenv("HOST_PIPE")
+ runtime_platform = os.getenv("RUNTIME_PLATFORM")
+ debug = True
diff --git a/src/arena_wrapper/arena_controller.py b/src/arena_wrapper/arena_controller.py
new file mode 100644
index 0000000..2a21cfa
--- /dev/null
+++ b/src/arena_wrapper/arena_controller.py
@@ -0,0 +1,192 @@
+import json
+import logging
+import socket
+import sys
+import threading
+import time
+
+from flask import abort
+from loguru import logger
+
+
+logging.getLogger("werkzeug").setLevel(logging.ERROR)
+
+
+class ArenaController:
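+ """Manage the socket connection used to exchange JSON messages with the Arena Unity instance."""
+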
+ def __init__(self, host="127.0.0.1"):
+ self.last_rate_timestamp = time.time()
+ self.frame_counter = 0
+ self.debug_frames_per_interval = 50
+ self.UnityWSPath = host
+ self.UnityWSPort = 5000
+ self.isSocketOpen = False
+ self.ws = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ self.resultJSON = list()
+ self.currentBatchNum = 0
+ self.currentRespNum = 0
+ self.isUnityConnected = threading.Event()
+
+ def interact(self, actions):
+ batchNum = self.currentBatchNum
+ self.currentBatchNum += 1
+ self.wsSend(actions)
+ JSONResponse = dict()
+ ticks = 0
+ while not JSONResponse:
+ if not self.resultJSON:
+ time.sleep(0.1)
+ ticks += 1
+ # flask.abort raises an HTTPException, so no response object is needed here
+ if ticks >= 6000:
+ abort(408)
+ if not self.isSocketOpen:
+ abort(404)
+ continue
+ else:
+ for JSON in self.resultJSON:
+ if JSON["batchNum"] == batchNum:
+ JSONResponse = JSON
+ self.resultJSON.remove(JSON)
+
+ resp = json.dumps(JSONResponse)
+ return resp
+
+ def handle_init(self, init_request):
+ logger.debug(
+ "Received initialize message. Sending it to Unity application to bring up for play."
+ )
+ self.wsSend(init_request)
+ return
+
+ def start(self):
+ self.isUnityConnected.set()
+
+ self.ws_listen_thread = threading.Thread(target=self.wsListen)
+ self.ws_listen_thread.daemon = True
+ self.ws_listen_thread.start()
+
+ self.ws_monitor_thread = threading.Thread(target=self.wsMonitor)
+ self.ws_monitor_thread.daemon = True
+ self.ws_monitor_thread.start()
+
+ logger.debug("Listener and monitor threads successfully started.")
+
+ def wsConnect(self):
+ logger.debug("Awaiting connection to Unity instance")
+ self.isSocketOpen = False
+ self.currentBatchNum = 0
+ self.currentRespNum = 0
+ self.resultJSON.clear()
+ counter = 0
+ logger.debug("self.UnityWSPath: ", self.UnityWSPath)
+ # Loop until a connection is made
+ while self.isUnityConnected.is_set():
+ time.sleep(0.1)
+
+ self.ws.close()
+ self.ws = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+
+ result = self.ws.connect_ex((self.UnityWSPath, self.UnityWSPort))
+
+ if result == 0:
+ self.isSocketOpen = True
+ logger.debug("Connection established")
+ return
+ else:
+ logger.error("Could not connect to RG unity instance: ", result)
+ if counter == 250:
+ logger.error(
+ "Tried to connect to unity for 250 times. Stopping the controller."
+ )
+ self.isUnityConnected.clear()
+ counter += 1
+ return
+
+ # Runs on its own thread listening for data from Unity
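+ # Messages are framed as a 4-byte native-endian length prefix followed by a UTF-8 encoded
+ # JSON payload of that length (wsSend uses the same framing for outgoing messages).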
+ def wsListen(self):
+ while self.isUnityConnected.is_set():
+ if self.isSocketOpen:
+ try:
+ sizeInBytes = self.ws.recv(4)
+ if not sizeInBytes:
+ self.isSocketOpen = False
+ logger.warning("Connection lost during listener thread loop")
+ continue
+
+ size = int.from_bytes(bytes=sizeInBytes, byteorder=sys.byteorder, signed=False)
+ bytesReceived = 0
+ dataBuffer = bytearray()
+
+ while bytesReceived < size:
+ dataBuffer += self.ws.recv(size - bytesReceived)
+ bytesReceived = len(dataBuffer)
+
+ jsonData = str(dataBuffer, encoding="UTF-8")
+ # print(jsonData + '\n')
+
+ JSONPacket = json.loads(jsonData)
+ JSONPacket["batchNum"] = self.currentRespNum
+ self.currentRespNum += 1
+ self.resultJSON.append(JSONPacket)
+
+ except OSError as e:
+ logger.error("Exception during read")
+ if e.errno == socket.errno.ECONNRESET:
+ self.isSocketOpen = False
+ else:
+ raise
+ else:
+ time.sleep(0.1)
+ logger.debug("Listen thread ends")
+
+ def wsSend(self, jsonCommand):
+ isDataSent = False
+ while not isDataSent:
+ if self.isSocketOpen:
+ encodedString = json.dumps(jsonCommand).encode(encoding="UTF-8")
+ encodedBytesToSend = len(encodedString).to_bytes(4, sys.byteorder)
+
+ try:
+ bytesSent = 0
+ bytesSent += self.ws.send(encodedBytesToSend)
+ bytesSent += self.ws.send(encodedString)
+ if bytesSent > 0:
+ logger.debug(
+ str(bytesSent)
+ + " of expected "
+ + str(len(encodedString) + 4)
+ + " bytes sent.\n"
+ + json.dumps(jsonCommand)
+ )
+ isDataSent = True
+
+ except OSError as e:
+ if e.errno == socket.errno.ECONNRESET:
+ self.isSocketOpen = False
+ else:
+ raise
+
+ time.sleep(0.1)
+
+ def wsMonitor(self):
+ while self.isUnityConnected.is_set():
+ try:
+ self.ws.send(bytearray(0))
+ except OSError:
+ self.isSocketOpen = False
+ if not self.isSocketOpen:
+ self.wsConnect()
+
+ time.sleep(1.0)
+ self.isSocketOpen = False
+ logger.info("Monitor thread ends")
+
+ def stop(self):
+ self.isUnityConnected.clear()
+ logger.info("Unity exe disconnected successfully")
+
+ def get_connection_status(self):
+ return self.isUnityConnected.is_set()
diff --git a/src/arena_wrapper/arena_orchestrator.py b/src/arena_wrapper/arena_orchestrator.py
new file mode 100644
index 0000000..a4dd8ff
--- /dev/null
+++ b/src/arena_wrapper/arena_orchestrator.py
@@ -0,0 +1,306 @@
+import atexit
+import base64
+import json
+import os
+import subprocess
+import time
+from dataclasses import dataclass
+from typing import Any
+
+import cv2
+import httpx
+import numpy as np
+from loguru import logger
+
+from arena_wrapper.arena_controller import ArenaController
+from arena_wrapper.arena_request_builder import ArenaRequestBuilder
+from arena_wrapper.constants import ACTIONS_REQUIRING_MASK, OBJECT_CLASS_ALLOW_LIST
+from arena_wrapper.enums.object_output_wrapper import ObjectOutputType
+from arena_wrapper.exceptions import RaycastMissedException
+from arena_wrapper.util import object_class_decoder
+
+
+@dataclass
+class AppConfig:
+ unity_executable_path = os.getenv("ARENA_PATH")
+ unity_log_file = os.getenv("UNITY_LOG_PATH")
+ host_pipe_file = os.getenv("HOST_PIPE")
+ runtime_platform = os.getenv("PLATFORM")
+ debug = True
+
+
+class ArenaOrchestrator:
+ def __init__(self, x_display=1):
+ self.arena_request_builder = ArenaRequestBuilder()
+ self.controller = ArenaController()
+ self.x_display = x_display
+ self.is_unity_running = False
+ self.segmentation_images = None
+ self.response = None
+ self.segmentation_color_to_object_id_map = {}
+ self.subgoals_completion_indices = []
+ self.subgoals_completion_ids = []
+ self.logger = logger
+ self.app_config = AppConfig()
+
+ def init_game(self, cdf):
+ if self.init_unity_instance():
+ self.launch_game(cdf)
+ return True
+ return False
+
+ def create_action_status(self, actions: list[dict[str, Any]]) -> dict[str, Any]:
+ last_action = json.loads(self.response["lastAction"])
+ last_action_id = last_action["commandNum"]
+ try:
+ action_type = actions[last_action_id]["type"]
+ except (KeyError, IndexError):
+ action_type = last_action["commandType"]
+
+ if action_type.lower() == "goto":
+ action_type = "Goto"
+ if action_type.lower() == "pickup":
+ action_type = "Pickup"
+
+ if self.response["lastActionSuccess"] == "InterruptedByNewCommandBatch":
+ raise AssertionError("Unable to recover from `InterruptedByNewCommandBatch`")
+
+ return {
+ "id": last_action["commandNum"],
+ "type": action_type,
+ "success": self.response["lastActionSuccess"] == "ActionSuccessful",
+ "errorType": self.response["lastActionSuccess"],
+ }
+
+ def execute_action(self, actions, object_output_type, nlg_action) -> tuple[bool, Any]:
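+ # Translates the requested actions into RG-compatible requests, forwards them to Unity through the
+ # controller, refreshes the cached segmentation images, and returns (success flag, action status or
+ # error string).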
+ rg_compatible_actions = []
+ try:
+ if object_output_type == ObjectOutputType.OBJECT_CLASS:
+ if not self.validate_object_classes(actions):
+ self.logger.error("Invalid object classes found. Not executing any actions.")
+ return False, "InvalidObjectClass"
+ self.logger.debug(
+ "Cross-check against object class allow-list successful. Valid classes found."
+ )
+ actions = object_class_decoder.convert_object_class_to_id(
+ actions, self.response, nlg_action
+ )
+ self.logger.info("Converted actions after decoding object classes: %s" % actions)
+ params = {
+ "segmentationImages": self.segmentation_images,
+ "segmentationColorToObjectIdMap": self.segmentation_color_to_object_id_map,
+ "objectOutputType": object_output_type,
+ }
+ for action in actions:
+ rg_compatible_action = self.arena_request_builder.get_request_json(action, params)
+ logger.info("RG compatible action: " + str(rg_compatible_action))
+ if rg_compatible_action is not None:
+ rg_compatible_actions.append(rg_compatible_action)
+ except Exception as e:
+ self.logger.error(
+ "Skipping actions execution as exception occurred while interpreting actions: %s"
+ % e
+ )
+ return False, "IncorrectActionFormat"
+ if len(rg_compatible_actions) != 0:
+ try:
+ self.response = json.loads(self.controller.interact(rg_compatible_actions))
+ self.segmentation_images = self.get_images_from_metadata(
+ "instanceSegmentationImage"
+ )
+ self.build_segmentation_color_to_object_id_map()
+ return (
+ self.response["lastActionSuccess"] == "ActionSuccessful",
+ self.create_action_status(actions),
+ )
+ except Exception as ex:
+ self.logger.debug(f"Response keys: {list(self.response.keys())}")
+
+ try:
+ last_action_success = self.response["lastActionSuccess"]
+ except KeyError:
+ last_action_success = "ActionExecutionError"
+ self.response["lastActionSuccess"] = last_action_success
+
+ if "408 Request Timeout" in str(ex):
+ raise httpx.ConnectTimeout(f"Stream closed due to timeout: {ex}")
+
+ if "RaycastMissed" in str(ex):
+ raise RaycastMissedException(f"Failed to handle raycast: {ex}")
+
+ self.logger.error(
+ f"Exception while executing actions with status {last_action_success}: {ex}"
+ )
+ return False, self.create_action_status(actions)
+
+ logger.error("UnknownError: Unable to execute any actions in the Arena")
+ return False, None
+
+ def stop_game(self):
+ self.kill_unity_instance()
+
+ def init_unity_instance(self):
+ try:
+ if self.is_unity_running:
+ self.kill_unity_instance()
+ time.sleep(1)
+ except Exception as e:
+ logger.info("Exception occurred while killing the RG unity instance.", e)
+ logger.info("Starting unity instance...")
+ env = os.environ.copy()
+ env["DISPLAY"] = ":" + str(self.x_display)
+ try:
+ self.unity_proc = proc = subprocess.Popen(
+ self._get_unity_execution_command(), env=env, shell=True
+ )
+ (output, err) = self.unity_proc.communicate()
+ logger.info(output)
+ logger.info(err)
+ self.unity_pid = proc.pid
+ logger.info("Unity instance process ID: " + str(self.unity_pid))
+ atexit.register(lambda: self.unity_proc.poll() is None and self.unity_proc.kill())
+ except Exception as e:
+ logger.exception(
+ f"Exception occurred while opening the RG unity instance. Please start it: {e}"
+ )
+ return False
+ time.sleep(1)
+ if not self.controller.get_connection_status():
+ self.controller.start()
+ return True
+
+ def _get_unity_execution_command(self):
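+ # Builds the shell command used to launch the Arena executable. With hypothetical values such as
+ # ARENA_PATH=/opt/arena/Arena.x86_64, UNITY_LOG_PATH=/tmp/unity.log and x_display=1, the Linux
+ # branch would produce: DISPLAY=:1 /opt/arena/Arena.x86_64 -logfile /tmp/unity.log&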
+ command = None
+ if self.app_config.runtime_platform == "Linux":
+ command = (
+ "DISPLAY=:"
+ + str(self.x_display)
+ + " "
+ + self.app_config.unity_executable_path
+ + " -logfile "
+ + self.app_config.unity_log_file
+ + "&"
+ )
+ elif self.app_config.runtime_platform == "Mac":
+ command = "open -n " + self.app_config.unity_executable_path
+ return command
+
+ def launch_game(self, cdf):
+ self.controller.handle_init(cdf)
+ time.sleep(10)
+ return True
+
+ def kill_unity_instance(self):
+ logger.info("Killing unity instance...")
+ try:
+ os.system("kill -9 $(ps -A | grep Arena | awk '{ print $1 }')")
+ logger.info("Unity process killed successfully")
+ return True
+ except Exception as e:
+ logger.info("Exception occurred while killing the RG unity instance.", e)
+ return False
+
+ def build_segmentation_color_to_object_id_map(self):
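+ # Maps each object's instance segmentation RGB color (alpha channel dropped, stored as a frozenset
+ # of the r/g/b items so it can serve as a dictionary key) to that object's objectID.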
+ objects = self.response["objects"]
+ self.segmentation_color_to_object_id_map = {}
+ for obj in objects:
+ color_dict = obj["instanceSegmentationColor"]
+ color_dict.pop("a", None)
+ key = frozenset(color_dict.items())
+ if key not in self.segmentation_color_to_object_id_map:
+ self.segmentation_color_to_object_id_map[key] = obj["objectID"]
+ else:
+ self.logger.error(
+ f"Color {obj['instanceSegmentationColor']} already exists for object: "
+ f"{self.segmentation_color_to_object_id_map[key]}"
+ )
+
+ def get_scene_data(self):
+ exclude_keys = ["colorImage", "depthImage", "normalsImage", "instanceSegmentationImage"]
+ return {key: self.response[key] for key in self.response if key not in exclude_keys}
+
+ def get_images_from_metadata(self, image_key):
+ if image_key not in self.response:
+ return None
+ image_rgb_list = []
+ for raw_image in self.response[image_key]:
+ encoded_image = str.encode(raw_image)
+ decoded_image = base64.decodebytes(encoded_image)
+ image_buffer = np.asarray(bytearray(decoded_image), dtype="uint8")
+ image_bgr = cv2.imdecode(image_buffer, cv2.IMREAD_COLOR)
+ image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
+ image_rgb_list.append(image_rgb)
+ return image_rgb_list
+
+ def get_goals_status(self):
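+ # Returns a tuple of (ids of subgoals newly completed by the current action, whether all goals are
+ # finished, a 0/1 completion flag per goal), derived from the challengeProgress metadata.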
+ subgoals_completion_ids_in_current_action = []
+ all_goals_completed = False
+ subgoal_completion_status = []
+ if "challengeProgress" not in self.response:
+ return (
+ subgoals_completion_ids_in_current_action,
+ all_goals_completed,
+ subgoal_completion_status,
+ )
+ goals = self.response["challengeProgress"]["ChallengeGoals"]
+ try:
+ all_goals_completed = True
+ for task in goals:
+ goal_id = task["goal_id"]
+ if task["isFinished"]:
+ subgoal_completion_status.append(1)
+ if goal_id not in self.subgoals_completion_ids:
+ self.logger.info(f"Task {goal_id} has been completed: {str(task)}")
+ subgoals_completion_ids_in_current_action.append(goal_id)
+ self.subgoals_completion_ids.append(goal_id)
+ else:
+ subgoal_completion_status.append(0)
+ all_goals_completed = False
+ except Exception as e:
+ self.logger.error(f"Unable to get goal status: {str(e)}")
+ return (
+ subgoals_completion_ids_in_current_action,
+ all_goals_completed,
+ subgoal_completion_status,
+ )
+
+ def validate_object_classes(self, actions):
+ for action in actions:
+ try:
+ if action["type"] in ACTIONS_REQUIRING_MASK:
+ object_class_name = action[action["type"].lower()]["object"]["name"]
+ if object_class_name not in OBJECT_CLASS_ALLOW_LIST:
+ return False
+ except Exception as ex:
+ self.logger.error("Exception while validating object classes: %s" % ex)
+ raise ex
+ return True
+
+ def get_reconstructed_metadata(self):
+ robot_info = list()
+ for obj in self.response["objects"]:
+ if "TAM_" in obj["objectID"]:
+ robot_info.append(
+ {
+ "currentRoom": obj["currentRoom"],
+ "position": obj["position"],
+ "rotation": obj["rotation"],
+ }
+ )
+ break
+ if not robot_info:
+ self.logger.error("TAM location not found in the object list")
+ if "colorImage" not in self.response:
+ self.logger.error("Color images not found in the RG response")
+ color_images = {
+ str(index): image for index, image in enumerate(self.response["colorImage"])
+ }
+ depth_images = {
+ str(index): image for index, image in enumerate(self.response["depthImage"])
+ }
+ return {
+ "colorImages": color_images,
+ "depthImages": depth_images,
+ "robotInfo": robot_info,
+ "viewPoints": self.response["sceneMetadata"]["GoToPoints"],
+ }
diff --git a/src/arena_wrapper/arena_request_builder.py b/src/arena_wrapper/arena_request_builder.py
new file mode 100644
index 0000000..fead9f2
--- /dev/null
+++ b/src/arena_wrapper/arena_request_builder.py
@@ -0,0 +1,518 @@
+import logging
+
+import numpy as np
+
+from arena_wrapper.enums.object_output_wrapper import ObjectOutputType
+from arena_wrapper.util import decompress_mask
+
+
+class RGActionsConstant:
+ DEFAULT_ROTATE_MAGNITUDE = 45
+ DEFAULT_MOVE_MAGNITUDE = 1
+ DEFAULT_LOOK_MAGNITUDE = 45
+ DEFAULT_LOOK_AROUND_MAGNITUDE = 100
+
+
+class ArenaRequestBuilder:
+ def __init__(self):
+ self.logger = logging.getLogger("Simbot.ArenaRequestBuilder")
+ self.ground_truth_segmentation_images = None
+ self.segmentation_color_to_object_id_map = None
+ self.objects_in_hands = {"left": None, "right": None}
+
+ def find_object_id(self, compressed_mask, color_image_index):
+ ## Decompress the mask
+ mask = decompress_mask(compressed_mask)
+ if not self.ground_truth_segmentation_images:
+ self.logger.error(
+ "Unable to find the object id, previous segmentation images are not present"
+ )
+ return None
+ ## Use the first segmentation image until InferenceService specifies which image to use along with the mask
+ ground_truth_segmentation_image = self.ground_truth_segmentation_images[color_image_index]
+
+ ## Perform element wise multiplication of mask_matrix and previous_seg_image
+ mask_3d = np.concatenate(
+ (mask[:, :, np.newaxis], mask[:, :, np.newaxis], mask[:, :, np.newaxis]), axis=2
+ )
+ masked_image = ground_truth_segmentation_image * mask_3d
+
+ ## Make a list of all colors present in result_image
+ unique_colors = np.unique(masked_image.reshape(-1, masked_image.shape[2]), axis=0)
+
+ ## Compute IoU for each color and find the color that has maximum IoU.
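+ ## IoU here is |prediction ∩ candidate| / |prediction ∪ candidate| over sets of pixel coordinates,
+ ## so the selected color belongs to the instance that overlaps the predicted mask the most.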
+ pred_indices = np.where(np.all(mask_3d == 1, axis=-1))
+ pred_indices = {(x, y) for x, y in zip(*pred_indices)}
+ ious = []
+ for i in range(unique_colors.shape[0]):
+ color = tuple(unique_colors[i])
+ indices = np.where(np.all(ground_truth_segmentation_image == color, axis=-1))
+ indices = {(x, y) for x, y in zip(*indices)}
+ intersection = pred_indices.intersection(indices)
+ union = pred_indices.union(indices)
+ ious.append(len(intersection) / len(union))
+ ious = np.array(ious)
+ max_ind = np.argmax(ious)
+ max_color = unique_colors[max_ind]
+ self.logger.info("Segmentation color with maximum IoU: " + str(max_color))
+
+ ## Determine object id based on identified color using instance Segmentation color to object id map
+ key = frozenset(
+ {"r": int(max_color[0]), "g": int(max_color[1]), "b": int(max_color[2])}.items()
+ )
+ if key in self.segmentation_color_to_object_id_map:
+ object_id = self.segmentation_color_to_object_id_map[key]
+ self.logger.info("Found object id: " + str(object_id))
+ else:
+ self.logger.error("Unable to find the object id")
+ object_id = None
+
+ return object_id
+
+ def get_request_json(self, action_request, params):
+ rg_compatible_request = None
+ self.ground_truth_segmentation_images = params["segmentationImages"]
+ self.segmentation_color_to_object_id_map = params["segmentationColorToObjectIdMap"]
+ self.object_output_type = params["objectOutputType"]
+ if action_request["type"] == "Move":
+ rg_compatible_request = self._build_move_request(action_request["move"])
+ elif action_request["type"] == "Rotate":
+ rg_compatible_request = self._build_rotate_request(action_request["rotation"])
+ elif action_request["type"] == "Goto":
+ rg_compatible_request = self._build_goto_request(action_request["goto"])
+ elif action_request["type"] == "Pickup":
+ rg_compatible_request = self._build_pickup_request(action_request["pickup"])
+ elif action_request["type"] == "Place":
+ rg_compatible_request = self._build_place_request(action_request["place"])
+ elif action_request["type"] == "Open":
+ rg_compatible_request = self._build_open_request(action_request["open"])
+ elif action_request["type"] == "Close":
+ rg_compatible_request = self._build_close_request(action_request["close"])
+ elif action_request["type"] == "Break":
+ rg_compatible_request = self._build_break_request(action_request["break"])
+ elif action_request["type"] == "Scan":
+ rg_compatible_request = self._build_scan_request(action_request["scan"])
+ elif action_request["type"] == "Pour":
+ rg_compatible_request = self._build_pour_request(action_request["pour"])
+ elif action_request["type"] == "Toggle":
+ rg_compatible_request = self._build_toggle_request(action_request["toggle"])
+ elif action_request["type"] == "Throw":
+ rg_compatible_request = self._build_throw_request(action_request["throw"])
+ elif action_request["type"] == "Fill":
+ rg_compatible_request = self._build_fill_request(action_request["fill"])
+ elif action_request["type"] == "Clean":
+ rg_compatible_request = self._build_clean_request(action_request["clean"])
+ elif action_request["type"] == "Examine":
+ rg_compatible_request = self._build_examine_request(action_request["examine"])
+ elif action_request["type"] == "CameraChange":
+ rg_compatible_request = self._build_camera_change_request(
+ action_request["camerachange"]
+ )
+ elif action_request["type"] == "Look":
+ rg_compatible_request = self._build_look_request(action_request["look"])
+ elif action_request["type"] == "Highlight":
+ rg_compatible_request = self._build_highlight_request(action_request["highlight"])
+ else:
+ self.logger.error("Incorrect action format received." + str(action_request))
+ raise ValueError("Invalid action dictionary received")
+ return rg_compatible_request
+
+ def _build_move_request(self, input_move_request):
+ move_request = {}
+ if input_move_request["direction"] == "Forward":
+ move_request["commandType"] = "MoveForward"
+ elif input_move_request["direction"] == "Backward":
+ move_request["commandType"] = "MoveBackward"
+ if "magnitude" not in input_move_request:
+ move_request["magnitude"] = RGActionsConstant.DEFAULT_MOVE_MAGNITUDE
+ else:
+ move_request["magnitude"] = input_move_request["magnitude"]
+ return move_request
+
+ def _build_rotate_request(self, input_rotate_request):
+ rotate_request = {"commandType": "Rotate"}
+ rotation_angle = RGActionsConstant.DEFAULT_ROTATE_MAGNITUDE
+ if "magnitude" in input_rotate_request:
+ rotation_angle = input_rotate_request["magnitude"]
+ if input_rotate_request["direction"] == "Left":
+ rotate_request["magnitude"] = -rotation_angle
+ elif input_rotate_request["direction"] == "Right":
+ rotate_request["magnitude"] = rotation_angle
+ if "rotationSpeed" in input_rotate_request:
+ rotate_request["rotationSpeed"] = input_rotate_request["rotationSpeed"]
+ return rotate_request
+
+ def _build_goto_request(self, input_goto_request):
+ # "position" and "raycast" will be set later
+ goto_request = self._get_goto_command_json()
+ if "object" not in input_goto_request:
+ self.logger.error(f'Input request did not contain "object": {input_goto_request}')
+ goto_request = None
+ elif "officeRoom" in input_goto_request["object"]:
+ self.logger.info(f'Using "officeRoom" in goTo command')
+ goto_request["goToCommand"]["officeRoom"] = input_goto_request["object"]["officeRoom"]
+ elif "goToPoint" in input_goto_request["object"]:
+ self.logger.info(f'Using "goToPoint" in goTo command')
+ goto_request["goToCommand"]["goToPoint"] = input_goto_request["object"]["goToPoint"]
+ elif (
+ "mask" in input_goto_request["object"]
+ and self.object_output_type == ObjectOutputType.OBJECT_MASK
+ ):
+ self.logger.info(f'Using "mask" in goTo command')
+ color_image_index = input_goto_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_goto_request["object"]["mask"], color_image_index
+ )
+ goto_request["goToCommand"]["instanceIdOfObject"] = object_id
+ elif "name" in input_goto_request["object"]:
+ self.logger.info(f'Using "name" in goTo command')
+ goto_request["goToCommand"]["instanceIdOfObject"] = input_goto_request["object"][
+ "name"
+ ]
+ elif "position" in input_goto_request["object"]:
+ self.logger.info("Using 'position' in goTo command")
+ goto_request["goToCommand"]["position"] = list(
+ input_goto_request["object"]["position"].values()
+ )
+ # goto_request["goToCommand"]["raycast"] = [
+ # input_goto_request["object"]["rotation"]["y"],
+ # input_goto_request["object"]["rotation"]["w"],
+ # ]
+ else:
+ self.logger.error(
+ f'Did not find required goTo parameters in "object": {input_goto_request}'
+ )
+ goto_request = None
+ return goto_request
+
+ def _get_interact_command_json(self):
+ return {
+ "commandType": "Interact",
+ "interactCommand": {"sourceObjectID": "TAM_1", "destinationObjectID": "", "verb": ""},
+ }
+
+ def _get_pickup_command_json(self):
+ return {
+ "commandType": "PickUp",
+ "pickUpOrPlaceCommand": {"destinationObjectID": "", "useLeftHand": "false"},
+ }
+
+ def _get_place_command_json(self):
+ return {
+ "commandType": "Place",
+ "pickUpOrPlaceCommand": {"destinationObjectID": "", "useLeftHand": "false"},
+ }
+
+ def _get_camera_change_command_json(self):
+ return {"commandType": "CameraChange", "camChangeCommand": {"mode": ""}}
+
+ def _get_goto_command_json(self):
+ return {
+ "commandType": "GoTo",
+ "goToCommand": {
+ "instanceIdOfObject": None,
+ "position": None,
+ "raycast": None,
+ "officeRoom": None,
+ "goToPoint": None,
+ },
+ }
+
+ def _get_highlight_command_json(self):
+ return {
+ "commandType": "Highlight",
+ "highlightCommand": {
+ "instanceId": None,
+ "shouldRotateToObject": True,
+ "shouldRotateBack": True,
+ },
+ }
+
+ def _build_pickup_request(self, input_pickup_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_pickup_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_pickup_request["object"]["mask"], color_image_index
+ )
+ input_pickup_request["object"]["name"] = object_id
+ pickup_request = self._get_pickup_command_json()
+ pickup_request["pickUpOrPlaceCommand"]["destinationObjectID"] = input_pickup_request[
+ "object"
+ ]["name"]
+ self.objects_in_hands["right"] = input_pickup_request["object"]["name"]
+ return pickup_request
+
+ def _build_place_request(self, input_place_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_place_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_place_request["object"]["mask"], color_image_index
+ )
+ input_place_request["object"]["name"] = object_id
+ self.objects_in_hands["right"] = None
+ place_request = self._get_place_command_json()
+ place_request["pickUpOrPlaceCommand"]["destinationObjectID"] = input_place_request[
+ "object"
+ ]["name"]
+ return place_request
+
+ def _build_open_request(self, input_open_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_open_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_open_request["object"]["mask"], color_image_index
+ )
+ input_open_request["object"]["name"] = object_id
+ open_request = self._get_interact_command_json()
+ open_request["interactCommand"]["destinationObjectID"] = input_open_request["object"][
+ "name"
+ ]
+ open_request["interactCommand"]["verb"] = "OPEN"
+ return open_request
+
+ def _build_close_request(self, input_close_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_close_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_close_request["object"]["mask"], color_image_index
+ )
+ input_close_request["object"]["name"] = object_id
+ close_request = self._get_interact_command_json()
+ close_request["interactCommand"]["destinationObjectID"] = input_close_request["object"][
+ "name"
+ ]
+ close_request["interactCommand"]["verb"] = "CLOSE"
+ return close_request
+
+ def _build_break_request(self, input_break_request):
+ if self.objects_in_hands["right"] is not None:
+ source_object_id = self.objects_in_hands["right"]
+ else:
+ source_object_id = "TAM_1"
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_break_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_break_request["object"]["mask"], color_image_index
+ )
+ input_break_request["object"]["source"] = source_object_id
+ input_break_request["object"]["destination"] = object_id
+ elif (
+ self.object_output_type == ObjectOutputType.OBJECT_CLASS
+ and "name" in input_break_request["object"]
+ ):
+ input_break_request["object"]["source"] = source_object_id
+ input_break_request["object"]["destination"] = input_break_request["object"]["name"]
+ break_request = self._get_interact_command_json()
+ break_request["interactCommand"]["sourceObjectID"] = input_break_request["object"][
+ "source"
+ ]
+ break_request["interactCommand"]["destinationObjectID"] = input_break_request["object"][
+ "destination"
+ ]
+ break_request["interactCommand"]["verb"] = "BREAK"
+ return break_request
+
+ def _build_scan_request(self, input_scan_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_scan_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_scan_request["object"]["mask"], color_image_index
+ )
+ input_scan_request["object"]["name"] = object_id
+ scan_request = self._get_interact_command_json()
+ scan_request["interactCommand"]["destinationObjectID"] = input_scan_request["object"][
+ "name"
+ ]
+ scan_request["interactCommand"]["verb"] = "SCAN"
+ return scan_request
+
+ def _build_pour_request(self, input_pour_request):
+ if self.objects_in_hands["right"] is not None:
+ source_object_id = self.objects_in_hands["right"]
+ else:
+ source_object_id = input_pour_request["object"]["source"] = "TAM_1"
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_pour_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_pour_request["object"]["mask"], color_image_index
+ )
+ input_pour_request["object"]["source"] = source_object_id
+ input_pour_request["object"]["destination"] = object_id
+ elif (
+ self.object_output_type == ObjectOutputType.OBJECT_CLASS
+ and "name" in input_pour_request["object"]
+ ):
+ input_pour_request["object"]["source"] = source_object_id
+ input_pour_request["object"]["destination"] = input_pour_request["object"]["name"]
+ pour_request = self._get_interact_command_json()
+ pour_request["interactCommand"]["sourceObjectID"] = input_pour_request["object"]["source"]
+ pour_request["interactCommand"]["destinationObjectID"] = input_pour_request["object"][
+ "destination"
+ ]
+ pour_request["interactCommand"]["verb"] = "POUR"
+ return pour_request
+
+ def _build_throw_request(self, input_throw_request):
+ if self.objects_in_hands["right"] is not None:
+ source_object_id = self.objects_in_hands["right"]
+ else:
+ source_object_id = "TAM_1"
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_throw_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_throw_request["object"]["mask"], color_image_index
+ )
+ input_throw_request["object"]["source"] = source_object_id
+ input_throw_request["object"]["destination"] = object_id
+ elif (
+ self.object_output_type == ObjectOutputType.OBJECT_CLASS
+ and "name" in input_throw_request["object"]
+ ):
+ input_throw_request["object"]["source"] = source_object_id
+ input_throw_request["object"]["destination"] = input_throw_request["object"]["name"]
+ throw_request = self._get_interact_command_json()
+ throw_request["interactCommand"]["sourceObjectID"] = input_throw_request["object"][
+ "source"
+ ]
+ throw_request["interactCommand"]["destinationObjectID"] = input_throw_request["object"][
+ "destination"
+ ]
+ throw_request["interactCommand"]["verb"] = "THROW"
+ return throw_request
+
+ def _build_clean_request(self, input_clean_request):
+ if self.objects_in_hands["right"] is not None:
+ source_object_id = self.objects_in_hands["right"]
+ else:
+ source_object_id = "TAM_1"
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_clean_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_clean_request["object"]["mask"], color_image_index
+ )
+ input_clean_request["object"]["source"] = source_object_id
+ input_clean_request["object"]["destination"] = object_id
+ elif (
+ self.object_output_type == ObjectOutputType.OBJECT_CLASS
+ and "name" in input_clean_request["object"]
+ ):
+ input_clean_request["object"]["source"] = source_object_id
+ input_clean_request["object"]["destination"] = input_clean_request["object"]["name"]
+ clean_request = self._get_interact_command_json()
+ clean_request["interactCommand"]["sourceObjectID"] = input_clean_request["object"][
+ "source"
+ ]
+ clean_request["interactCommand"]["destinationObjectID"] = input_clean_request["object"][
+ "destination"
+ ]
+ clean_request["interactCommand"]["verb"] = "CLEAN"
+ return clean_request
+
+ def _build_fill_request(self, input_fill_request):
+ destination_object_id = None
+ if self.objects_in_hands["right"] is not None:
+ destination_object_id = self.objects_in_hands["right"]
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_fill_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_fill_request["object"]["mask"], color_image_index
+ )
+ input_fill_request["object"]["destination"] = destination_object_id
+ input_fill_request["object"]["source"] = object_id
+ elif (
+ self.object_output_type == ObjectOutputType.OBJECT_CLASS
+ and "name" in input_fill_request["object"]
+ ):
+ input_fill_request["object"]["source"] = input_fill_request["object"]["name"]
+ input_fill_request["object"]["destination"] = destination_object_id
+ fill_request = self._get_interact_command_json()
+ fill_request["interactCommand"]["sourceObjectID"] = input_fill_request["object"]["source"]
+ fill_request["interactCommand"]["destinationObjectID"] = input_fill_request["object"][
+ "destination"
+ ]
+ fill_request["interactCommand"]["verb"] = "FILL"
+ return fill_request
+
+ def _build_toggle_request(self, input_toggle_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_toggle_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_toggle_request["object"]["mask"], color_image_index
+ )
+ input_toggle_request["object"]["name"] = object_id
+ toggle_request = self._get_interact_command_json()
+ toggle_request["interactCommand"]["destinationObjectID"] = input_toggle_request["object"][
+ "name"
+ ]
+ toggle_request["interactCommand"]["verb"] = "TOGGLE"
+ return toggle_request
+
+ def _build_examine_request(self, input_examine_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_examine_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_examine_request["object"]["mask"], color_image_index
+ )
+ input_examine_request["object"]["name"] = object_id
+ examine_request = self._get_interact_command_json()
+ examine_request["interactCommand"]["destinationObjectID"] = input_examine_request[
+ "object"
+ ]["name"]
+ examine_request["interactCommand"]["verb"] = "EXAMINE"
+ return examine_request
+
+ def _build_camera_change_request(self, input_camera_change_request):
+ camera_change_request = self._get_camera_change_command_json()
+ camera_change_request["camChangeCommand"]["mode"] = input_camera_change_request["mode"]
+ return camera_change_request
+
+ def _build_look_request(self, input_look_request):
+ look_request = {}
+ if input_look_request["direction"].lower() == "up":
+ look_request["commandType"] = "LookUp"
+ look_up_magnitude = RGActionsConstant.DEFAULT_LOOK_MAGNITUDE
+ if "magnitude" in input_look_request:
+ look_up_magnitude = input_look_request["magnitude"]
+ look_request["panAndLookCommand"] = {"magnitude": look_up_magnitude}
+ elif input_look_request["direction"].lower() == "down":
+ look_request["commandType"] = "LookDown"
+ look_down_magnitude = RGActionsConstant.DEFAULT_LOOK_MAGNITUDE
+ if "magnitude" in input_look_request:
+ look_down_magnitude = input_look_request["magnitude"]
+ look_request["panAndLookCommand"] = {"magnitude": look_down_magnitude}
+ elif input_look_request["direction"].lower() == "around":
+ look_request["commandType"] = "LookAround"
+ field_of_view_value = RGActionsConstant.DEFAULT_LOOK_AROUND_MAGNITUDE
+ if "magnitude" in input_look_request:
+ field_of_view_value = int(input_look_request["magnitude"])
+ look_request["lookAroundCommand"] = {"fieldOfView": field_of_view_value}
+ if "shouldRotate" in input_look_request:
+ look_request["lookAroundCommand"]["shouldRotate"] = input_look_request[
+ "shouldRotate"
+ ]
+ else:
+ look_request["lookAroundCommand"]["shouldRotate"] = False
+ return look_request
+
+ def _build_highlight_request(self, input_highlight_request):
+ if self.object_output_type == ObjectOutputType.OBJECT_MASK:
+ color_image_index = input_highlight_request["object"].get("colorImageIndex", 0)
+ object_id = self.find_object_id(
+ input_highlight_request["object"]["mask"], color_image_index
+ )
+ input_highlight_request["object"]["name"] = object_id
+ highlight_request = self._get_highlight_command_json()
+ highlight_request["highlightCommand"]["instanceID"] = input_highlight_request["object"][
+ "name"
+ ]
+ if "shouldRotateToObject" in input_highlight_request["object"]:
+ highlight_request["highlightCommand"][
+ "shouldRotateToObject"
+ ] = input_highlight_request["object"]["shouldRotateToObject"]
+ if "shouldRotateBack" in input_highlight_request["object"]:
+ highlight_request["highlightCommand"]["shouldRotateBack"] = input_highlight_request[
+ "object"
+ ]["shouldRotateBack"]
+ return highlight_request
diff --git a/src/arena_wrapper/constants.py b/src/arena_wrapper/constants.py
new file mode 100644
index 0000000..e6b249b
--- /dev/null
+++ b/src/arena_wrapper/constants.py
@@ -0,0 +1,37 @@
+########################################################################################################################
+# The list of object classes that are allowed to be used for the simbot challenge. The following classes can be used
+# when the ObjectOutputType is OBJECT_CLASS.
+
+OBJECT_CLASS_ALLOW_LIST = ["stickynote"]
+########################################################################################################################
+# Action space
+
+NAVIGATIONAL_ACTIONS = ["Goto", "Move", "Rotate", "Look"]
+OBJECT_INTERACTION_ACTIONS = [
+ "Pickup",
+ "Open",
+ "Close",
+ "Break",
+ "Scan",
+ "Examine",
+ "Place",
+ "Pour",
+ "Toggle",
+ "Fill",
+ "Clean",
+]
+ACTIONS_REQUIRING_MASK = [
+ "Pickup",
+ "Open",
+ "Close",
+ "Break",
+ "Scan",
+ "Examine",
+ "Place",
+ "Pour",
+ "Toggle",
+ "Fill",
+ "Clean",
+ "Goto",
+]
+########################################################################################################################
diff --git a/src/arena_wrapper/enums/__init__.py b/src/arena_wrapper/enums/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/arena_wrapper/enums/object_output_wrapper.py b/src/arena_wrapper/enums/object_output_wrapper.py
new file mode 100644
index 0000000..4195581
--- /dev/null
+++ b/src/arena_wrapper/enums/object_output_wrapper.py
@@ -0,0 +1,7 @@
+from enum import Enum
+
+
+# creating enumerations for object output types
+class ObjectOutputType(str, Enum):
+ OBJECT_CLASS = "OBJECT_CLASS"
+ OBJECT_MASK = "OBJECT_MASK"
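+
+
+# Because the enum also subclasses str, its members compare equal to their plain string values
+# (e.g. ObjectOutputType.OBJECT_MASK == "OBJECT_MASK"), so the output type can be passed around
+# either as an enum member or as a raw string.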
diff --git a/src/arena_wrapper/exceptions.py b/src/arena_wrapper/exceptions.py
new file mode 100644
index 0000000..695a6b4
--- /dev/null
+++ b/src/arena_wrapper/exceptions.py
@@ -0,0 +1,2 @@
+class RaycastMissedException(Exception):
+ """Custom exception to handle the RaycastMissed exception raised by Arena."""
diff --git a/src/arena_wrapper/util/__init__.py b/src/arena_wrapper/util/__init__.py
new file mode 100644
index 0000000..5387ae6
--- /dev/null
+++ b/src/arena_wrapper/util/__init__.py
@@ -0,0 +1,59 @@
+import os
+import random
+import time
+
+import numpy as np
+
+
+def makedirs(directory):
+ os.makedirs(directory, exist_ok=True)
+
+
+def atomic_write(path, data):
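+ """Write data to a uniquely named temporary file, then atomically move it into place via os.rename."""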
+ tmp_path = "-".join([path, str(time.time()), str(random.random())])
+ mode = "w"
+
+ if type(data) is bytes:
+ mode = "wb"
+
+ with open(tmp_path, mode) as f:
+ f.write(data)
+ os.rename(tmp_path, path)
+
+
+DETECTION_SCREEN_WIDTH = 300
+DETECTION_SCREEN_HEIGHT = 300
+
+
+def decompress_mask(compressed_mask):
+ """decompress compressed mask array alfred todo: refactoring."""
+ mask = np.zeros((DETECTION_SCREEN_WIDTH, DETECTION_SCREEN_HEIGHT))
+ for start_idx, run_len in compressed_mask:
+ for idx in range(start_idx, start_idx + run_len):
+ mask[idx // DETECTION_SCREEN_WIDTH, idx % DETECTION_SCREEN_HEIGHT] = 1
+ return mask
+
+
+def compress_mask(seg_mask):
+ """compress mask array alfred todo: refactoring."""
+ run_len_compressed = (
+ []
+ ) # list of lists of run lengths for 1s, which are assumed to be less frequent.
+ idx = 0
+ curr_run = False
+ run_len = 0
+ for x_idx in range(len(seg_mask)):
+ for y_idx in range(len(seg_mask[x_idx])):
+ if seg_mask[x_idx][y_idx] == 1 and not curr_run:
+ curr_run = True
+ run_len_compressed.append([idx, None])
+ if seg_mask[x_idx][y_idx] == 0 and curr_run:
+ curr_run = False
+ run_len_compressed[-1][1] = run_len
+ run_len = 0
+ if curr_run:
+ run_len += 1
+ idx += 1
+ if curr_run:
+ run_len_compressed[-1][1] = run_len
+ return run_len_compressed
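+
+
+# Illustrative round trip of the run-length encoding above (a sketch, assuming the 300x300 mask size
+# defined by DETECTION_SCREEN_WIDTH/HEIGHT): compress_mask flattens the mask in row-major order and
+# stores [start_index, run_length] pairs for runs of 1s; decompress_mask inverts that encoding.
+#
+#     mask = np.zeros((DETECTION_SCREEN_WIDTH, DETECTION_SCREEN_HEIGHT))
+#     mask[0, 5:10] = 1
+#     compress_mask(mask)        # -> [[5, 5]]
+#     decompress_mask([[5, 5]])  # recovers the same 300x300 mask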
diff --git a/src/arena_wrapper/util/object_class_decoder.py b/src/arena_wrapper/util/object_class_decoder.py
new file mode 100644
index 0000000..2e3e441
--- /dev/null
+++ b/src/arena_wrapper/util/object_class_decoder.py
@@ -0,0 +1,729 @@
+import numpy as np
+
+from arena_wrapper import AppConfig
+
+
+readable_type_matching_dict = {
+ "Fork_01": "fork",
+ "CoffeeMug_Yellow": "coffeemug",
+ "CoffeeMug_Yellow_ContainsCoffee": "coffeemug", # note that this is substring of broken coffeemug
+ "CoffeeMug_Boss": "coffeemug",
+ "CoffeeMug_Boss_ContainsCoffee": "coffeemug",
+ "CoffeeCup_Open_Empty_01": "coffeemug",
+ "CoffeeCup_Open_Empty_02": "coffeemug",
+ "CoffeeMug_Yellow_Broken": "brokencoffeemug",
+ "CoffeeMug_Boss_Broken": "brokencoffeemug",
+ "CoffeePotEmpty_01": "coffeepot",
+ "CoffeePot_01": "coffeepot",
+ "CoffeePot_WithCoffee_01": "coffeepot",
+ "Jar_PeanutButter_01": "jar",
+ "Jar_PeanutButter_Eaten_01": "jar",
+ "Jar_Jam_Eaten_01": "jar",
+ "CandyJar_01": "jar",
+ "Jar_Jam_01": "jar",
+ "CanSodaNew_01": "sodacan",
+ "CanSoda_01": "sodacan",
+ "CanSodaNew_Open_01": "sodacan",
+ "CanSodaNew_Crushed_01": "sodacan",
+ "BreadSlice_01": "bread",
+ "CoffeeBeans_01": "coffeebeans",
+ "PaperCup_Crushed_01": "papercup",
+ "Knife_01": "knife",
+ "MilkCarton_01": "milkcarton",
+ "SandwichHalf_01": "sandwich",
+ "SandwichHalfEaten_01": "sandwich",
+ "FoodPlate_01": "plate",
+ "FoodPlateDirty_01": "plate",
+ "FoodPlateBroken_01": "plate",
+ "Bowl_01_Broken": "brokenbowl", # Check whether to just map this to bowl
+ "Bowl_ContainsMilk_01": "bowl",
+ # note that this is substring of brokenbowl and broken bowl needs to be matched first
+ "Bowl_ContainsCereal_01": "bowl",
+ "Bowl_ContainsMilkAndCereal": "bowl",
+ "Bowl_ContainsCoffee": "bowl",
+ "Bowl_01": "bowl",
+ "CakeSlice_02": "cake",
+ "CakeCut_02": "cake",
+ "SM_Prop_Table_02": "table",
+ "Table_Metal_01": "table",
+ "TableRound_02": "table",
+ "TableRoundSmall_02": "table",
+ "PaperCup_01": "papercup",
+ "CandyBar_01": "candybar",
+ "CandyBar_Open_01": "candybar",
+ "CandyBar_Eaten_01": "candybar",
+ "Shelf_01": "shelf",
+ "AP_Prop_Shelf_Wall_04": "shelf",
+ "AP_Prop_Shelf_06": "shelf",
+ "Shelves_Tall_01": "shelf",
+ "AP_Prop_Shelf_Wall_Laser": "shelf",
+ "AP_Prop_Shelf_Wall_FreezeRay": "shelf",
+ "Bookshelf_Wooden_01": "bookshelf", # Figure out how to distinguish this from sb phrase shelf
+ "KitchenCabinet_02": "cabinet", # Notice that shelf and cabinet are different
+ "KitchenCabinet_01": "cabinet",
+ "KitchenCabinet_01_Trapped": "cabinet",
+ "KitchenCounterDrawer_03": "drawer", # Keeping this separate from shelf/cabinet
+ "KitchenCounterDrawer_02": "drawer", # Keeping this separate from shelf/cabinet
+ "ReceptionDesk": "desk", # Different from table
+ "Desk_01": "desk",
+ "ManagerDesk": "desk",
+ "AP_Prop_Desk_Red": "desk",
+ "AP_Prop_Desk_Green": "desk",
+ "AP_Prop_Desk_Blue": "desk",
+ "AP_Prop_Desk_Yellow": "desk",
+ "Computer_Monitor_New": "monitor",
+ "Computer_Monitor_01": "monitor",
+ "V_Monitor_Laser": "lasermonitor",
+ "V_Monitor_Gravity": "monitor",
+ "V_Monitor_Embiggenator": "monitor",
+ "V_Monitor_FreezeRay": "freezeraymonitor",
+ "V_Monitor_Portal": "monitor",
+ "Computer_Monitor_Broken": "monitor",
+ "Cake_02": "cake",
+ "Pear_01": "pear",
+ "Pear_01_Eaten": "pear",
+ "GravityPad": "gravitypad",
+ "Hammer": "hammer",
+ "Burger_04": "burger",
+ "BreadLoaf": "bread",
+ "ColorChanger_Button_Blue": "bluecolorchangerbutton",
+ "ColorChanger_Button_Green": "greencolorchangerbutton",
+ "ColorChanger_Button_Red": "redcolorchangerbutton",
+ "VendingMachine_01_E5_Button": "vendingmachinebutton", # See how to differentiate these if needed
+ "VendingMachine_01_E7_Button": "vendingmachinebutton",
+ "VendingMachine_01_B4_Button": "vendingmachinebutton",
+ "VendingMachine_01_M8_Button": "vendingmachinebutton",
+ "VendingMachine_01": "vendingmachine",
+ "BurgerEaten_04": "burger",
+ "TrashCan_01": "trashcan",
+ "Toast_01": "toast",
+ "Toast_02": "toast",
+ "Toast_03": "toast",
+ "Toast_04": "toast",
+ "Toast_04_Jam": "toast",
+ "Toast_04_PBJ": "toast",
+ "Toast_04_Jam_Eaten": "toast",
+ "Toast_04_PBJ_Eaten": "toast",
+ "Toast_04_Eaten": "toast",
+ "Cereal_Box_01": "cerealbox",
+ "YesterdayMachine_01": "yesterdaymachine",
+ "Microwave_01": "microwave",
+ "FridgeUpper_02": "fridgeupper",
+ "FridgeLower_02": "fridgelower",
+ "WallClock_01": "clock",
+ "Apple": "apple",
+ "AppleSlice_01": "apple",
+ "AppleCut_01": "apple",
+ "Apple_Eaten": "apple",
+ "DartBoard": "dartboard",
+ "BreadLoaf_Sliced": "bread",
+ "Dart": "dart",
+ "Laser": "laser",
+ "Laser_Tip": "lasertip",
+ "Radio_01": "radio",
+ "TeslaCoil": "teslacoil",
+ "Door_01": "door",
+ "Spoon_01": "spoon",
+ "Banana_01": "banana",
+ "BananaBunch_01": "banana",
+ "Banana_Peeled_01": "banana",
+ "Banana_Eaten_01": "banana",
+ "Trophy01": "trophy",
+ "FireAlarm_01": "firealarm",
+ "LightSwitch_01": "switch",
+ "Floppy_AntiVirus": "floppy",
+ "WaterCooler_01": "watercooler",
+ "Toaster_02": "toaster",
+ "PortalGenerator": "portalgenerator",
+ "Record_01": "record",
+ "Record_01_Broken": "record",
+ "ForkLift": "forklift",
+ "RoboticArm_01": "roboticarm",
+ "CoffeeMaker_01": "coffeemaker",
+ "ColorChangerStation": "colorchangerstation",
+ "TAMPrototypeHead_01": "tamhead",
+ "EAC_Machine": "eacmachine",
+ "MissionItemHolder": "missionitemholder",
+ "KitchenStool_01": "stool",
+ "WaterPuddle_01": "puddle",
+ "FuseBox_02": "fusebox",
+ "FuseBox_01": "fusebox",
+ "FuseBox_01_Lever": "lever",
+ "PackingBox": "box",
+ "KitchenCounterBase_03": "counter",
+ "KitchenCounterBase_02": "counter",
+ "KitchenCounterTop_02": "counter",
+ "CounterBase_03": "counter",
+ "KitchenCounter01": "counter",
+ "CoffeeUnMaker_01": "unmaker",
+ "FulllPaperTray_01": "tray",
+ "EmptyPaperTray": "tray",
+ "KitchenCounterSink_01": "sink",
+ "Handsaw": "handsaw",
+ "Screwdriver": "screwdriver",
+ "FreezeRay": "freezeray",
+ "Whiteboard_CoffeeUnmaker": "whiteboard",
+ "Whiteboard_YesterdayMachine": "whiteboard",
+ "WhiteBoard_01": "whiteboard",
+ "Broken_Cord_01": " cord", # Space needed to differentiate from record
+ "Printer_3D": "printer",
+ "Embiggenator": "embiggenator",
+ "Floppy_Virus": "virus",
+ "WarningSign_01": "warningsign",
+ "Fork_Lift": "forklift",
+ "Carrot_01": "carrot",
+ "Carrot_Eaten_01": "carrot",
+ "PowerOutlet_01": "poweroutlet",
+ "Laser_CircuitBoard": "circuitboard",
+ "PinBoard_01": "pinboard",
+ "PinBoard_02": "pinboard",
+ "FireExtinguisher_01": "extinguisher",
+ "PieFruit_01": "fruitpie",
+ "PieFruitSlice_01": "fruitpie",
+ "PieFruitCut_01": "fruitpie",
+ "SafetyBarrier_02": "safetybarrier",
+ "DeskFan_Broken_01": "fan",
+ "DeskFan_New_01": "fan",
+ "CoffeeCup_Lid_01": "lid",
+ "CableFrayed_01": "cable",
+ "Printer_Cartridge": "cartridge",
+ "Donut_01": "donut",
+ "Donut_Eaten_01": "donut",
+ "StickyNote": "stickynote",
+ "Security_Button": "securitybutton",
+ "AutoChompers": "chompers",
+ "Laser_ControlPanel": "controlpanel",
+ # MS6 objects
+ "Office_Chair": "chair",
+ "Manager_Chair": "chair",
+ "Keyboard": "keyboard",
+ "Printer_Cartridge_Lever": "lever",
+ "ActionFigure": "actionfigure",
+ "Cutting_Board": "cutting board",
+ "PBJ_Sandwich": "sandwich",
+ "TeslaCoil_Small": "tesla coil",
+ "Printer_Cartridge_Mug": "mug",
+ "Printer_Cartridge_Figure": "actionfigurecartridge",
+ "Floppy_AntiVirus_Broken": "broken floppy",
+ "Floppy_Virus_Broken": "broken floppy",
+ "Printer_Cartridge_Hammer": "hammer",
+ "Warehouse_Boxes": "warehouse boxes",
+ "Radio_01_Broken": "broken radio",
+ "LaserBase_toy": "laser toy",
+ # Some not used ones are mapped to a temporary string
+ "SM_Bld_Wall_Window_Blinds_Open_04": "notused",
+ "SM_Prop_FlatPackCardboardBoxes_03": "notused",
+ "SK_Veh_Pickup_01_ToolBox": "notused",
+ "SM_Prop_Paper_Pile_01": "notused",
+ "AP_Prop_Lab_Tank_02": "notused",
+ "AP_Prop_Note_05": "notused",
+ "sign_short_caution_electrical": "notused",
+ "sign_tall_caution_carrot": "notused",
+ "sign_short_quantum_1": "notused",
+ "sign_tall_poster_tam_2": "notused",
+ "SM_Prop_PalletStack_02": "notused",
+ "SM_Prop_Book_Group_01": "notused",
+ "sign_diamond_carrot": "notused",
+ "AP_Prop_Minigolf_Club_01": "notused",
+ "SM_Prop_Paper_Pile_03": "notused",
+ "SM_Prop_Book_Group_07": "notused",
+ "SM_Tool_Drill_Chuck_01": "notused",
+ "SM_Prop_Book_Group_06": "notused",
+ "AP_Prop_Cabinets_01": "notused",
+ "SM_Prop_FolderTray_01": "notused",
+ "sign_short_caution_gravity_2": "notused",
+ "SM_Prop_Book_Group_05": "notused",
+ "SM_Prop_FlatPackCardboardBoxes_04": "notused",
+ "SM_Prop_FolderTray_04": "notused",
+ "SM_Bld_Wall_Metal_Slide_02": "notused",
+ "SM_Bld_Door_02": "notused",
+ "sign_short_poster_delwan_2": "notused",
+ "SM_Prop_Drink_Dispenser_01": "notused",
+ "SM_Prop_Paper_06": "notused",
+ "SM_Prop_Folder_PVC_01": "notused",
+ "AP_Prop_CorkBoard_02": "notused",
+ "SM_Prop_Warehouse_Light_04": "notused",
+ "sign_short_breakroom_2": "notused",
+ "SM_Prop_Buttons_05": "notused",
+ "SM_Prop_Folder_Holder_02": "notused",
+ "sign_short_warehouse_1": "notused",
+ "AP_Prop_Barrel_Water_01": "notused",
+ "AP_Prop_Folder_PVC_02": "notused",
+ "SM_Prop_Server_Node_01": "notused",
+ "SM_Prop_NetCable_03": "notused",
+ "SM_Prop_Book_Group_08": "notused",
+ "AP_Prop_Couch_06": "notused",
+ "sign_tall_caution_shrink": "notused",
+ "AP_Prop_Barrel_Open_01": "notused",
+ "SM_Prop_NotePad_01": "notused",
+ "SM_Prop_Book_Phone_Open_01": "notused",
+ "sign_tall_caution_freeze": "notused",
+ "sign_short_caution_restricted_1": "notused",
+ "SM_Item_Clipboard_01": "notused",
+ "SM_Prop_Cart_01": "notused",
+ "AP_Prop_Lab_Tank_01": "notused",
+ "sign_diamond_gravity": "notused",
+ "SM_Prop_Book_Group_02": "notused",
+ "SM_Prop_Book_Magazine_01": "notused",
+ "AP_Prop_Lab_MachinePanel_01": "notused",
+ "sign_short_caution_gravity_1": "notused",
+ "SM_Prop_Oxygen_Tank": "notused",
+ "AP_Prop_Fire_Extinguisher_01": "notused",
+ "SM_Prop_Folder_Holder_04": "notused",
+ "SM_Prop_FolderTray_03": "notused",
+ "AP_Prop_Plant_09": "notused",
+ "SM_Prop_Folder_PVC_02": "notused",
+ "SM_Prop_Lighting_Cable_Bulb_01": "notused",
+ "AP_Prop_Pen_01": "notused",
+ "SM_Prop_Wirespool_01": "notused",
+ "SM_Prop_Warehouse_Boxes_Stacked_04": "notused",
+ "sign_diamond_laser": "notused",
+ "sign_short_poster_delwan_1": "notused",
+ "SM_Prop_Book_Group_04": "notused",
+ "SM_Prop_Paper_04": "notused",
+ "SM_Prop_Server_Cabinet_01": "notused",
+ "sign_short_office_1": "notused",
+ "SM_Prop_AirVent_Wall_01": "notused",
+ "AP_Prop_Photocopier_01": "notused",
+ "SM_Prop_Certificate_01": "notused",
+ "SM_Prop_Wirespool_Small_01": "notused",
+ "AP_Prop_Safety_Barrier_02": "notused",
+ "sign_short_caution_shrink": "notused",
+ "sign_short_caution_quantum_2": "notused",
+ "SM_Prop_AirVent_01": "notused",
+ "AP_Prop_Pen_06": "notused",
+ "SM_Prop_PowerBoxes_01": "notused",
+ "sign_diamond_freeze": "notused",
+ "SM_Prop_Folder_Holder_01": "notused",
+ "AP_Prop_Bucket_02": "notused",
+ "AP_Prop_CardboardBox_Open_05": "notused",
+ "AP_Prop_Lab_Clamp_02_Arm_01": "notused",
+ "sign_tall_caution_electrical": "notused",
+ "sign_tall_poster_tam_1": "notused",
+ "SM_Prop_Folder_Holder_03": "notused",
+ "SM_Prop_Book_Group_03": "notused",
+ "SM_Prop_Folder_Manila_04": "notused",
+ "AP_Prop_Plant_01": "notused",
+ "Laser_Tip_Broken": "notused",
+ "AP_Prop_Lab_MachinePanel_02": "notused",
+ "sign_diamond_shrink": "notused",
+ "SM_Prop_Warehouse_Boxes_Stacked_03": "notused",
+ "sign_square_breakroom": "notused",
+ "SM_Prop_Powercable_02": "notused",
+ "AP_Prop_CardboardBox_Stack_02": "notused",
+ "SM_Tool_Buffer_01_Battery": "notused",
+ "SM_Prop_Calender_01": "notused",
+ "AP_Item_Tape_01": "notused",
+ "SM_Prop_Oxygen_Tank_Large": "notused",
+ "SM_Prop_Powercable_01": "notused",
+ "AP_Prop_Couch_02": "notused",
+ "SM_Prop_Papers_01": "notused",
+ "SM_Prop_Crate_Stack_01": "notused",
+ "SM_Prop_Plastic_Pipe_Spool_01": "notused",
+ "AP_Bld_Wall_Glass_Large_Door_01": "notused",
+ "sign_short_quantum_2": "notused",
+ "sign_diamond_fire": "notused",
+ "sign_short_robotics_1": "notused",
+ "SM_Prop_Powercable_03": "notused",
+ "SM_Prop_Folder_Manila_01": "notused",
+ "AP_Prop_Cellotape_01": "notused",
+ "sign_short_poster_delwan_4": "notused",
+ "SM_Tool_Handsaw_01": "notused",
+ "SM_Prop_Buttons_02": "notused",
+ "AP_Prop_Bin_Rubbish_01": "notused",
+ "SM_Prop_Scales_01": "notused",
+ "SM_Sign_Exit_02": "notused",
+ "sign_short_robotics_2": "notused",
+ "SM_Prop_Paper_05": "notused",
+ "SM_Prop_Warehouse_Platform_Trolley_01": "notused",
+ "sign_tall_caution_robotics": "notused",
+ "sign_short_poster_delwan_3": "notused",
+ "AP_Prop_Pen_03": "notused",
+ "AP_Item_Tool_Board": "notused",
+ "AP_Prop_PaperTray_01_Full_01": "notused",
+ "AP_Prop_Generator_Large_02": "notused",
+ "AP_Bld_Ceiling_Aircon_01": "notused",
+ "sign_tall_caution_laser": "notused",
+ "sign_short_breakroom_1": "notused",
+ "SM_Prop_Folder_Manila_02": "notused",
+ "AP_Prop_Print_Tube_01": "notused",
+ "Lab_Terminal": "notused",
+ "Deembiggenator_Crates": "notused",
+}
+
+
+def parse_metadata(metadata, readable_type_matching_dict):
+ # Parses the metadata to get objects, their locations, states, affordances, "objectType" and IDs, and
+ # also returns the location of the bot itself. Takes as input the metadata object returned by the simbot
+ # environment and the readable_type_matching_dict, which maps each RG-defined object type to a human
+ # readable object type (this could be statically defined beforehand since it remains fixed for the entire task).
+ # Returns a dictionary of lists of dictionaries whose top-level keys are human readable object types; each key
+ # maps to a list containing the information of every object in the RG environment belonging to that object type
+ # category, and the dictionaries in this list have the same structure as the corresponding metadata entries.
+ if metadata is None:
+ return None, None
+ data = metadata["objects"] # List of dictionaries of all objects including the bot itself
+ unique_object_type_dict = {}
+ bot_position = None
+ for elt in data:
+ if "TAM_" in elt["objectID"]: # This is the bot
+ # Dictionary with x, y and z as keys. Note, the (x,z) location is the coordinate
+ # location and y is the height in unity
+ bot_position = elt["position"]
+
+ else:
+ if elt["objectType"] not in readable_type_matching_dict:
+ continue
+ readable_object_type = readable_type_matching_dict[
+ elt["objectType"]
+ ]  # Dictionary used to map the auxiliary RG types to human readable ones
+ if readable_object_type in unique_object_type_dict:
+ # unique_object_type_dict[objectType].append({"objectID": elt["objectID"], "objectType": elt[
+ # "objectType"], "position": elt["position"], "supportedVerbs": elt["supportedVerbs"],
+ # "supportedStates": elt["supportedStates"], "currentStates": elt["currentStates"],
+ # "parentReceptacle": elt["parentReceptacle"]}) #Retain selective entries from the main metadata
+ # dictionary
+
+ # Add the whole metadata entry for that object type to the list corresponding to its
+ # readable name
+ unique_object_type_dict[readable_object_type].append(elt)
+ else:
+ # Create a new list corresponding to this human readable object type
+ unique_object_type_dict[readable_object_type] = [elt]
+ # TODO: Will need to handle human readable synonyms to object type too
+ return unique_object_type_dict, bot_position
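+
+
+# Illustrative shape of parse_metadata()'s return value (a sketch with hypothetical IDs and coordinates),
+# assuming the metadata contains a single bowl plus the TAM itself:
+#
+#     unique_object_type_dict == {"bowl": [<the full metadata entry for the Bowl_01 instance>]}
+#     bot_position == {"x": 1.0, "y": 0.5, "z": -2.0}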
+
+
+def parse_metadata_only_bot_position(metadata, readable_type_matching_dict):
+ # Use this if using preprocessed unique_object_type_dict
+ data = metadata["objects"] # List of dictionaries of all objects including the bot itself
+ for elt in data:
+ if "TAM_" in elt["objectID"]: # This is the bot
+ # Dictionary with x, y and z as keys. Note, the (x,z) location is the coordinate location and y is
+ # the height in unity
+ bot_position = elt["position"]
+ break # Bot position found, can break
+ else:
+ continue
+
+ return bot_position
+
+
+def locate_tams_room(metadata):
+ # Use this to return the room TAM is in
+ data = metadata["objects"] # List of dictionaries of all objects including the bot itself
+ bot_room = None
+ for elt in data:
+ if "TAM_" in elt["objectID"]: # This is the bot
+ # Dictionary with x, y and z as keys. Note, the (x,z) location is the coordinate
+ # location and y is the height in unity
+ bot_room = elt["currentRoom"]
+ break # Bot position found, can break
+ else:
+ continue
+ return bot_room
+
+
+def find_matching_object(
+ predicted_object, object_type_dictionary, bot_location, best_object_match_index=0
+):
+ if best_object_match_index != 0: # Handle case for second best separately
+ return find_next_best_matching_object(
+ predicted_object, object_type_dictionary, bot_location, best_object_match_index
+ )
+
+ # predicted_object is the human readable object type that was predicted.
+ # object_type_dictionary is a dictionary with the human readable object types as keys and, as values, lists of
+ # dictionaries containing all instances of that object type. It is used to find the correct object id based on
+ # the nearest distance from the bot. Returns the object_id of the best matched object.
+ if predicted_object not in object_type_dictionary:
+ if AppConfig.runtime_platform == "Mac":
+ return predicted_object
+ else:
+ return None
+ matched_object_list = object_type_dictionary[predicted_object]
+ min_dist = np.inf
+ best_object_id = None
+ for obj in matched_object_list:
+ object_location = obj["position"]
+ dist = (object_location["x"] - bot_location["x"]) ** 2 + (
+ object_location["z"] - bot_location["z"]
+ ) ** 2
+ if dist < min_dist:
+ min_dist = dist
+ best_object_id = obj["objectID"]
+ return best_object_id
+
+
+def find_next_best_matching_object(
+ predicted_object, object_type_dictionary, bot_location, best_object_match_index
+):
+ # Finds and returns the second closest matching object to the bot
+ matched_object_list = object_type_dictionary[predicted_object]
+ min_dist = np.inf
+ second_min_dist = np.inf
+ best_object_id = None
+ second_object_id = None
+
+ for obj in matched_object_list:
+ object_location = obj["position"]
+ dist = (object_location["x"] - bot_location["x"]) ** 2 + (
+ object_location["z"] - bot_location["z"]
+ ) ** 2
+ if dist < min_dist:
+ second_min_dist = min_dist
+ min_dist = dist
+ second_object_id = best_object_id
+ best_object_id = obj["objectID"]
+
+ elif dist < second_min_dist:
+ second_min_dist = dist
+ second_object_id = obj["objectID"]
+
+ return second_object_id
+
+
+def find_object_ids_from_type(needed_object_types, metadata):
+ needed_object_ids = []
+ data = metadata["objects"] # List of dictionaries of all objects including the bot itself
+ for object_type in needed_object_types:
+ for elt in data:
+ if object_type == elt["objectType"]:
+ needed_object_ids.append(elt["objectID"])
+ break
+ else:
+ continue
+
+ return needed_object_ids
+
+
+# Return all object IDs of the given type (for example, all computer monitors)
+def find_all_object_ids_from_type(metadata, needed_object_type="Computer_Monitor_01"):
+ needed_object_ids = []
+ data = metadata["objects"] # List of dictionaries of all objects including the bot itself
+ for elt in data:
+ if needed_object_type == elt["objectType"]:
+ needed_object_ids.append(
+ elt["objectID"]
+ ) # Keep running to get all object IDs of this type
+ else:
+ continue
+
+ return needed_object_ids
+
+
+def identify_correct_shelf(
+ commands,
+ metadata,
+ instr,
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ i,
+):
+ if "second" in instr or "two" in instr: # Special case needed to support freeze ray mission
+ predicted_object_id = find_all_object_ids_from_type(
+ metadata, needed_object_type="AP_Prop_Shelf_Wall_04"
+ )[0]
+ elif "first" in instr or "one" in instr: # Special case needed to support Laser ray mission
+ predicted_object_id = find_all_object_ids_from_type(
+ metadata, needed_object_type="AP_Prop_Shelf_Wall_Laser"
+ )[0]
+ else:
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object, unique_object_type_dict, bot_position, best_object_match_index
+ )
+ return predicted_object_id
+
+
+def convert_object_class_to_id(commands, metadata, instr=None):
+ """#Takes in the response object returned by action inference service and converts object class
+ names to object IDs using the metadata
+
+ #instr (NLP command) needed to implement certain rules
+ """
+ # Used to parse the metadata to get objects, their locations, their states, their affordances, "objectType" and
+ # their ID's. This is returned as a dictionary with keys as human readable object types and values as list of all
+ # the objects in RG environemnt corresponding to that object type. The object specific information is encoded as
+ # a dictionary with same structure as the metadata. Also returns the location of the bot itself
+ unique_object_type_dict, bot_position = parse_metadata(metadata, readable_type_matching_dict)
+
+ for i in range(len(commands)):
+ command = commands[i]
+ best_object_match_index = 0 # Default. Used to support second best matches
+
+ if (
+ command["type"] == "Dialog"
+ or command["type"] == "CameraChange"
+ or command["type"] == "Move"
+ or command["type"] == "Rotate"
+ or command["type"] == "Look"
+ or ("object" not in command[command["type"].lower()])
+ ):
+ continue # No object here, can skip
+ else:
+ objects = command[command["type"].lower()]["object"]
+ if "officeRoom" in objects: # Room navigation commands don't need object-id
+ continue
+ if "name" in objects:
+ predicted_object = objects["name"]
+ if (
+ predicted_object is not None
+ and predicted_object == "monitor"
+ and locate_tams_room(metadata) == "MainOffice"
+ and command["type"] == "Goto"
+ ):
+ computers = find_all_object_ids_from_type(
+ metadata, needed_object_type="Computer_Monitor_01"
+ )
+                    # Sorting keeps them in a consistent order, so "first", "second" and "third"
+                    # always refer to the same monitors
+ computers.sort()
+ if "first" in instr or "left" in instr:
+ predicted_object_id = computers[0]
+ elif "second" in instr or "middle" in instr or "center" in instr:
+ predicted_object_id = computers[1]
+ elif "third" in instr or "right" in instr:
+ predicted_object_id = computers[2]
+ elif "one" in instr: # Putting this separately as user might say "second one"
+ predicted_object_id = computers[0]
+ elif "two" in instr:
+ predicted_object_id = computers[1]
+ elif "three" in instr:
+ predicted_object_id = computers[2]
+ else:
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "name"
+ ] = predicted_object_id
+ elif predicted_object == "shelf" and locate_tams_room(metadata) == "Lab1":
+ predicted_object_id = identify_correct_shelf(
+ commands,
+ metadata,
+ instr,
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ i,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "name"
+ ] = predicted_object_id
+ elif predicted_object == "vendingmachinebutton" and "button" in instr:
+ # Always press this button if a vending machine button is being pressed
+ predicted_object_id = "VendingMachine_01_E5_Button_10000"
+ commands[i][commands[i]["type"].lower()]["object"][
+ "name"
+ ] = predicted_object_id
+ elif (
+ command["type"] == "Goto" and predicted_object == "monitor" and "next" in instr
+ ): # To enable "Goto the next computer"
+                    # This tells find_matching_object to find the next best match besides the
+                    # present one
+ best_object_match_index = 1
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "name"
+ ] = predicted_object_id
+ elif predicted_object is not None:
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "name"
+ ] = predicted_object_id
+ else:
+ print("Predicting object: None")
+ if "source" in objects:
+ predicted_object = objects["source"]
+ if predicted_object == "shelf" and locate_tams_room(metadata) == "Lab1":
+ predicted_object_id = identify_correct_shelf(
+ commands,
+ metadata,
+ instr,
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ i,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "source"
+ ] = predicted_object_id
+ elif (
+ predicted_object is not None
+ and predicted_object in unique_object_type_dict
+ and predicted_object != "TAM_1"
+ ):
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "source"
+ ] = predicted_object_id
+ else:
+ print("Keeping original source")
+ if "destination" in objects:
+ predicted_object = objects["destination"]
+ if predicted_object == "shelf" and locate_tams_room(metadata) == "Lab1":
+ predicted_object_id = identify_correct_shelf(
+ commands,
+ metadata,
+ instr,
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ i,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "destination"
+ ] = predicted_object_id
+ elif (
+ predicted_object is not None
+ and predicted_object in unique_object_type_dict
+ and predicted_object != "TAM_1"
+ ):
+ # This is used to find the correct object id based on nearest distance from the bot and return
+ # its object ID
+ predicted_object_id = find_matching_object(
+ predicted_object,
+ unique_object_type_dict,
+ bot_position,
+ best_object_match_index,
+ )
+ commands[i][commands[i]["type"].lower()]["object"][
+ "destination"
+ ] = predicted_object_id
+ else:
+ print("Keeping original destination")
+ return commands
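+
+
+# Illustrative transformation (a sketch of the shapes involved, not real data):
+#     commands = [{"type": "Goto", "goto": {"object": {"name": "monitor"}}}]
+#     convert_object_class_to_id(commands, metadata, instr="go to the first computer")
+#     # -> the "name" field is replaced with a concrete object ID such as "Computer_Monitor_01_1000"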
diff --git a/src/simbot_offline_inference/__init__.py b/src/simbot_offline_inference/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/src/simbot_offline_inference/__main__.py b/src/simbot_offline_inference/__main__.py
new file mode 100644
index 0000000..6e93a60
--- /dev/null
+++ b/src/simbot_offline_inference/__main__.py
@@ -0,0 +1,32 @@
+import typer
+
+from simbot_offline_inference.commands import (
+ generate_trajectories,
+ print_challenges_per_high_level_key,
+ print_high_level_keys,
+ run_background_services,
+ run_their_evaluation,
+ run_trajectories,
+ validate_cdfs,
+ validate_generated_missions,
+)
+
+
+app = typer.Typer(name="Run inference offline.", no_args_is_help=True, add_completion=False)
+
+
+app.command(rich_help_panel="Run")(run_background_services)
+
+app.command(rich_help_panel="Preparation")(validate_cdfs)
+app.command(rich_help_panel="Preparation")(validate_generated_missions)
+app.command(rich_help_panel="Preparation")(print_high_level_keys)
+app.command(rich_help_panel="Preparation")(print_challenges_per_high_level_key)
+
+app.command(rich_help_panel="Generation")(generate_trajectories)
+app.command(rich_help_panel="Generation")(run_trajectories)
+
+app.command(rich_help_panel="Evaluation")(run_their_evaluation)
+
+
+if __name__ == "__main__":
+ app()
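+
+
+# Typer derives kebab-case command names from the functions registered above, so an invocation
+# would look roughly like this (illustrative):
+#     python -m simbot_offline_inference validate-cdfs <path-to-missions>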
diff --git a/src/simbot_offline_inference/_version.py b/src/simbot_offline_inference/_version.py
new file mode 100644
index 0000000..2e9d76d
--- /dev/null
+++ b/src/simbot_offline_inference/_version.py
@@ -0,0 +1,7 @@
+# Store the version here so:
+# 1) we don't load dependencies by storing it in __init__.py
+# 2) we can import it in setup.py for the same reason
+# 3) we can import it into your module
+#
+# This is automatic, therefore DO NOT manually change this file!
+__version__ = "4.33.0"
diff --git a/src/simbot_offline_inference/arena_action_builder.py b/src/simbot_offline_inference/arena_action_builder.py
new file mode 100644
index 0000000..4332039
--- /dev/null
+++ b/src/simbot_offline_inference/arena_action_builder.py
@@ -0,0 +1,95 @@
+import random
+from functools import partial
+from typing import Any, Literal
+
+from convert_case import title_case
+
+
+ArenaAction = dict[str, Any]
+
+
+class ArenaActionBuilder:
+ """Generate actions for the Arena."""
+
+ def random_navigation(self) -> ArenaAction:
+ """Return a random action."""
+ methods = [
+ partial(self.rotate, direction="left"),
+ partial(self.rotate, direction="right"),
+ partial(self.move, direction="backward"),
+ partial(self.move, direction="forward"),
+ # Look actions disabled because more control is needed to stop the agent going
+ # upside-down
+ # partial(self.look, direction="up"),
+ # partial(self.look, direction="down"),
+ ]
+
+ return random.choice(methods)(magnitude=random.randint(0, 360)) # noqa: WPS432
+
+ def get_language_instruction_from_action(self, action: ArenaAction) -> str:
+ """Return a language instruction from an action."""
+ switcher = {
+ "Rotate": f"Rotate {action['rotation']['direction']}",
+ "Move": f"Move {action['move']['direction']}",
+ "Look": f"Look {action['look']['direction']}",
+ }
+ try:
+ return switcher[action["type"]]
+ except KeyError:
+ return "Rotate right"
+
+ def dummy_action(self) -> ArenaAction:
+ """Create a dummy action."""
+ return self.rotate("right", 0)
+
+ def rotate(self, direction: Literal["left", "right"], magnitude: int = 45) -> ArenaAction:
+ """Create a rotate action."""
+ # Make sure the magnitude is in the range [0, 360)
+ magnitude = magnitude % 360 # noqa: WPS432
+
+ return {
+ "id": "1",
+ "type": "Rotate",
+ "rotation": {
+ "direction": title_case(direction),
+ "magnitude": magnitude,
+ },
+ }
+
+ def move(self, direction: Literal["forward", "backward"], magnitude: int = 1) -> ArenaAction:
+ """Create a move action."""
+ # Force the magnitude to be 1
+ magnitude = 1
+
+ return {
+ "id": "1",
+ "type": "Move",
+ "move": {
+ "direction": title_case(direction),
+ "magnitude": magnitude,
+ },
+ }
+
+ def look(self, direction: Literal["up", "down"], magnitude: int = 30) -> ArenaAction:
+ """Create a look action."""
+ magnitude = magnitude % 60
+ return {
+ "id": "1",
+ "type": "Look",
+ "look": {
+ "direction": title_case(direction),
+ "magnitude": magnitude,
+ },
+ }
+
+ def viewpoint(self, viewpoint: str) -> ArenaAction:
+ """Go to a viewpoint."""
+ return {
+ "id": "1",
+ "type": "Goto",
+ "goto": {
+ "object": {
+ "goToPoint": viewpoint,
+ },
+ },
+ }
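+
+
+# Illustrative usage (a sketch; the builder only constructs action payloads, it does not send them):
+#     builder = ArenaActionBuilder()
+#     action = builder.rotate("left", magnitude=90)
+#     builder.get_language_instruction_from_action(action)  # -> "Rotate Left"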
diff --git a/src/simbot_offline_inference/arena_evaluator.py b/src/simbot_offline_inference/arena_evaluator.py
new file mode 100644
index 0000000..d5c5967
--- /dev/null
+++ b/src/simbot_offline_inference/arena_evaluator.py
@@ -0,0 +1,183 @@
+from typing import Any
+
+import httpx
+from loguru import logger
+
+from arena_missions.structures import CDF, MissionTrajectory
+from arena_wrapper.exceptions import RaycastMissedException
+from simbot_offline_inference.inference_controller import SimBotInferenceController
+from simbot_offline_inference.metrics import EvaluationMetrics, WandBCallback
+
+
+class SimBotArenaEvaluator:
+ """Handle the evaluation of the experience hub on the arena."""
+
+ def __init__(
+ self,
+ inference_controller: SimBotInferenceController,
+ evaluation_metrics: EvaluationMetrics,
+ wandb_callback: WandBCallback,
+ *,
+ enforce_successful_preparation: bool = False,
+ should_resume_previous_wandb_run: bool = False,
+ ) -> None:
+ self._inference_controller = inference_controller
+ self._evaluation_metrics = evaluation_metrics
+ self._wandb_callback = wandb_callback
+
+ self._enforce_successful_preparation = enforce_successful_preparation
+ self._should_resume_previous_wandb_run = should_resume_previous_wandb_run
+
+ def run_evaluation(self, trajectories: list[MissionTrajectory]) -> None:
+ """Run the evaluation on all the test data."""
+ with self._inference_controller:
+ self._wandb_callback.start_evaluation(resume=self._should_resume_previous_wandb_run)
+
+ if self._should_resume_previous_wandb_run:
+                # restore_checkpoint returns the loaded metrics object, so keep the reference
+                self._evaluation_metrics = self._evaluation_metrics.restore_checkpoint()
+
+ for instance in trajectories:
+ self.run_evaluation_step(instance)
+
+ self._wandb_callback.finish_evaluation()
+ self._evaluation_metrics.delete_checkpoint()
+
+ logger.info("Finished evaluation!")
+
+ def run_evaluation_step(self, trajectory: MissionTrajectory) -> None:
+ """Run a single evaluation step, with guards in case something goes wrong."""
+ if self._has_mission_been_evaluated(trajectory):
+ logger.info("Skipping mission because it was already evaluated.")
+ return None
+
+ logger.info(f"Running evaluation for '{trajectory.session_id}'")
+
+ try:
+ return self.run_trajectory_in_the_arena(trajectory)
+
+ except httpx.ConnectTimeout:
+ logger.error("Failed to establish a connection to the arena.")
+
+ if self._inference_controller.restart_arena():
+ logger.info("Restarted the arena. Retrying...")
+ return self.run_trajectory_in_the_arena(trajectory)
+
+ except RaycastMissedException:
+ logger.error("Current trajectory will be ignored due to a RaycastMissed exception.")
+
+ if self._inference_controller.restart_arena():
+ logger.info("Successfully restarted arena. Skipping current trajectory...")
+ return None
+
+ raise RuntimeError("Failed to run the trajectory in the arena.")
+
+ def run_trajectory_in_the_arena(self, trajectory: MissionTrajectory) -> None:
+ """Run a single trajectory in the arena, from start to finish."""
+ preparation_session_id = trajectory.create_preparation_session_id()
+
+ self._wandb_callback.start_trajectory(trajectory, preparation_session_id)
+
+ try:
+ self.prepare_arena_for_trajectory(trajectory, preparation_session_id)
+ except AssertionError:
+ logger.warning("Preparation failed. Skipping...")
+ self._finish_trajectory(
+ trajectory, actions_for_session=[], processed_utterance_counter=0
+ )
+ return
+
+ actions_for_session: list[Any] = []
+ processed_utterance_counter = 0
+ for utterance in trajectory.utterances:
+ if self._inference_controller.is_all_goals_complete():
+ logger.warning("All goals are complete but there are still utterances left.")
+ break
+
+ try:
+ actions_for_utterance = self._inference_controller.handle_utterance(
+ trajectory.session_id, utterance
+ )
+ except AssertionError:
+ logger.error("Unrecoverable exception occurred, exiting...")
+ break
+
+ actions_for_session.extend(actions_for_utterance)
+ processed_utterance_counter += 1
+
+ self._finish_trajectory(
+ trajectory,
+ actions_for_session=actions_for_session,
+ processed_utterance_counter=processed_utterance_counter,
+ )
+
+ def prepare_arena_for_trajectory( # noqa: WPS231
+ self, trajectory: MissionTrajectory, preparation_session_id: str
+ ) -> None:
+ """Prepare the arena to run the trajectory."""
+ logger.info("Sending CDF to the arena")
+ self._inference_controller.launch_game(trajectory.cdf_as_dict)
+
+ logger.debug("Verifying Experience Hub is healthy")
+ if not self._inference_controller.healthcheck():
+ raise AssertionError("The Experience Hub is not healthy.")
+
+ if trajectory.preparation_utterances:
+ logger.debug("Running preparation steps")
+
+ for prep_utterance in trajectory.preparation_utterances:
+ self._inference_controller.handle_utterance(preparation_session_id, prep_utterance)
+
+ if self._enforce_successful_preparation:
+ if not self._inference_controller.trajectory_preparation_completed:
+ raise AssertionError("The subgoal status is 0, so preparation failed")
+
+ if trajectory.randomise_start_position:
+ logger.info("Randomising start position")
+
+ # Go to random viewpoint
+ logger.debug("Going to random viewpoint")
+ if isinstance(trajectory.cdf, CDF):
+ self._inference_controller.go_to_random_viewpoint(trajectory.cdf.start_room)
+
+ # Randomise the start position
+ logger.debug("Randomising start position")
+ self._inference_controller.randomise_start_position()
+
+ def _has_mission_been_evaluated(self, trajectory: MissionTrajectory) -> bool:
+ """Check if the mission has already been evaluated.
+
+        Use the mission ID if it exists, otherwise fall back to the session ID.
+ """
+ if trajectory.mission_id:
+ return self._evaluation_metrics.has_mission_been_evaluated(trajectory.mission_id)
+ return self._evaluation_metrics.has_mission_been_evaluated(trajectory.session_id)
+
+ def _finish_trajectory(
+ self,
+ trajectory: MissionTrajectory,
+ *,
+ actions_for_session: list[Any],
+ processed_utterance_counter: int,
+ ) -> None:
+ """Log the results for the trajectory."""
+ (
+ goal_completion_status,
+ subgoal_completion_status,
+ ) = self._inference_controller.get_goal_completion_status()
+
+ self._evaluation_metrics.update(
+ mission_id=trajectory.mission_id or trajectory.session_id,
+ mission_group=trajectory.mission_group,
+ is_mission_completed=goal_completion_status,
+ subgoal_completion_status=subgoal_completion_status,
+ predicted_actions=actions_for_session,
+ last_game_state=self._inference_controller.get_latest_game_state(),
+ remaining_utterances=trajectory.utterances[processed_utterance_counter:],
+ )
+
+ self._wandb_callback.finish_trajectory(
+ trajectory,
+ evaluation_metrics=self._evaluation_metrics,
+ is_success=goal_completion_status,
+ subgoal_completion_status=subgoal_completion_status,
+ )
diff --git a/src/simbot_offline_inference/challenge_validator.py b/src/simbot_offline_inference/challenge_validator.py
new file mode 100644
index 0000000..0ace9db
--- /dev/null
+++ b/src/simbot_offline_inference/challenge_validator.py
@@ -0,0 +1,106 @@
+from contextlib import ExitStack
+from pathlib import Path
+from typing import Any, NamedTuple, Optional, Union
+
+from loguru import logger
+from rich.live import Live
+from rich.panel import Panel
+from rich.progress import BarColumn, MofNCompleteColumn, Progress, TimeElapsedColumn
+
+from arena_missions.structures import CDF
+from simbot_offline_inference.orchestrators import ArenaOrchestrator
+
+
+UNITY_FAILURES = { # noqa: WPS407
+ "UNABLE_TO_SPAWN_OBJECTS": "The objects that were unable to spawn are",
+ "DUPLICATE_KEYS_IN_CDF": "ArgumentException: An item with the same key has already been added",
+ "IMPROPER_OBJECT_REFERENCE": "Object reference not set to an instance of an object",
+}
+
+
+class InvalidCDFException(Exception):
+ """Exception for when an invalid CDF is found."""
+
+ def __init__(self, cdf: Any, *args: Any) -> None:
+ logger.error(f"CDF: {cdf}")
+ super().__init__(*args)
+
+
+class CDFValidationInstance(NamedTuple):
+ """A CDF validation instance."""
+
+ cdf: CDF
+ path: Union[Path, str]
+
+
+class ChallengeValidator:
+ """Validate the CDF file in the Arena.
+
+ The only way to validate the CDF file is to submit it to the Arena and see if there are any
+ errors.
+
+ It's annoying, but it's the only way, and this should hopefully automate that entire process.
+ """
+
+ def __init__(
+ self,
+ arena_orchestrator: ArenaOrchestrator,
+ *,
+ send_dummy_actions_after_cdf_load: bool = False,
+ ) -> None:
+ self._arena_orchestrator = arena_orchestrator
+ self._send_dummy_actions_after_cdf_load = send_dummy_actions_after_cdf_load
+
+ self.progress = Progress(
+ "{task.description}",
+ BarColumn(bar_width=None),
+ MofNCompleteColumn(),
+ TimeElapsedColumn(),
+ expand=True,
+ )
+
+ def validate_cdfs(self, cdfs: list[CDFValidationInstance]) -> bool:
+ """Validate the CDFs with the Arena."""
+ task_id = self.progress.add_task("Validating CDFs", total=len(cdfs))
+
+ # Create the context managers
+ context_manager_stack = ExitStack()
+ context_manager_stack.enter_context(self._display_progress())
+ context_manager_stack.enter_context(self._arena_orchestrator)
+
+ with context_manager_stack:
+ for instance in cdfs:
+ try:
+ self._validate_single_cdf(instance.cdf)
+ except InvalidCDFException:
+ logger.error(f"Failed to validate CDF: {instance.path}")
+ return False
+
+ self.progress.advance(task_id)
+
+ return True
+
+ def _validate_single_cdf(self, cdf: CDF) -> None:
+ """Validate a single CDF with the Arena."""
+ self._arena_orchestrator.send_cdf_to_arena(cdf.dict(by_alias=True))
+
+ if self._send_dummy_actions_after_cdf_load:
+ self._arena_orchestrator.send_dummy_actions_to_arena()
+
+ load_error = self._get_error_from_unity_log()
+ if load_error is not None:
+ raise InvalidCDFException(cdf, f"Unity log contains error: {load_error}")
+
+ def _get_error_from_unity_log(self) -> Optional[str]:
+ """Check the Unity log for any exceptions."""
+        # Read the log once; calling read() inside the loop would return an empty string after
+        # the first pattern check.
+        with open(self._arena_orchestrator.unity_log_path) as unity_log_file:
+            unity_log = unity_log_file.read()
+
+        for check_name, string_pattern in UNITY_FAILURES.items():
+            if string_pattern in unity_log:
+                logger.error(check_name)
+                return check_name
+
+ return None
+
+ def _display_progress(self) -> Live:
+ """Display the progress bar."""
+ return Live(Panel(self.progress, padding=(1, 4), border_style="yellow"))
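+
+
+# Illustrative usage (a sketch; in practice the command modules wire this up):
+#     validator = ChallengeValidator(ArenaOrchestrator())
+#     all_valid = validator.validate_cdfs([CDFValidationInstance(cdf=some_cdf, path="mission.json")])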
diff --git a/src/simbot_offline_inference/commands/__init__.py b/src/simbot_offline_inference/commands/__init__.py
new file mode 100644
index 0000000..2f1bb35
--- /dev/null
+++ b/src/simbot_offline_inference/commands/__init__.py
@@ -0,0 +1,13 @@
+from simbot_offline_inference.commands.generate_trajectories import (
+ generate_trajectories,
+ run_trajectories,
+)
+from simbot_offline_inference.commands.run_background_services import run_background_services
+from simbot_offline_inference.commands.run_their_evaluation import run_their_evaluation
+from simbot_offline_inference.commands.run_trajectories_in_arena import run_trajectories_in_arena
+from simbot_offline_inference.commands.validate_cdfs import (
+ print_challenges_per_high_level_key,
+ print_high_level_keys,
+ validate_cdfs,
+ validate_generated_missions,
+)
diff --git a/src/simbot_offline_inference/commands/generate_trajectories.py b/src/simbot_offline_inference/commands/generate_trajectories.py
new file mode 100644
index 0000000..197eae7
--- /dev/null
+++ b/src/simbot_offline_inference/commands/generate_trajectories.py
@@ -0,0 +1,91 @@
+import random
+from pathlib import Path
+
+from loguru import logger
+
+from arena_missions.builders import ChallengeBuilder, MissionBuilder, RequiredObjectBuilder
+from arena_missions.structures import MissionTrajectory
+from simbot_offline_inference.commands.run_trajectories_in_arena import run_trajectories_in_arena
+from simbot_offline_inference.metrics import WandBTrajectoryGenerationCallback
+from simbot_offline_inference.settings import Settings
+
+
+def _get_default_mission_dir() -> Path:
+ """Return the default mission dir."""
+ return Settings().missions_dir
+
+
+def generate_trajectories(
+ output_dir: Path = _get_default_mission_dir(), # noqa: WPS404
+ *,
+ session_id_prefix: str = "T",
+ enable_randomisation_in_session_id: bool = True,
+) -> None:
+ """Generate trajectories from the missions."""
+ settings = Settings()
+ settings.put_settings_in_environment()
+ settings.prepare_file_system()
+ output_dir.mkdir(parents=True, exist_ok=True)
+
+ logger.info("Loading missions...")
+ missions = list(
+ MissionBuilder(ChallengeBuilder(), RequiredObjectBuilder()).generate_all_missions()
+ )
+ logger.info(f"Loaded {len(missions)} missions")
+
+ trajectories = [
+ mission.convert_to_trajectory(
+ session_id_prefix, include_randomness=enable_randomisation_in_session_id
+ )
+ for mission in missions
+ ]
+
+ logger.info(f"Loaded {len(trajectories)} separate trajectories.")
+
+ saved_trajectories_paths = set()
+
+ for trajectory in trajectories:
+ output_path = output_dir.joinpath(f"{trajectory.session_id}.json")
+ output_path.parent.mkdir(parents=True, exist_ok=True)
+ output_path.write_text(trajectory.json(by_alias=True))
+
+        # Track the saved trajectory paths so we can report how many were written
+ saved_trajectories_paths.add(output_path)
+
+ logger.info(f"Saved {len(saved_trajectories_paths)} trajectories to disk.")
+
+
+def run_trajectories(
+ trajectories_dir: Path,
+ wandb_group_name: str,
+ wandb_project: str = "arena-high-level-trajectories",
+ randomise_order: bool = False,
+) -> None:
+ """Run trajectories from disk."""
+ if not trajectories_dir.is_dir():
+ raise NotADirectoryError("The given path is not a directory.")
+
+ settings = Settings()
+ settings.put_settings_in_environment()
+ settings.prepare_file_system()
+
+ trajectories = [
+ MissionTrajectory.parse_file(trajectory_file)
+ for trajectory_file in trajectories_dir.rglob("*.json")
+ ]
+
+ logger.info(f"Loaded {len(trajectories)} separate trajectories.")
+
+ if randomise_order:
+ random.shuffle(trajectories)
+
+ wandb_callback = WandBTrajectoryGenerationCallback(
+ project=wandb_project,
+ entity=settings.wandb_entity,
+ group=wandb_group_name,
+ mission_trajectory_dir=settings.missions_dir,
+ mission_trajectory_outputs_dir=settings.evaluation_output_dir,
+ unity_logs=settings.unity_log_path,
+ )
+
+ run_trajectories_in_arena(trajectories, wandb_callback=wandb_callback)
diff --git a/src/simbot_offline_inference/commands/run_background_services.py b/src/simbot_offline_inference/commands/run_background_services.py
new file mode 100644
index 0000000..a5ffbd1
--- /dev/null
+++ b/src/simbot_offline_inference/commands/run_background_services.py
@@ -0,0 +1,19 @@
+from emma_experience_hub.commands.simbot.cli import (
+ run_background_services as run_exp_hub_background_services,
+)
+from simbot_offline_inference.settings import Settings
+
+
+def run_background_services() -> None:
+ """Run the background services for the Experience Hub."""
+ settings = Settings()
+ settings.put_settings_in_environment()
+
+ run_exp_hub_background_services(
+ model_storage_dir=settings.models_dir,
+ force_download_models=False,
+ run_in_background=False,
+ observability=False,
+ num_gpus=2,
+ offline_evaluation=True,
+ )
diff --git a/src/simbot_offline_inference/commands/run_their_evaluation.py b/src/simbot_offline_inference/commands/run_their_evaluation.py
new file mode 100644
index 0000000..f5e5ec4
--- /dev/null
+++ b/src/simbot_offline_inference/commands/run_their_evaluation.py
@@ -0,0 +1,104 @@
+import json
+from collections.abc import Iterator
+from pathlib import Path
+from shutil import rmtree
+from typing import Optional
+from uuid import uuid4
+
+from loguru import logger
+from rich.progress import track
+
+from arena_missions.structures import MissionTrajectory
+from simbot_offline_inference.commands.run_trajectories_in_arena import run_trajectories_in_arena
+from simbot_offline_inference.metrics import MissionGroup, WandBEvaluationCallback
+from simbot_offline_inference.settings import Settings
+
+
+def extract_mission_group_from_description(mission_desc: str) -> Optional[MissionGroup]:
+ """Extract the group from the mission description."""
+ switcher: dict[str, MissionGroup] = {
+ "Break_Object": "breakObject",
+ "Clean_and_Deliver": "clean&deliver",
+ "Color_and_Deliver": "color&deliver",
+ "Fill_and_Deliver": "fill&deliver",
+ "Freeze_and_Deliver": "freeze&deliver",
+ "Heat_and_Deliver": "heat&deliver",
+ "Insert_in_Device": "insertInDevice",
+ "Pickup_and_Deliver": "pickup&deliver",
+ "Pour_into_Container": "pourContainer",
+ "Repair_and_Deliver": "repair&deliver",
+ "Scan_Object": "scanObject",
+ "Toggle_": "toggleDevice",
+ }
+
+    for description_fragment, mission_group in switcher.items():
+        if description_fragment.lower() in mission_desc.lower():
+            return mission_group
+
+ return None
+
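+# For example (illustrative): a description containing "Break_Object" maps to the "breakObject"
+# group, while an unrecognised description returns None and the trajectory carries no mission group.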
+
+def process_their_trajectory_data(
+ in_file: Path, session_id_prefix: str
+) -> list[MissionTrajectory]:
+ """Process the trajectory data from their evaluation sets."""
+ task_data = json.loads(in_file.read_bytes())
+
+ test_instances: list[MissionTrajectory] = []
+
+ iterator = track(
+ task_data.items(), description="Processing their trajectory data to our format"
+ )
+
+ for task_description, task in iterator:
+ for annotation_idx, annotation in enumerate(task["human_annotations"]):
+ utterances: Iterator[str] = (
+ instruction["instruction"] for instruction in annotation["instructions"]
+ )
+ utterances = (utterance for utterance in utterances if "_" not in utterance)
+ utterances = (utterance.lower() for utterance in utterances)
+
+ test_instance = MissionTrajectory(
+ mission_group=extract_mission_group_from_description(task_description),
+ mission_id=f"{task_description}_{annotation_idx}",
+ session_id=f"{session_id_prefix}_{str(uuid4())}",
+ cdf=task["CDF"],
+ utterances=list(utterances),
+ randomise_start_position=False,
+ )
+
+ test_instances.append(test_instance)
+
+ return test_instances
+
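+# Expected input shape, inferred from the parsing above (illustrative, not a verified schema):
+#     {"<task description>": {"CDF": {...}, "human_annotations": [{"instructions": [{"instruction": "..."}]}]}}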
+
+def run_their_evaluation(
+ wandb_project: str = "alexa-arena-evaluation",
+ *,
+ force_from_scratch: bool = False,
+) -> None:
+ """Run the evaluation on the test set."""
+ settings = Settings()
+
+ if force_from_scratch:
+ logger.info(
+ "Removing any previously run missions so that all missions can be run from scratch."
+ )
+ rmtree(settings.evaluation_output_dir)
+
+ trajectory_data_path = settings.trajectory_dir.joinpath("valid.json")
+
+ logger.info(f"Loading test data from {trajectory_data_path}")
+ instances = process_their_trajectory_data(trajectory_data_path, session_id_prefix="T1")
+
+ run_trajectories_in_arena(
+ instances,
+ wandb_callback=WandBEvaluationCallback(
+ project=wandb_project,
+ entity=settings.wandb_entity,
+ group="T1",
+ mission_trajectory_dir=settings.missions_dir,
+ mission_trajectory_outputs_dir=settings.evaluation_output_dir,
+ unity_logs=settings.unity_log_path,
+ ),
+ )
diff --git a/src/simbot_offline_inference/commands/run_trajectories_in_arena.py b/src/simbot_offline_inference/commands/run_trajectories_in_arena.py
new file mode 100644
index 0000000..0d24b65
--- /dev/null
+++ b/src/simbot_offline_inference/commands/run_trajectories_in_arena.py
@@ -0,0 +1,55 @@
+from loguru import logger
+from torchmetrics import MeanMetric
+
+from arena_missions.structures import MissionTrajectory
+from emma_common.logging import setup_rich_logging
+from simbot_offline_inference.arena_evaluator import SimBotArenaEvaluator
+from simbot_offline_inference.inference_controller import SimBotInferenceController
+from simbot_offline_inference.metrics import EvaluationMetrics, WandBCallback
+from simbot_offline_inference.orchestrators import ArenaOrchestrator, ExperienceHubOrchestrator
+from simbot_offline_inference.settings import Settings
+
+
+def run_trajectories_in_arena(
+ instances: list[MissionTrajectory], *, wandb_callback: WandBCallback
+) -> None:
+ """Run the evaluation."""
+ settings = Settings()
+ settings.put_settings_in_environment()
+ settings.prepare_file_system()
+
+ setup_rich_logging()
+
+ logger.info("Preparing orchestrators and evaluators")
+ arena_orchestrator = ArenaOrchestrator()
+ experience_hub_orchestrator = ExperienceHubOrchestrator(
+ healthcheck_endpoint=f"{settings.base_endpoint}/healthcheck",
+ predict_endpoint=f"{settings.base_endpoint}/v1/predict",
+ auxiliary_metadata_dir=settings.auxiliary_metadata_dir,
+ auxiliary_metadata_cache_dir=settings.auxiliary_metadata_cache_dir,
+ cached_extracted_features_dir=settings.feature_cache_dir,
+ experience_hub_dir=settings.experience_hub_dir,
+ model_storage_dir=settings.models_dir,
+ )
+ inference_controller = SimBotInferenceController(
+ arena_orchestrator, experience_hub_orchestrator
+ )
+ evaluation_metrics = EvaluationMetrics(
+ settings.evaluation_output_dir,
+ settings.evaluation_metrics_checkpoint,
+ MeanMetric(),
+ MeanMetric(),
+ )
+
+ evaluator = SimBotArenaEvaluator(
+ inference_controller,
+ evaluation_metrics,
+ wandb_callback,
+ enforce_successful_preparation=settings.enforce_successful_preparation,
+ should_resume_previous_wandb_run=settings.should_resume_previous_wandb_run,
+ )
+
+ logger.info(f"Running evaluation for {len(instances)} instances...")
+ evaluator.run_evaluation(instances)
+
+ logger.info("Done!")
diff --git a/src/simbot_offline_inference/commands/validate_cdfs.py b/src/simbot_offline_inference/commands/validate_cdfs.py
new file mode 100644
index 0000000..0223afa
--- /dev/null
+++ b/src/simbot_offline_inference/commands/validate_cdfs.py
@@ -0,0 +1,89 @@
+from pathlib import Path
+
+from loguru import logger
+from rich import box, print as rich_print
+from rich.columns import Columns
+from rich.panel import Panel
+from rich.table import Table
+
+from arena_missions.builders import ChallengeBuilder
+from arena_missions.builders.mission_builder import MissionBuilder
+from arena_missions.builders.required_objects_builder import RequiredObjectBuilder
+from arena_missions.structures import Mission
+from emma_common.logging import setup_rich_logging
+from simbot_offline_inference.challenge_validator import CDFValidationInstance, ChallengeValidator
+from simbot_offline_inference.orchestrators import ArenaOrchestrator
+from simbot_offline_inference.settings import Settings
+
+
+def validate_cdfs(directory: Path) -> None:
+ """Validate the CDFs in the directory."""
+ settings = Settings()
+ settings.put_settings_in_environment()
+ settings.prepare_file_system()
+
+ setup_rich_logging()
+
+ files_to_load = list(directory.rglob("*.json"))
+ logger.info(f"Found {len(files_to_load)} CDFs to validate.")
+
+ cdfs = [
+ CDFValidationInstance(cdf=Mission.parse_file(challenge_file).cdf, path=challenge_file)
+ for challenge_file in files_to_load
+ ]
+
+ arena_orchestrator = ArenaOrchestrator()
+ challenge_validator = ChallengeValidator(arena_orchestrator)
+
+ logger.info("Starting validation")
+ challenge_validator.validate_cdfs(cdfs)
+
+ logger.info("Done.")
+
+
+def validate_generated_missions() -> None:
+ """Validate all missions from the `MissionBuilder`."""
+ settings = Settings()
+ settings.put_settings_in_environment()
+ settings.prepare_file_system()
+
+ setup_rich_logging()
+
+ missions = MissionBuilder(ChallengeBuilder(), RequiredObjectBuilder()).generate_all_missions()
+ cdfs = [
+ CDFValidationInstance(cdf=mission.cdf, path=mission.high_level_key.key)
+ for mission in missions
+ ]
+
+ arena_orchestrator = ArenaOrchestrator()
+ challenge_validator = ChallengeValidator(arena_orchestrator)
+
+ logger.info("Starting validation")
+ challenge_validator.validate_cdfs(cdfs)
+
+ logger.info("Done.")
+
+
+def print_high_level_keys() -> None:
+ """Print all the high level keys from the registered challenge builder."""
+ keys = sorted([str(key) for key in ChallengeBuilder.list_available()])
+ columns = Columns(keys)
+ panel = Panel(
+ columns,
+ title="Registered high-level keys",
+ border_style="green",
+ subtitle=f"Total: {len(keys)}",
+ )
+ rich_print(panel)
+
+
+def print_challenges_per_high_level_key() -> None:
+ """Print the challenges that exist per high-level key."""
+ table = Table(box=box.ROUNDED, style="yellow", highlight=True)
+ table.add_column("High-level key")
+ table.add_column("Num. challenges")
+
+ for key, challenge_count in ChallengeBuilder.count_available_functions_per_key().items():
+ table.add_row(str(key), str(challenge_count))
+
+ rich_print(table)
diff --git a/src/simbot_offline_inference/inference_controller.py b/src/simbot_offline_inference/inference_controller.py
new file mode 100644
index 0000000..7b58e6a
--- /dev/null
+++ b/src/simbot_offline_inference/inference_controller.py
@@ -0,0 +1,217 @@
+import time
+from contextlib import ExitStack
+from typing import Any, Literal
+
+from loguru import logger
+
+from arena_wrapper.enums.object_output_wrapper import ObjectOutputType
+from simbot_offline_inference.orchestrators import ArenaOrchestrator, ExperienceHubOrchestrator
+
+
+class SimBotInferenceController:
+ """Controller for the inference pipeline."""
+
+ def __init__(
+ self,
+ arena_orchestrator: ArenaOrchestrator,
+ experience_hub_orchestrator: ExperienceHubOrchestrator,
+ object_output_type: ObjectOutputType = ObjectOutputType.OBJECT_MASK,
+ max_loops_for_single_utterance: int = 15,
+ experience_hub_healthcheck_attempts: int = 40,
+ ) -> None:
+ self._arena_orchestrator = arena_orchestrator
+ self._experience_hub_orchestrator = experience_hub_orchestrator
+
+ self._object_output_type = object_output_type
+ self._max_loops_for_single_utterance = max_loops_for_single_utterance
+ self._experience_hub_healthcheck_attempts = experience_hub_healthcheck_attempts
+
+ self._exit_stack = ExitStack()
+
+ self.randomise_start_position = self._arena_orchestrator.randomise_start_position
+ self.go_to_random_viewpoint = self._arena_orchestrator.go_to_random_viewpoint
+
+ def __enter__(self) -> None:
+ """Initialize the services."""
+ self._exit_stack.enter_context(self._arena_orchestrator)
+ self._exit_stack.enter_context(self._experience_hub_orchestrator)
+
+ logger.info("Checking experience hub is ready...")
+ self._experience_hub_orchestrator.healthcheck(self._experience_hub_healthcheck_attempts, 5)
+
+ return self._exit_stack.__enter__() # type: ignore[return-value]
+
+ def __exit__(self, *args: Any, **kwargs: Any) -> bool:
+ """Exit the services."""
+ return self._exit_stack.__exit__(*args, **kwargs)
+
+ @property
+ def is_arena_running(self) -> bool:
+ """Check if the arena is running."""
+ return self._arena_orchestrator.is_unity_running
+
+ @property
+ def trajectory_preparation_completed(self) -> bool:
+ """Return True if the subgoal status is above 0."""
+ subgoal_completion_status = self.get_goal_completion_status()[1]
+
+ subgoals_completed = sum(subgoal_completion_status)
+ logger.debug(f"Subgoals completed: {subgoals_completed}")
+
+ first_subgoal_completed = subgoal_completion_status[0] != 0
+ logger.debug(f"First subgoal completed: {first_subgoal_completed}")
+
+ return subgoals_completed > 0 and first_subgoal_completed
+
+ def healthcheck(self) -> bool:
+ """Healthcheck the services."""
+ return self._experience_hub_orchestrator.healthcheck()
+
+ def launch_game(
+ self, mission_cdf: dict[str, Any], attempts: int = 10, interval: int = 5
+ ) -> None:
+ """Launch the game on the Arena instance.
+
+ We also need to do the dummy actions to make sure the game is ready to go.
+ """
+ return self._arena_orchestrator.launch_new_game(
+ mission_cdf, attempts, interval, self._object_output_type
+ )
+
+ def get_goal_completion_status(self) -> tuple[bool, list[Literal[0, 1]]]:
+ """Get the goal completion status from the Arena instance."""
+ (
+ _,
+ goal_completion_status,
+ subgoal_completion_status,
+ ) = self._arena_orchestrator.get_goals_status()
+ return goal_completion_status, subgoal_completion_status
+
+ def handle_utterance( # noqa: WPS231
+ self, session_id: str, utterance: str
+ ) -> list[dict[str, Any]]:
+ """Handle execution of a single utterance in the arena.
+
+ Return a list of all actions taken for the current utterance.
+ """
+ actions_taken: list[dict[str, Any]] = []
+ previous_action_statuses: list[Any] = []
+
+ if self.is_all_goals_complete():
+ raise AssertionError(
+ "Do not send an utterance when all goals are complete. Arena will crash. If you are wanting to do this, there is something wrong in the challenge definition."
+ )
+
+ for loop_idx in range(self._max_loops_for_single_utterance):
+ if self.is_all_goals_complete():
+ logger.warning("All goals are complete, so we are breaking out of the loop")
+ break
+
+ logger.debug(f"Executing step {loop_idx}")
+
+ # Get the auxiliary metadata from the arena
+ logger.debug("Getting auxiliary metadata from the arena")
+ auxiliary_metadata = self._arena_orchestrator.get_reconstructed_metadata()
+
+ # Get the next actions to take from the ExperienceHub
+ logger.debug("Trying to get the next actions to take from the Experience Hub")
+ (
+ interaction_actions,
+ dialog_actions,
+ should_return_control,
+ ) = self._experience_hub_orchestrator.get_next_actions(
+ session_id,
+                # Only pass the utterance on the first loop; on later loops the user has not
+                # issued a new instruction
+ utterance if loop_idx == 0 else None,
+ auxiliary_metadata,
+ previous_action_statuses,
+ )
+ actions_taken.extend(interaction_actions)
+
+ # Execute the actions on the arena environment
+ logger.debug(f"Executing actions: {interaction_actions}")
+ return_val, action_status = self._arena_orchestrator.execute_action(
+ interaction_actions, self._object_output_type, utterance
+ )
+ logger.debug(f"Received response from arena: {return_val}, {action_status}")
+
+ # Update the previous action statuses so it goes back to the arena
+ if not should_return_control or not return_val:
+ if action_status is not None:
+ previous_action_statuses = [action_status]
+
+ # If there is an issue completing the action, we need to give that back to the
+ # experience hub
+ if not return_val:
+ logger.error(f"Action could not be completed for the utterance {utterance}")
+
+ if not interaction_actions:
+ logger.warning(
+ "There were not actions to perform, so there is just dialog. Returning control back to the user since we didn't do anything in the arena."
+ )
+ break
+
+ # Only break out the loop if we return a dialog action AND there is no error in
+ # performing the action
+ if should_return_control and return_val:
+ logger.debug("Returning control to the user to get the next utterance")
+ break
+
+ return actions_taken
+
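+    # Illustrative call pattern (a sketch): inside a `with controller:` block, a call like
+    #     actions = controller.handle_utterance(session_id, "go to the desk")
+    # loops up to max_loops_for_single_utterance times, feeding previous action statuses back to
+    # the Experience Hub until control is returned or all goals are complete.
+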
+ def get_latest_game_state(self) -> dict[str, Any]:
+ """Get the latest game state for the evaluation output."""
+ if self._arena_orchestrator.response is None:
+ raise AssertionError("There is no response from the Arena")
+
+ exclude_keys = [
+ "sceneMetadata",
+ "colorImage",
+ "depthImage",
+ "normalsImage",
+ "instanceSegmentationImage",
+ "objects",
+ ]
+ return {
+ key: self._arena_orchestrator.response[key]
+ for key in self._arena_orchestrator.response
+ if key not in exclude_keys
+ }
+
+ def is_all_goals_complete(self) -> bool:
+ """Check to see if all the goals are complete."""
+ arena_response = self._arena_orchestrator.response
+
+ if not arena_response:
+ return False
+
+        # If the challenge progress is not in the response, the goals cannot be complete.
+ challenge_progress = arena_response.get("challengeProgress", None)
+
+ if not challenge_progress:
+ return False
+
+ challenge_goals = challenge_progress.get("ChallengeGoals", None)
+
+ if not challenge_goals:
+ return False
+
+ num_goals = len(challenge_goals)
+ finished_goal_count = 0
+
+ for goal in challenge_goals:
+ is_finished = goal.get("isFinished", False)
+ if is_finished:
+ finished_goal_count += 1
+
+ return num_goals == finished_goal_count
+
+ def restart_arena(self) -> bool:
+ """Restart the Arena."""
+ self._arena_orchestrator.kill_unity_instance()
+
+ logger.info("Waiting for 30 seconds before restarting the arena...")
+ time.sleep(30) # noqa: WPS432
+
+ return self._arena_orchestrator.init_unity_instance()
diff --git a/src/simbot_offline_inference/metrics/__init__.py b/src/simbot_offline_inference/metrics/__init__.py
new file mode 100644
index 0000000..911c6b2
--- /dev/null
+++ b/src/simbot_offline_inference/metrics/__init__.py
@@ -0,0 +1,6 @@
+from simbot_offline_inference.metrics.evaluation import EvaluationMetrics, MissionGroup
+from simbot_offline_inference.metrics.wandb import (
+ WandBCallback,
+ WandBEvaluationCallback,
+ WandBTrajectoryGenerationCallback,
+)
diff --git a/src/simbot_offline_inference/metrics/evaluation.py b/src/simbot_offline_inference/metrics/evaluation.py
new file mode 100644
index 0000000..39aef13
--- /dev/null
+++ b/src/simbot_offline_inference/metrics/evaluation.py
@@ -0,0 +1,115 @@
+from pathlib import Path
+from typing import Any, Literal, Optional, get_args
+
+import orjson
+import torch
+from torchmetrics import MeanMetric, SumMetric
+
+
+MissionGroup = Literal[
+ "breakObject",
+ "clean&deliver",
+ "color&deliver",
+ "fill&deliver",
+ "freeze&deliver",
+ "heat&deliver",
+ "insertInDevice",
+ "pickup&deliver",
+ "pourContainer",
+ "repair&deliver",
+ "scanObject",
+ "toggleDevice",
+]
+
+
+class EvaluationMetrics:
+ """Metrics for evaluating the agent's performance."""
+
+ def __init__(
+ self,
+ evaluation_output_dir: Path,
+ evaluation_metrics_checkpoint_path: Path,
+ success_rate_metric: MeanMetric,
+ subgoal_completion_rate_metric: MeanMetric,
+ per_mission_group_success_rate: Optional[dict[str, MeanMetric]] = None,
+ ) -> None:
+ self._output_path = evaluation_output_dir
+ self._evaluation_metrics_checkpoint_path = evaluation_metrics_checkpoint_path
+
+ self.games_played = SumMetric()
+
+ self.success_rate = success_rate_metric
+ self.subgoal_completion_rate = subgoal_completion_rate_metric
+
+ self.per_mission_group_success_rate = per_mission_group_success_rate or {
+ mission_group: MeanMetric() for mission_group in get_args(MissionGroup)
+ }
+
+ def restore_checkpoint(self) -> "EvaluationMetrics":
+ """Restore the evaluation metrics from the checkpoint."""
+ if not self._evaluation_metrics_checkpoint_path.exists():
+ raise FileNotFoundError(
+ "Evaluation metrics checkpoint does not exist. Why are we resuming?"
+ )
+
+ return torch.load(self._evaluation_metrics_checkpoint_path)
+
+ def save_checkpoint(self) -> None:
+ """Create a checkpoint for the evaluation metrics."""
+ torch.save(self, self._evaluation_metrics_checkpoint_path)
+
+ def delete_checkpoint(self) -> None:
+ """Delete the checkpoint for the evaluation metrics."""
+ self._evaluation_metrics_checkpoint_path.unlink(missing_ok=True)
+
+ def has_mission_been_evaluated(self, mission_id: str) -> bool:
+ """Check if the mission has already been evaluated."""
+ return self._output_path.joinpath(f"{mission_id}.json").exists()
+
+ def update(
+ self,
+ mission_id: str,
+ mission_group: Optional[str],
+ is_mission_completed: bool,
+ subgoal_completion_status: list[Literal[0, 1]],
+ predicted_actions: list[dict[str, Any]],
+ last_game_state: dict[str, Any],
+ remaining_utterances: list[str],
+ ) -> None:
+ """Add metrics from a recently-evaluated mission."""
+ self.games_played.update(1)
+ self.success_rate.update(1 if is_mission_completed else 0)
+
+ for subgoal_completion in subgoal_completion_status:
+ self.subgoal_completion_rate.update(subgoal_completion)
+
+ if mission_group:
+ self.per_mission_group_success_rate[mission_group].update(
+ 1 if is_mission_completed else 0
+ )
+
+ self._save_mission_results(
+ mission_id, predicted_actions, last_game_state, remaining_utterances
+ )
+
+ def _save_mission_results(
+ self,
+ mission_id: str,
+ predicted_actions: list[dict[str, Any]],
+ last_game_state: dict[str, Any],
+ remaining_utterances: list[str],
+ ) -> None:
+ """Save the mission results to a file.
+
+ This is what gets uploaded to Eval.AI.
+ """
+ output_results = {
+ "predicted_actions": predicted_actions,
+ "last_game_state": last_game_state,
+ "remaining_utterances": remaining_utterances,
+ }
+
+ # Write the results to a file
+ output_file = self._output_path.joinpath(f"{mission_id}.json")
+ output_file.parent.mkdir(parents=True, exist_ok=True)
+ output_file.write_bytes(orjson.dumps(output_results))
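+
+
+# Each mission produces one JSON file under the evaluation output directory (illustrative layout):
+#     <evaluation_output_dir>/<mission_id>.json
+#     -> {"predicted_actions": [...], "last_game_state": {...}, "remaining_utterances": [...]}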
diff --git a/src/simbot_offline_inference/metrics/wandb.py b/src/simbot_offline_inference/metrics/wandb.py
new file mode 100644
index 0000000..58a1db4
--- /dev/null
+++ b/src/simbot_offline_inference/metrics/wandb.py
@@ -0,0 +1,264 @@
+from abc import ABC, abstractmethod
+from copy import copy
+from pathlib import Path
+from typing import Literal, Optional
+
+import torch
+import wandb
+from loguru import logger
+
+from arena_missions.structures import CDF, MissionTrajectory
+from emma_experience_hub._version import __version__ as experience_hub_version # noqa: WPS436
+from emma_experience_hub.constants import constants_absolute_path
+from simbot_offline_inference._version import ( # noqa: WPS436
+ __version__ as offline_inference_version,
+)
+from simbot_offline_inference.metrics.evaluation import EvaluationMetrics
+
+
+SERVICE_REGISTRY_PATH = constants_absolute_path.joinpath("simbot", "registry.yaml")
+
+
+class WandBCallback(ABC):
+ """Base class for sending data to WandB."""
+
+ def __init__(
+ self,
+ project: str,
+ entity: str,
+ group: Optional[str],
+ mission_trajectory_dir: Path,
+ mission_trajectory_outputs_dir: Path,
+ unity_logs: Path,
+ ) -> None:
+ self.project = project
+ self.entity = entity
+ self.group = group
+ self.mission_trajectory_dir = mission_trajectory_dir
+ self.mission_trajectory_outputs_dir = mission_trajectory_outputs_dir
+
+ self._unity_logs = unity_logs
+
+ self.__post_init__()
+
+ @abstractmethod
+ def __post_init__(self) -> None:
+ """Post init actions to perform, if needed.
+
+ This is important to avoid changing the signature of the `__init__` method.
+ """
+ pass # noqa: WPS420
+
+ @abstractmethod
+ def start_evaluation(self, *, resume: bool = False) -> None:
+ """Start a new evaluation session."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def finish_evaluation(self) -> None:
+ """Finish an evaluation session."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def start_trajectory(self, trajectory: MissionTrajectory, preparation_session_id: str) -> None:
+ """Start running a new trajectory."""
+ raise NotImplementedError
+
+ @abstractmethod
+ def finish_trajectory(
+ self,
+ trajectory: MissionTrajectory,
+ *,
+ evaluation_metrics: EvaluationMetrics,
+ is_success: bool,
+ subgoal_completion_status: list[Literal[0, 1]],
+ ) -> None:
+ """Finish running a trajectory."""
+ raise NotImplementedError
+
+
+class WandBTrajectoryGenerationCallback(WandBCallback):
+ """Track each trajectory as a new run in WandB."""
+
+ def __post_init__(self) -> None:
+ """Post init actions to perform, if needed."""
+ pass # noqa: WPS420
+
+ def start_evaluation(self, *, resume: bool = False) -> None:
+ """No-op on start evaluation."""
+ pass # noqa: WPS420
+
+ def finish_evaluation(self) -> None:
+ """No-op on end evaluation."""
+ pass # noqa: WPS420
+
+ def start_trajectory(self, trajectory: MissionTrajectory, preparation_session_id: str) -> None:
+ """Start tracking a trajectory for generation."""
+ cdf = trajectory.cdf
+ high_level_key = trajectory.high_level_key
+
+ if not high_level_key:
+ raise AssertionError("High level key is not set.")
+
+ if isinstance(cdf, CDF):
+ cdf_scene = cdf.scene
+ else:
+ raise AssertionError("CDF is not set.")
+
+ wandb.init(
+ name=trajectory.session_id,
+ entity=self.entity,
+ project=self.project,
+ group=self.group,
+ config={
+ "version/experience_hub": experience_hub_version,
+ "version/offline_inference": offline_inference_version,
+ "session_id": trajectory.session_id,
+ "preparation_session_id": preparation_session_id,
+ # CDF
+ "cdf/floor_plan": cdf_scene.floor_plan,
+ "cdf/scene_id": cdf_scene.scene_id,
+ "cdf/room": cdf_scene.room_location[0],
+ "cdf/layout": cdf_scene.layout_override,
+ # High level key
+ "high_level_key": str(high_level_key),
+ "high_level_key/action": high_level_key.action,
+ "high_level_key/target_object": high_level_key.target_object,
+ "high_level_key/target_object_color": high_level_key.target_object_color,
+ "high_level_key/target_object_is_ambiguous": high_level_key.target_object_is_ambiguous,
+ "high_level_key/interaction_object": high_level_key.interaction_object,
+ "high_level_key/interaction_object_color": high_level_key.interaction_object_color,
+ "high_level_key/converted_object": high_level_key.converted_object,
+ "high_level_key/converted_object_color": high_level_key.converted_object_color,
+ "high_level_key/stacked_object": high_level_key.stacked_object,
+ "high_level_key/stacked_object_color": high_level_key.stacked_object_color,
+ "high_level_key/from_receptacle": high_level_key.from_receptacle,
+ "high_level_key/from_receptacle_color": high_level_key.from_receptacle_color,
+ "high_level_key/from_receptacle_is_container": high_level_key.from_receptacle_is_container,
+ "high_level_key/to_receptacle": high_level_key.to_receptacle,
+ "high_level_key/to_receptacle_color": high_level_key.to_receptacle_color,
+ "high_level_key/to_receptacle_is_container": high_level_key.to_receptacle_is_container,
+ },
+ )
+
+ # Upload the mission trajectory file
+ wandb.save(str(self.mission_trajectory_dir.joinpath(f"{trajectory.session_id}.json")))
+
+ # Upload the trajectory results on run completion
+ # According to wandb docs, this command is correct
+ wandb.save(
+ str(self.mission_trajectory_outputs_dir.joinpath(f"{trajectory.session_id}.json")),
+ policy="end",
+ )
+ # Also upload the unity logs
+ wandb.save(str(self._unity_logs), policy="end")
+
+ def finish_trajectory(
+ self,
+ trajectory: MissionTrajectory,
+ *,
+ evaluation_metrics: EvaluationMetrics,
+ is_success: bool,
+ subgoal_completion_status: list[Literal[0, 1]],
+ ) -> None:
+ """Finish a trajectory."""
+ try:
+ subgoal_success_rate = sum(subgoal_completion_status) / len(subgoal_completion_status)
+ except ZeroDivisionError:
+ subgoal_success_rate = 0
+
+ wandb.log({"is_success": int(is_success), "subgoal_success_rate": subgoal_success_rate})
+
+ # If subgoal success rate is 0, then it means the preparation also failed, therefore mark
+ # the run as failed.
+ wandb.finish(exit_code=1 if subgoal_success_rate == 0 else None)
+
+
+class WandBEvaluationCallback(WandBCallback):
+ """Track metrics across the entire validation set.
+
+ According to wandb docs, the various save commands are correct.
+ """
+
+ def __post_init__(self) -> None:
+ """Post init actions to perform."""
+        # The `mission_success_table` tracks the success of each mission over the course of an
+        # entire run, along with the session ID for that mission.
+ self._mission_success_table = wandb.Table(columns=["mission_id", "session_id", "success"])
+
+ def start_evaluation(self, *, resume: bool = False) -> None:
+ """Start running an evaluation."""
+ if resume:
+ logger.info("Resuming previous wandb run.")
+
+ wandb.init(
+ entity=self.entity,
+ project=self.project,
+ group=self.group,
+ resume=resume,
+ config={
+ "version/experience_hub": experience_hub_version,
+ "version/offline_inference": offline_inference_version,
+ },
+ )
+
+ # Upload the trajectory results on run completion
+ wandb.save(str(self.mission_trajectory_outputs_dir), policy="end")
+
+ # Also upload the unity logs
+ wandb.save(str(self._unity_logs))
+
+ def finish_evaluation(self) -> None:
+ """Finish running an evaluation."""
+ wandb.finish()
+
+ def start_trajectory(self, trajectory: MissionTrajectory, preparation_session_id: str) -> None:
+ """No-op when starting a new trajectory."""
+ pass # noqa: WPS420
+
+ def finish_trajectory(
+ self,
+ trajectory: MissionTrajectory,
+ *,
+ evaluation_metrics: EvaluationMetrics,
+ is_success: bool,
+ subgoal_completion_status: list[Literal[0, 1]],
+ ) -> None:
+ """Finish a trajectory."""
+ step_idx = int(evaluation_metrics.games_played.compute().item())
+
+ if trajectory.mission_id:
+ # Update the table with the mission output
+ self._mission_success_table.add_data(
+ trajectory.mission_id, trajectory.session_id, 1 if is_success else 0
+ )
+
+ # Log the table to wandb
+ wandb.log(
+ {"mission_success_table": copy(self._mission_success_table)},
+ commit=False,
+ step=step_idx,
+ )
+
+ # If we have mission groups, log them
+ if evaluation_metrics.per_mission_group_success_rate:
+ wandb.log(
+ {
+ f"success_rate/{mission_group}": torch.nan_to_num(success_rate.compute())
+ for mission_group, success_rate in evaluation_metrics.per_mission_group_success_rate.items()
+ },
+ commit=False,
+ step=step_idx,
+ )
+
+ wandb.log(
+ {
+ "success_rate": evaluation_metrics.success_rate.compute(),
+ "subgoal_success_rate": evaluation_metrics.subgoal_completion_rate.compute(),
+ },
+ commit=True,
+ step=step_idx,
+ )
+
+ # Save a checkpoint of the evaluation metrics
+ evaluation_metrics.save_checkpoint()
diff --git a/src/simbot_offline_inference/orchestrators.py b/src/simbot_offline_inference/orchestrators.py
new file mode 100644
index 0000000..e3b69d1
--- /dev/null
+++ b/src/simbot_offline_inference/orchestrators.py
@@ -0,0 +1,377 @@
+import random
+import subprocess
+import time
+from pathlib import Path
+from typing import Any, NamedTuple, Optional
+from uuid import uuid4
+
+import httpx
+import orjson
+from loguru import logger
+
+from arena_missions.constants.arena import OfficeRoom
+from arena_wrapper.arena_orchestrator import ArenaOrchestrator as AlexaArenaOrchestrator
+from arena_wrapper.enums.object_output_wrapper import ObjectOutputType
+from simbot_offline_inference.arena_action_builder import ArenaActionBuilder
+from simbot_offline_inference.settings import Settings
+
+
+class ExperienceHubNextActions(NamedTuple):
+ """Return type after getting the next set of actions from the experience hub."""
+
+ interaction_actions: list[dict[str, Any]]
+ dialog_actions: list[dict[str, Any]]
+ should_return_control: bool
+
+
+class ArenaOrchestrator(AlexaArenaOrchestrator):
+ """Wrapper for the ArenaOrchestrator."""
+
+ def __enter__(self) -> None:
+ """Initialize the unity instance."""
+ if not self.init_unity_instance():
+ raise AssertionError("Could not start the unity instance.")
+
+ def __exit__(self, *args: Any, **kwargs: Any) -> None:
+ """Try to kill the unity instance."""
+ if not self.kill_unity_instance():
+ logger.warning(
+ "Could not kill the Unity instance. You might need to kill it manually."
+ )
+
+ @property
+ def unity_log_path(self) -> Path:
+ """Get the path to the unity logs."""
+ settings = Settings()
+ return Path(settings.unity_log_path)
+
+ def launch_new_game(
+ self,
+ mission_cdf: Any,
+ attempts: int = 10,
+ interval: int = 5,
+ object_output_type: ObjectOutputType = ObjectOutputType.OBJECT_MASK,
+ ) -> None:
+ """Launch the game on the Arena instance.
+
+ We also need to do the dummy actions to make sure the game is ready to go.
+ """
+ self.send_cdf_to_arena(mission_cdf)
+ self.send_dummy_actions_to_arena(attempts, interval, object_output_type)
+
+ def send_cdf_to_arena(self, mission_cdf: Any) -> None:
+ """Send the CDF to the Arena instance."""
+ if not self.launch_game(mission_cdf):
+ raise AssertionError("Could not launch the game")
+
+ def send_dummy_actions_to_arena(
+ self,
+ attempts: int = 10,
+ interval: int = 5,
+ object_output_type: ObjectOutputType = ObjectOutputType.OBJECT_MASK,
+ ) -> None:
+ """Send dummy actions to the Arena instance to make sure it's ready to go."""
+ logger.debug("Sending dummy actions to verify game is ready")
+ dummy_action = [
+ {
+ "id": "1",
+ "type": "Rotate",
+ "rotation": {
+ "direction": "Right",
+ "magnitude": 0,
+ },
+ }
+ ]
+
+ for attempt_idx in range(attempts):
+ return_val, _ = self.execute_action(dummy_action, object_output_type, "Rotate right")
+
+ # If it succeeds, then just exit the loop since it's ready to go
+ if return_val:
+ return
+
+ logger.error(
+ f"Attempt {attempt_idx + 1}/{attempts} failed. Waiting for {interval} seconds before trying again."
+ )
+ time.sleep(interval)
+
+ raise AssertionError("Exhausted all attempts")
+
+ def go_to_random_viewpoint(self, room: OfficeRoom) -> None:
+ """Go to a random viewpoint in the given room."""
+ if not self.response:
+ logger.error("There is no response to get viewpoints from.")
+ return
+
+ try:
+ viewpoints: dict[str, dict[str, float]] = self.response["sceneMetadata"]["GoToPoints"]
+ except KeyError:
+ logger.error("Unable to get viewpoints from response.")
+ return
+
+ # Get all the viewpoints in the current room
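+ # (viewpoint names are prefixed with the room name, e.g. "BreakRoom_1")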
+ viewpoints_for_current_room = [
+ viewpoint for viewpoint in viewpoints.keys() if viewpoint.startswith(room)
+ ]
+
+ # Choose random viewpoint
+ chosen_viewpoint = random.choice(viewpoints_for_current_room)
+
+ # Go to the chosen viewpoint
+ logger.debug(f"Going to viewpoint: {chosen_viewpoint}")
+
+ action_builder = ArenaActionBuilder()
+ return_val, _ = self.execute_action(
+ [action_builder.viewpoint(chosen_viewpoint)], ObjectOutputType.OBJECT_MASK, None
+ )
+
+ if not return_val:
+ logger.warning(
+ "Failed to go to a random viewpoint, going to the first one in the room"
+ )
+ self.execute_action(
+ [action_builder.viewpoint(f"{room}_1")], ObjectOutputType.OBJECT_MASK, None
+ )
+
+ def randomise_start_position(
+ self,
+ num_steps: int = 10,
+ object_output_type: ObjectOutputType = ObjectOutputType.OBJECT_MASK,
+ ) -> None:
+ """Randomise the start position of the agent."""
+ logger.debug("Randomising start position of the agent")
+ action_builder = ArenaActionBuilder()
+ actions_to_send = [action_builder.random_navigation() for _ in range(num_steps)]
+
+ for action in actions_to_send:
+ return_val, action_response = self.execute_action([action], object_output_type, None)
+
+ # If it fails, raise assertion error
+ if not return_val:
+ # Explicitly do not raise if these error types occur
+ error_types_to_ignore = ("AlternateNavigationUsed", "UnsupportedNavigation")
+
+ if action_response.get("errorType") not in error_types_to_ignore:
+ raise AssertionError("Failed to randomise start position")
+
+ time.sleep(5)
+
+ def _get_unity_execution_command(self) -> str:
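+ """Build the shell command that launches the Arena executable in the background."""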
+ settings = Settings()
+
+ command = (
+ f"DISPLAY=:{settings.display} {settings.arena_path}"
+ f" -logfile {settings.unity_log_path} &"
+ )
+ return command
+
+
+class ExperienceHubOrchestrator:
+ """Orchestrator for the Experience Hub."""
+
+ def __init__(
+ self,
+ healthcheck_endpoint: str,
+ predict_endpoint: str,
+ auxiliary_metadata_dir: Path,
+ auxiliary_metadata_cache_dir: Path,
+ cached_extracted_features_dir: Path,
+ model_storage_dir: Path,
+ experience_hub_dir: Path,
+ ) -> None:
+ self._healthcheck_endpoint = healthcheck_endpoint
+ self._predict_endpoint = predict_endpoint
+ self._auxiliary_metadata_dir = auxiliary_metadata_dir
+ self._auxiliary_metadata_cache_dir = auxiliary_metadata_cache_dir
+ self._cached_extracted_features_dir = cached_extracted_features_dir
+ self._experience_hub_dir = experience_hub_dir
+ self._model_storage_dir = model_storage_dir
+
+ def __enter__(self) -> None:
+ """Start the Experience Hub."""
+ logger.debug("Starting controller API for the experience hub...")
+ subprocess.run(self._build_experience_hub_command(), shell=True)
+
+ def __exit__(self, *args: Any, **kwargs: Any) -> None:
+ """Try to kill the experience hub."""
+ subprocess.run(
+ "ps -ax | grep gunicorn | awk '{print $1}' | xargs kill -9",
+ shell=True,
+ )
+
+ def healthcheck(self, attempts: int = 5, interval: int = 2) -> bool:
+ """Perform healthcheck, with retry intervals.
+
+ To disable retries, just set the number of attempts to 1.
+ """
+ healthcheck_flag = False
+
+ for attempt in range(attempts):
+ healthcheck_flag = self._healthcheck()
+
+ # If the healthcheck flag is all good, break from the loop
+ if healthcheck_flag:
+ break
+
+ # Otherwise, report a failed attempt
+ logger.error(f"Healthcheck attempt {attempt + 1}/{attempts} failed.")
+
+ # If attempt is not the last one, sleep for interval and go again
+ if attempt < attempts - 1:
+ logger.debug(f"Waiting for {interval} seconds and then trying again.")
+ time.sleep(interval)
+
+ return healthcheck_flag
+
+ def get_next_actions(
+ self,
+ session_id: str,
+ utterance: Optional[str],
+ auxiliary_metadata: dict[str, Any],
+ previous_action_statuses: list[Any],
+ ) -> ExperienceHubNextActions:
+ """Make a prediction for the actions the agent should take."""
+ prediction_request_id = str(uuid4())
+
+ self._save_auxiliary_metadata(session_id, prediction_request_id, auxiliary_metadata)
+
+ logger.debug("Building request payload")
+ simbot_request = self._build_raw_simbot_request(
+ session_id, prediction_request_id, utterance, previous_action_statuses
+ )
+
+ logger.debug(f"Sending request: {simbot_request}")
+ simbot_response = self._make_request(simbot_request)
+
+ actions = simbot_response.get("actions")
+ if not actions:
+ raise AssertionError("No actions to return.")
+
+ return ExperienceHubNextActions(
+ interaction_actions=self._filter_dialog_actions(actions),
+ dialog_actions=self._filter_interaction_actions(actions),
+ should_return_control=self._should_return_control_for_actions(actions),
+ )
+
+ def _healthcheck(self) -> bool:
+ """Verify the health of the experience hub service."""
+ logger.debug("Running healthcheck")
+
+ with httpx.Client() as client:
+ try:
+ response = client.get(self._healthcheck_endpoint)
+ except httpx.ReadTimeout:
+ logger.error("Healthcheck timed out")
+ return False
+ except httpx.ConnectError:
+ logger.error("Connection refused")
+ return False
+
+ try:
+ response.raise_for_status()
+ except httpx.HTTPStatusError:
+ logger.error("Healthcheck failed")
+ return False
+
+ logger.info("Healthcheck success")
+ return True
+
+ def _save_auxiliary_metadata(
+ self,
+ session_id: str,
+ prediction_request_id: str,
+ auxiliary_metadata: dict[str, Any],
+ ) -> None:
+ """Save the auxiliary metadata to the file."""
+ output_location = self._auxiliary_metadata_dir.joinpath(
+ f"{session_id}/{prediction_request_id}.json"
+ )
+ output_location.parent.mkdir(parents=True, exist_ok=True)
+ output_location.write_bytes(orjson.dumps(auxiliary_metadata))
+ logger.debug(f"Wrote auxiliary metadata to `{output_location}`")
+
+ def _build_raw_simbot_request(
+ self,
+ session_id: str,
+ prediction_request_id: str,
+ utterance: Optional[str],
+ previous_action_statuses: list[Any],
+ ) -> dict[str, Any]:
+ """Build the request to send to the Experience Hub."""
+ request_header = {
+ "sessionId": session_id,
+ "predictionRequestId": prediction_request_id,
+ }
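+ # The URI references the file written by `_save_auxiliary_metadata`, presumably resolved
+ # against the auxiliary metadata dir by the experience hub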
+ raw_auxiliary_metadata_sensor = {
+ "type": "GameMetaData",
+ "metaData": {"uri": f"efs://{session_id}/{prediction_request_id}.json"},
+ }
+
+ simbot_request: dict[str, Any] = {
+ "header": request_header,
+ "request": {
+ "sensors": [
+ raw_auxiliary_metadata_sensor,
+ ],
+ "previousActions": previous_action_statuses,
+ },
+ }
+
+ if utterance:
+ simbot_request["request"]["sensors"].append(
+ {
+ "type": "SpeechRecognition",
+ "recognition": {
+ "tokens": [
+ {"value": token, "confidence": {"score": 0.95, "bin": "HIGH"}}
+ for token in utterance.strip().split(" ")
+ ]
+ },
+ }
+ )
+
+ return simbot_request
+
+ def _make_request(self, simbot_request: dict[str, Any]) -> dict[str, Any]:
+ """Make the request to the experience hub and return the response."""
+ with httpx.Client(timeout=None) as client:
+ response = client.post(self._predict_endpoint, json=simbot_request)
+
+ try:
+ response.raise_for_status()
+ except Exception:
+ logger.exception("Unable to get response for request.")
+
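+ # Note that the body is still parsed and returned below, even if the status check failed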
+ return response.json()
+
+ def _should_return_control_for_actions(self, actions: list[dict[str, Any]]) -> bool:
+ """Is the agent returning control after the actions?
+
+ We only return control on sending the "Dialog" action, and no other time.
+ """
+ return any(action["type"] == "Dialog" for action in actions)
+
+ def _filter_dialog_actions(self, actions: list[dict[str, Any]]) -> list[dict[str, Any]]:
+ """Remove any dialog actions from the response."""
+ return [
+ action for action in actions if action["type"] not in {"Dialog", "LightweightDialog"}
+ ]
+
+ def _filter_interaction_actions(self, actions: list[dict[str, Any]]) -> list[dict[str, Any]]:
+ """Filter out actions that are interaction actions/are not dialog actions."""
+ return [action for action in actions if action["type"] in {"Dialog", "LightweightDialog"}]
+
+ def _build_experience_hub_command(self) -> str:
+ """Build the command to run the experience hub."""
+ command = "python -m emma_experience_hub simbot run-controller-api --auxiliary-metadata-dir {auxiliary_metadata_dir} --auxiliary-metadata-cache-dir {auxiliary_metadata_cache_dir} --extracted-features-cache-dir {extracted_features_cache_dir} --workers 2 --timeout 10000000000 &"
+ return command.format(
+ auxiliary_metadata_dir=self._auxiliary_metadata_dir,
+ auxiliary_metadata_cache_dir=self._auxiliary_metadata_cache_dir,
+ extracted_features_cache_dir=self._cached_extracted_features_dir,
+ )
diff --git a/src/simbot_offline_inference/settings.py b/src/simbot_offline_inference/settings.py
new file mode 100644
index 0000000..7c2712f
--- /dev/null
+++ b/src/simbot_offline_inference/settings.py
@@ -0,0 +1,80 @@
+import os
+from pathlib import Path
+from typing import Union
+
+from pydantic import BaseSettings
+
+
+class Settings(BaseSettings):
+ """Settings to run the evaluation."""
+
+ # Paths
+ storage_dir: Path = Path("storage/")
+ auxiliary_metadata_dir: Path = storage_dir.joinpath("auxiliary_metadata/")
+ auxiliary_metadata_cache_dir: Path = storage_dir.joinpath("auxiliary_metadata_cache/")
+ feature_cache_dir: Path = storage_dir.joinpath("features/")
+ trajectory_dir: Path = storage_dir.joinpath("data/", "trajectory-data/")
+ experience_hub_dir: Path = storage_dir.joinpath("experience-hub/")
+ models_dir: Path = experience_hub_dir.joinpath("storage/models/")
+ cdf_dir: Path = storage_dir.joinpath("cdfs/")
+ missions_dir: Path = cdf_dir.joinpath("missions/")
+
+ evaluation_output_dir: Path = storage_dir.joinpath("action_outputs/")
+ evaluation_metrics_checkpoint: Path = storage_dir.joinpath("evaluation_metrics_checkpoint.pt")
+
+ # WandB
+ wandb_entity: str = "emma-simbot"
+
+ # Experience hub
+ base_endpoint: str = "http://0.0.0.0:5522"
+ simbot_port: int = 5522
+ simbot_client_timeout: int = -1
+ simbot_feature_flags__enable_offline_evaluation: bool = True # noqa: WPS116, WPS118
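+ # The double underscore is assumed to map to a nested field in the experience hub settings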
+
+ # Unity
+ platform: str = "Linux"
+ arena_path: Path = storage_dir.joinpath("arena", platform, "Arena.x86_64")
+ unity_log_path: Path = storage_dir.joinpath("logs", "unity_logs.log")
+ display: Union[str, int] = 1
+
+ # Evaluator settings
+ enforce_successful_preparation: bool = False
+
+ @property
+ def should_resume_previous_wandb_run(self) -> bool:
+ """Determine whether or not we should resume the previous wandb run.
+
+ If the `storage/action_outputs/` dir is empty, then we should not resume.
+ """
+ is_evaluation_output_dir_empty = not any(self.evaluation_output_dir.glob("*"))
+
+ # If the directory is empty, be sure to delete any existing metric checkpoints
+ if is_evaluation_output_dir_empty:
+ self.evaluation_metrics_checkpoint.unlink(missing_ok=True)
+
+ return not is_evaluation_output_dir_empty
+
+ def put_settings_in_environment(self) -> None:
+ """Put settings in the environment variables."""
+ for env_name, env_var in self:
+ os.environ[env_name.upper()] = str(env_var)
+
+ def prepare_file_system(self) -> None:
+ """Prepare the various directories and files on the machine."""
+ # Create the necessary directories
+ directories_to_create = [
+ self.storage_dir,
+ self.auxiliary_metadata_dir,
+ self.auxiliary_metadata_cache_dir,
+ self.feature_cache_dir,
+ self.trajectory_dir,
+ self.models_dir,
+ self.unity_log_path.parent,
+ self.evaluation_output_dir,
+ self.missions_dir,
+ ]
+ for directory in directories_to_create:
+ directory.mkdir(parents=True, exist_ok=True)
+
+ # Create the unity logs path if it doesn't exist already
+ self.unity_log_path.touch(exist_ok=True)
diff --git a/storage/cdfs/01.json b/storage/cdfs/01.json
new file mode 100644
index 0000000..a1c0f5c
--- /dev/null
+++ b/storage/cdfs/01.json
@@ -0,0 +1,781 @@
+{
+ "task_description": "There are no intact bowls anywhere. There is a broken bowl in the sink. There's milk in the refrigerator. The Yesterday Machine is present. There is an open box of cereal next to the refrigerator.",
+ "goal_text": "Make a bowl of cereal with milk",
+ "game_id": "3",
+ "experimental": "true",
+ "scene": {
+ "floor_plan": "0",
+ "scene_id": "01 (Make_Cereal)",
+ "simbot_init": [],
+ "roomLocation": ["BreakRoom"],
+ "required_objects": [
+ {
+ "name": "Bowl_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ },
+ {
+ "isBroken": "true"
+ }
+ ],
+ "location": [
+ {
+ "TableRound_02_1": "in"
+ }
+ ],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "YesterdayMachine_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "MilkCarton_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "FridgeLower_02_1": "in"
+ }
+ ],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FridgeLower_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FridgeUpper_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Cereal_Box_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "TableRound_02_1": "in"
+ }
+ ],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_1",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_2",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_3",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_4",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_5",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_6",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_7",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FreezeRay_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "GravityPad_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_6",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_7",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_8",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "RoboticArm_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_1",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_2",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_3",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TAMPrototypeHead_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "PortalGenerator_10000",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TableRound_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_1",
+ "state": [
+ {
+ "text": "Place the bowl in this Time Machine and turn it on!"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_2",
+ "state": [
+ {
+ "text": "Before making cereal, this bowl needs to be repaired using the Time Machine on the breakroom counter."
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Security_Button_10000",
+ "state": [
+ {
+ "circuitId": -1
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "dinoFood": ""
+ }
+ ],
+ "sticky_notes": [
+ {
+ "name": "StickyNote_3",
+ "text": "Milk for the cereal can be found in the fridge.",
+ "room": "Breakroom",
+ "location": 2
+ }
+ ],
+ "blacklisted_layouts": null,
+ "layoutOverride": "OfficeLayout1",
+ "completelyRandomVisual": false
+ },
+ "task_goals": [
+ {
+ "goal_id": 0,
+ "object_states": [
+ {
+ "StickyNote_1": "isExamined=true"
+ },
+ {
+ "Bowl_01_1": "isBroken=false"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Locate the Time Machine on the breakroom counter",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 1,
+ "object_states": [
+ {
+ "Bowl_01_1": "isBroken=false"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Repair the broken bowl using the Time Machine",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 2,
+ "object_states": [
+ {
+ "Bowl_01_1": "isFilled=Milk"
+ },
+ {
+ "Bowl_01_1": "ContainsCereal=true"
+ }
+ ],
+ "object_states_relation": "and",
+ "preconditions": [],
+ "description": "Pour the cereal and milk into the bowl",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ }
+ ],
+ "game_interactions": {
+ "camera_movements": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ },
+ "game_messages": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ }
+ },
+ "stateconditions": [],
+ "pastPortals": [
+ {
+ "PortalName": "past",
+ "PortalStatus": false
+ }
+ ],
+ "futurePortals": [
+ {
+ "PortalName": "future",
+ "PortalStatus": false
+ }
+ ]
+}
diff --git a/storage/cdfs/07.json b/storage/cdfs/07.json
new file mode 100644
index 0000000..ee099a2
--- /dev/null
+++ b/storage/cdfs/07.json
@@ -0,0 +1,705 @@
+{
+ "task_description": "Assemble the laser machine and then fire it",
+ "goal_text": "Assemble and Fire the Laser Machine",
+ "game_id": "1",
+ "scene": {
+ "floor_plan": "-1",
+ "scene_id": "07 (AssembleLaser)",
+ "simbot_init": [],
+ "roomLocation": ["Lab1"],
+ "required_objects": [
+ {
+ "name": "Laser_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Shelf_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Desk_01_10006",
+ "state": [],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_Tip_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "Laser_1": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_ControlPanel_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "Desk_01_10006": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_CircuitBoard_01",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "Laser_1": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_1",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_2",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_3",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_4",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_5",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_6",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_7",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_6",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_7",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_8",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_1",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_2",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_3",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FreezeRay_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "GravityPad_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "RoboticArm_01_1",
+ "state": [
+ {
+ "isToggledOn": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TAMPrototypeHead_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_1",
+ "state": [
+ {
+ "text": "We need to place this Control Panel into the laser machine before we can fire it. Then, turn on the laser using the red computer."
+ }
+ ],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "PortalGenerator_10000",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Security_Button_10000",
+ "state": [
+ {
+ "circuitId": -1
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "dinoFood": ""
+ }
+ ],
+ "sticky_notes": null,
+ "blacklisted_layouts": null,
+ "layoutOverride": "OfficeLayout1",
+ "completelyRandomVisual": false
+ },
+ "task_goals": [
+ {
+ "goal_id": 0,
+ "object_states": [
+ {
+ "Laser_1": "Contains=Laser_ControlPanel_1"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Place the control panel into the laser machine",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 1,
+ "object_states": [
+ {
+ "Laser_1": "isToggledOn=true"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Turn on the red computer to fire laser",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ }
+ ],
+ "game_interactions": {
+ "camera_movements": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ },
+ "game_messages": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ }
+ },
+ "stateconditions": [],
+ "pastPortals": [
+ {
+ "PortalName": "past",
+ "PortalStatus": false
+ }
+ ],
+ "futurePortals": [
+ {
+ "PortalName": "future",
+ "PortalStatus": false
+ }
+ ]
+}
diff --git a/storage/cdfs/08.json b/storage/cdfs/08.json
new file mode 100644
index 0000000..afc153e
--- /dev/null
+++ b/storage/cdfs/08.json
@@ -0,0 +1,737 @@
+{
+ "task_description": "",
+ "goal_text": "Use the Freeze Ray to make an Ice-Cold Soda",
+ "game_id": "4",
+ "scene": {
+ "floor_plan": "1",
+ "scene_id": "08 (IceColdSoda)",
+ "simbot_init": [],
+ "roomLocation": ["Lab1"],
+ "required_objects": [
+ {
+ "name": "Door_01_1",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_2",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_3",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_4",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_5",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_6",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_7",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FreezeRay_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "GravityPad_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_6",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_7",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_8",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "RoboticArm_01_1",
+ "state": [
+ {
+ "isToggledOn": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "AP_Prop_Shelf_Wall_04_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Desk_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "CanSodaNew_01_1",
+ "state": [
+ {
+ "Unique": "true"
+ }
+ ],
+ "location": [
+ {
+ "Desk_01_1": "on"
+ }
+ ],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_1",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_2",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_3",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TAMPrototypeHead_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Desk_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "CanSodaNew_Crushed_01_2",
+ "state": [],
+ "location": [
+ {
+ "Desk_01_2": "on"
+ }
+ ],
+ "roomLocation": ["Lab1"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "PortalGenerator_10000",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_1",
+ "state": [
+ {
+ "Removed": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_2",
+ "state": [
+ {
+ "text": "Find a soda can and freeze it using the Freeze Ray machine."
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Security_Button_10000",
+ "state": [
+ {
+ "circuitId": -1
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "dinoFood": ""
+ }
+ ],
+ "sticky_notes": [
+ {
+ "name": "StickyNote_3",
+ "text": "Place the soda can on the blue shelf on the wall.",
+ "room": "Lab1",
+ "location": 0
+ },
+ {
+ "name": "StickyNote_4",
+ "text": "The Freeze Ray can be turned on using the blue computer on the desk.",
+ "room": "Lab1",
+ "location": 1
+ }
+ ],
+ "blacklisted_layouts": null,
+ "layoutOverride": "OfficeLayout1C",
+ "completelyRandomVisual": false
+ },
+ "task_goals": [
+ {
+ "goal_id": 0,
+ "object_states": [
+ {
+ "CanSodaNew_01_1": "isPickedUp=true"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Find a soda can",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 1,
+ "object_states": [
+ {
+ "AP_Prop_Shelf_Wall_04_1": "Contains=CanSodaNew_01_1"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Place the soda can on the blue shelf",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 2,
+ "object_states": [
+ {
+ "CanSodaNew_01_1": "isCold=true"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Turn on the blue computer to fire the freeze ray",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ }
+ ],
+ "game_interactions": {
+ "camera_movements": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ },
+ "game_messages": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ }
+ },
+ "stateconditions": [],
+ "pastPortals": [
+ {
+ "PortalName": "past",
+ "PortalStatus": false
+ }
+ ],
+ "futurePortals": [
+ {
+ "PortalName": "future",
+ "PortalStatus": false
+ }
+ ]
+}
diff --git a/storage/cdfs/40.json b/storage/cdfs/40.json
new file mode 100644
index 0000000..b7706e2
--- /dev/null
+++ b/storage/cdfs/40.json
@@ -0,0 +1,783 @@
+{
+ "task_description": "",
+ "goal_text": "Change the color of a white bowl to red",
+ "game_id": "2",
+ "scene": {
+ "floor_plan": "-1",
+ "scene_id": "40 (RedBowlScan)",
+ "simbot_init": [],
+ "roomLocation": ["Lab2"],
+ "required_objects": [
+ {
+ "name": "Broken_Cord_01_1",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_2",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Broken_Cord_01_3",
+ "state": [
+ {
+ "isToggledOn": "false"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_1",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_2",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_3",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_4",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_5",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_6",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Door_01_7",
+ "state": [
+ {
+ "isOpen": "true"
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FreezeRay_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "FuseBox_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "GravityPad_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Laser_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_2",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_3",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_4",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_6",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_7",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "LightSwitch_01_8",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "PortalGenerator_10000",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "RoboticArm_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TAMPrototypeHead_01_1",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Bowl_01_1",
+ "state": [],
+ "location": [
+ {
+ "TableRound_02_1": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "TableRound_02_1",
+ "state": [],
+ "location": [],
+ "roomLocation": ["BreakRoom"],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Computer_Monitor_01_5",
+ "state": [],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "StickyNote_1",
+ "state": [
+ {
+ "text": "This is a color changer. Find a white bowl from the break room to test this machine."
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Bowl_01_2",
+ "state": [],
+ "location": [
+ {
+ "TableRound_02_1": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Bowl_01_3",
+ "state": [],
+ "location": [
+ {
+ "TableRound_02_1": "in"
+ }
+ ],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "yesterdayState": "",
+ "dinoFood": ""
+ },
+ {
+ "name": "Security_Button_10000",
+ "state": [
+ {
+ "circuitId": -1
+ }
+ ],
+ "location": [],
+ "roomLocation": [],
+ "condition": {},
+ "colors": [],
+ "printingObject": "",
+ "associatedPastPortals": [],
+ "associatedFuturePortals": [],
+ "currentPortal": "",
+ "dinoFood": ""
+ }
+ ],
+ "sticky_notes": null,
+ "blacklisted_layouts": null,
+ "layoutOverride": "OfficeLayout1C_mirror",
+ "completelyRandomVisual": false
+ },
+ "task_goals": [
+ {
+ "goal_id": 0,
+ "object_states": [
+ {
+ "Bowl_01_*": "isPickedUp=true"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Get a bowl from the breakroom",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ },
+ {
+ "goal_id": 1,
+ "object_states": [
+ {
+ "Bowl_01_1": "red1=true"
+ },
+ {
+ "Bowl_01_2": "red2=true"
+ },
+ {
+ "Bowl_01_3": "red3=true"
+ }
+ ],
+ "object_states_relation": "or",
+ "preconditions": [],
+ "description": "Use the color changer in the Quantum Lab to paint the bowl red",
+ "visibility": {
+ "isHidden": false,
+ "activationInteractable": "ALWAYS UNLOCKED",
+ "stickyNoteIndex": 0
+ },
+ "canReset": false
+ }
+ ],
+ "game_interactions": {
+ "camera_movements": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ },
+ "game_messages": {
+ "task_beginning": [],
+ "task_procedure": [],
+ "task_ending": [],
+ "object_conditions": []
+ }
+ },
+ "stateconditions": [
+ {
+ "expression": {
+ "AND": {
+ "expressions": [
+ {
+ "isScanned": {
+ "target": "Bowl_01_1",
+ "message": "",
+ "value": true
+ }
+ },
+ {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_1",
+ "message": ""
+ }
+ }
+ ]
+ }
+ },
+ "stateName": "bowl1",
+ "context": "Bowl_01_1"
+ },
+ {
+ "expression": {
+ "AND": {
+ "expressions": [
+ {
+ "isScanned": {
+ "target": "Bowl_01_2",
+ "message": "",
+ "value": true
+ }
+ },
+ {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_2",
+ "message": ""
+ }
+ }
+ ]
+ }
+ },
+ "stateName": "bowl2",
+ "context": "Bowl_01_2"
+ },
+ {
+ "expression": {
+ "AND": {
+ "expressions": [
+ {
+ "isScanned": {
+ "target": "Bowl_01_3",
+ "message": "",
+ "value": true
+ }
+ },
+ {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_3",
+ "message": ""
+ }
+ }
+ ]
+ }
+ },
+ "stateName": "bowl3",
+ "context": "Bowl_01_3"
+ },
+ {
+ "expression": {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_1",
+ "message": ""
+ }
+ },
+ "stateName": "red1",
+ "context": "Bowl_01_1"
+ },
+ {
+ "expression": {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_2",
+ "message": ""
+ }
+ },
+ "stateName": "red2",
+ "context": "Bowl_01_2"
+ },
+ {
+ "expression": {
+ "ColorMetaDataChange": {
+ "colorvalue": "Red",
+ "target": "Bowl_01_3",
+ "message": ""
+ }
+ },
+ "stateName": "red3",
+ "context": "Bowl_01_3"
+ }
+ ],
+ "pastPortals": [
+ {
+ "PortalName": "past",
+ "PortalStatus": false
+ }
+ ],
+ "futurePortals": [
+ {
+ "PortalName": "future",
+ "PortalStatus": false
+ }
+ ]
+}
diff --git a/tests/__init__.py b/tests/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..cd1f06e
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,23 @@
+import os
+from glob import glob
+
+import pytest
+
+
+# Import all the fixtures from every file in the tests/fixtures dir.
+pytest_plugins = [
+ fixture_file.replace("/", ".").replace(".py", "")
+ for fixture_file in glob("tests/fixtures/[!__]*.py", recursive=True)
+]
+
+os.environ["RUNNING_TESTS"] = "1"
+
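+# When _PYTEST_RAISE is set (typically by a debugger launch configuration), re-raise
+# exceptions from these hooks so the debugger breaks at the original failure instead
+# of pytest capturing it.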
+if os.getenv("_PYTEST_RAISE", "0") != "0":
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_exception_interact(call):
+ raise call.excinfo.value
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_internalerror(excinfo):
+ raise excinfo.value
diff --git a/tests/test_generated_missions.py b/tests/test_generated_missions.py
new file mode 100644
index 0000000..e7166ce
--- /dev/null
+++ b/tests/test_generated_missions.py
@@ -0,0 +1,78 @@
+from deepdiff import DeepDiff
+from pytest_cases import fixture, param_fixture
+
+from arena_missions.builders import (
+ ChallengeBuilder,
+ ChallengeBuilderFunction,
+ MissionBuilder,
+ RequiredObjectBuilder,
+)
+from arena_missions.structures import CDF, HighLevelKey, Mission
+
+
+@fixture(scope="module")
+def required_object_builder() -> RequiredObjectBuilder:
+ return RequiredObjectBuilder()
+
+
+@fixture(scope="module")
+def mission_builder(required_object_builder: RequiredObjectBuilder) -> MissionBuilder:
+ return MissionBuilder(ChallengeBuilder(), required_object_builder)
+
+
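+# Parametrize over every registered challenge builder so each one becomes its own test
+# case, using the builder's high-level key as the test ID.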
+challenge_builder_function = param_fixture(
+ "challenge_builder_function",
+ [x[1] for x in ChallengeBuilder()],
+ ids=[x[0].key for x in ChallengeBuilder()],
+ scope="module",
+)
+
+build_challenge_tuple = param_fixture(
+ "build_challenge_tuple",
+ list(ChallengeBuilder()),
+ ids=[x[0].key for x in ChallengeBuilder()],
+ scope="module",
+)
+
+
+def test_challenge_builder_instantiates_without_error() -> None:
+ assert ChallengeBuilder()
+
+
+def test_registered_challenge_builders_are_valid(
+ challenge_builder_function: ChallengeBuilderFunction,
+) -> None:
+ builder_output = challenge_builder_function()
+ assert builder_output
+
+
+def test_generated_cdfs_are_valid(
+ challenge_builder_function: ChallengeBuilderFunction,
+ mission_builder: MissionBuilder,
+) -> None:
+ builder_output = challenge_builder_function()
+ cdf = mission_builder.generate_cdf(builder_output)
+
+ # Verify the CDF is valid
+ assert cdf
+
+ # Make sure the CDF can be reimported successfully
+ reimported_cdf = CDF.parse_obj(cdf.dict(by_alias=True))
+ assert reimported_cdf
+
+ # Make sure the reimported CDF is identical to the original; DeepDiff returns an
+ # empty result when the two dicts match, so an empty diff means they are equal.
+ assert not DeepDiff(cdf.dict(by_alias=True), reimported_cdf.dict(by_alias=True))
+
+
+def test_generated_missions_are_valid(
+ build_challenge_tuple: tuple[HighLevelKey, ChallengeBuilderFunction],
+ mission_builder: MissionBuilder,
+) -> None:
+ high_level_key, challenge_builder_function = build_challenge_tuple
+ mission = mission_builder.generate_mission(high_level_key, challenge_builder_function)
+
+ assert mission
+
+ # Make sure the mission can be reimported successfully
+ assert Mission.parse_obj(mission.dict(by_alias=True))